Chris4K committed · Commit 53122c5 · verified · Parent(s): 8f82242

Update app.py

Files changed (1): app.py (+149, -0)
app.py CHANGED
@@ -9,3 +9,152 @@ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torc

  # Helper function to process long contexts
  MAX_TOKENS = 100000  # Replace with the max token limit of the Llama model
+
+
+ #########
+ # Imports
+ #########
+ import faiss
+ import torch
+ import pandas as pd
+ from sentence_transformers import SentenceTransformer
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import gradio as gr
+
+ # Load Llama model (already loaded at the top of the file; kept here for reference)
+ #model_name = "meta-llama/Llama-3.2-3B-Instruct"  # Replace with the exact model path
+ #tokenizer = AutoTokenizer.from_pretrained(model_name)
+ #model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
+
+ # Load Sentence Transformer model for embeddings
+ embedder = SentenceTransformer('distiluse-base-multilingual-cased')  # suitable for German text
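+ # Note (assumption from the model card): distiluse-base-multilingual-cased
+ # yields 512-dimensional sentence embeddings; the index dimension below is
+ # read from the array shape, so swapping the embedder keeps the code consistent.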
+
+ ########
+ # Data loading
+ ########
+ # Load the CSV data (bofrost product feed, pipe-separated)
+ url = 'https://www.bofrost.de/datafeed/DE/products.csv'
+ data = pd.read_csv(url, sep='|')
+
+ # List of columns to keep
+ columns_to_keep = [
+     'ID', 'Name', 'Description', 'Price',
+     'ProductCategory', 'Grammage',
+     'BasePriceText', 'Rating', 'RatingCount',
+     'Ingredients', 'CreationDate', 'Keywords', 'Brand'
+ ]
+
+ # Filter the DataFrame (.copy() so the column assignments below do not
+ # trigger pandas' SettingWithCopyWarning)
+ data_cleaned = data[columns_to_keep].copy()
+
+ # Remove unwanted characters from the 'Description' column
+ data_cleaned['Description'] = data_cleaned['Description'].str.replace(r'[^\w\s.,;:\'"/?!€$%&()\[\]{}<>|=+\\-]', ' ', regex=True)
+
+ # Combine relevant text columns for embedding
+ data_cleaned['combined_text'] = data_cleaned.apply(lambda row: ' '.join([str(row[col]) for col in ['Name', 'Description', 'Keywords'] if pd.notnull(row[col])]), axis=1)
+
+ ######
+ # Embeddings and FAISS index
+ ######
+
+ # Generate embeddings for the combined text
+ embeddings = embedder.encode(data_cleaned['combined_text'].tolist(), convert_to_tensor=True)
+
+ # Convert embeddings to numpy array
+ embeddings = embeddings.cpu().detach().numpy()
+
+ # Initialize FAISS index
+ d = embeddings.shape[1]  # Dimension of embeddings
+ faiss_index = faiss.IndexFlatL2(d)
+
+ # Add embeddings to the index
+ faiss_index.add(embeddings)
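+
+ # IndexFlatL2 does exact (brute-force) L2 search, which is adequate for a
+ # catalog of this size; for cosine similarity one could L2-normalize the
+ # embeddings (faiss.normalize_L2) and use faiss.IndexFlatIP instead.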
+
+ #######
+ # Retrieval
+ #######
+ def search_products(query, top_k=7):
+     # Generate embedding for the query
+     query_embedding = embedder.encode([query], convert_to_tensor=True).cpu().detach().numpy()
+
+     # Search FAISS index
+     distances, indices = faiss_index.search(query_embedding, top_k)
+
+     # Retrieve corresponding products
+     results = data_cleaned.iloc[indices[0]].to_dict(orient='records')
+     return results
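+
+ # Minimal usage sketch (hypothetical query string, not from the feed):
+ #   hits = search_products("vegetarische Pizza", top_k=3)
+ #   for p in hits:
+ #       print(p['ID'], p['Name'], p['Price'])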
+
+
+ # Build the system prompt around the retrieved product context (FAISS results)
+ def construct_system_prompt(context):
+     prompt = f"You are a friendly bot specializing in Bofrost products. Return comprehensive German answers. Always add product IDs. Use the following product descriptions:\n\n{context}\n\n"
+     return prompt
+
+ # Helper function to construct the prompt (Llama 3 chat template)
+ def construct_prompt(user_input, context, chat_history, max_history_turns=1):
+     system_message = construct_system_prompt(context)
+     prompt = f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>"
+
+     # Limit history to the last max_history_turns
+     for user_msg, assistant_msg in chat_history[-max_history_turns:]:
+         prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_msg}<|eot_id|>"
+         prompt += f"<|start_header_id|>assistant<|end_header_id|>\n\n{assistant_msg}<|eot_id|>"
+
+     prompt += f"<|start_header_id|>user<|end_header_id|>\n\n{user_input}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
+     print("-------------------------")
+     print(prompt)
+     return prompt
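+
+ # The <|begin_of_text|> / <|start_header_id|> / <|eot_id|> markers follow the
+ # Llama 3 instruct chat format; tokenizer.apply_chat_template could build the
+ # same structure from a list of role/content messages.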
+
+ def chat_with_model(user_input, chat_history=None):
+     # Avoid a mutable default argument
+     if chat_history is None:
+         chat_history = []
+
+     # Search for relevant products
+     search_results = search_products(user_input)
+
+     # Create context with search results
+     if search_results:
+         context = "Product Context:\n"
+         for product in search_results:
+             context += f"Produkt ID: {product['ID']}\n"
+             context += f"Name: {product['Name']}\n"
+             context += f"Beschreibung: {product['Description']}\n"
+             context += f"Preis: {product['Price']}€\n"
+             context += f"Bewertung: {product['Rating']} ({product['RatingCount']} Bewertungen)\n"
+             context += f"Kategorie: {product['ProductCategory']}\n"
+             context += f"Marke: {product['Brand']}\n"
+             context += "---\n"
+     else:
+         context = "Das weiß ich nicht."
+     print("context: ------------------------------------- \n" + context)
+
+     # Pass both user_input and context to construct_prompt
+     prompt = construct_prompt(user_input, context, chat_history)
+     print("prompt: ------------------------------------- \n" + prompt)
+
+     tokenizer.pad_token = tokenizer.eos_token
+     input_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=4096).to(model.device)
+     attention_mask = torch.ones_like(input_ids)
+     outputs = model.generate(input_ids, attention_mask=attention_mask,
+                              max_new_tokens=1200, do_sample=True,
+                              top_k=50, temperature=0.7)
+     response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
+     print("response: ------------------------------------- \n" + response)
+
+     # Store the user turn (not the retrieved context) so construct_prompt's
+     # (user_msg, assistant_msg) unpacking sees the actual conversation
+     chat_history.append((user_input, response))
+     return response, chat_history
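+
+ # Note: max_length=4096 above is an assumed input budget; the MAX_TOKENS
+ # constant defined earlier is not enforced here and could be wired into the
+ # truncation step for longer contexts.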
+
+ #####
+ # Gradio Interface
+ #####
+ def gradio_interface(user_input, history):
+     response, updated_history = chat_with_model(user_input, history)
+     return response, updated_history
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🦙 Llama Instruct Chat with FAISS Product Search")
+     with gr.Row():
+         user_input = gr.Textbox(label="Your Message", lines=2, placeholder="Type your message here...")
+         submit_btn = gr.Button("Send")
+     chat_history = gr.State([])
+     chat_display = gr.Textbox(label="Chat Response", lines=10, placeholder="Chat history will appear here...", interactive=False)
+     submit_btn.click(gradio_interface, inputs=[user_input, chat_history], outputs=[chat_display, chat_history])
+
+ demo.launch(debug=True)