Spaces:
Runtime error
Runtime error
don't remove <br> tags; only reduce consecutive newlines
Browse files
app.py
CHANGED
@@ -5,6 +5,7 @@ import retrieval
|
|
5 |
# UNCOMMENT ONLY WHEN RUNNING LOCALLY (not on Spaces)
|
6 |
# from dotenv import load_dotenv
|
7 |
from text_generation import Client, InferenceAPIClient
|
|
|
8 |
|
9 |
# load API keys from globally-availabe .env file
|
10 |
# SECRETS_FILEPATH = "/mnt/project/chatbotai/huggingface_cache/internal_api_keys.env"
|
@@ -106,7 +107,7 @@ def predict(
|
|
106 |
stop_sequences=[user_name.rstrip(), assistant_name.rstrip()],
|
107 |
)
|
108 |
|
109 |
-
|
110 |
for i, response in enumerate(iterator):
|
111 |
if response.token.special:
|
112 |
continue
|
@@ -123,33 +124,33 @@ def predict(
|
|
123 |
history[-1] = partial_words
|
124 |
|
125 |
chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
|
126 |
-
|
127 |
yield chat, history, None, None, None, []
|
128 |
|
129 |
-
|
130 |
-
print("1. history:", history)
|
131 |
-
|
132 |
-
# nested list, lame.
|
133 |
-
cleaned_final_chat_response = []
|
134 |
-
for human_chat, bot_chat in final_chat_response:
|
135 |
-
human_chat = human_chat.replace("<br>", "")
|
136 |
-
human_chat = human_chat.replace("\n\n", "\n")
|
137 |
-
bot_chat = bot_chat.replace("<br>", "")
|
138 |
-
bot_chat = bot_chat.replace("\n\n", "\n")
|
139 |
-
cleaned_final_chat_response.append( (human_chat, bot_chat) )
|
140 |
-
|
141 |
-
print(">>>>>>>>>>>> KEYY. final_chat_response -- AFTER CLEANING -----:", cleaned_final_chat_response)
|
142 |
-
|
143 |
# Pinecone context retrieval
|
144 |
top_context_list = ta.retrieve_contexts_from_pinecone(user_question=inputs, topk=NUM_ANSWERS_GENERATED)
|
145 |
# yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], []
|
146 |
yield cleaned_final_chat_response, history, top_context_list[0], top_context_list[1], top_context_list[2], []
|
|
|
|
|
147 |
|
148 |
# run CLIP
|
149 |
images_list = ta.clip_text_to_image(inputs)
|
150 |
# yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
|
151 |
yield cleaned_final_chat_response, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
|
152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
|
154 |
def reset_textbox():
|
155 |
return gr.update(value="")
|
|
|
5 |
# UNCOMMENT ONLY WHEN RUNNING LOCALLY (not on Spaces)
|
6 |
# from dotenv import load_dotenv
|
7 |
from text_generation import Client, InferenceAPIClient
|
8 |
+
from typing import List, Tuple
|
9 |
|
10 |
# load API keys from globally-availabe .env file
|
11 |
# SECRETS_FILEPATH = "/mnt/project/chatbotai/huggingface_cache/internal_api_keys.env"
|
|
|
107 |
stop_sequences=[user_name.rstrip(), assistant_name.rstrip()],
|
108 |
)
|
109 |
|
110 |
+
chat_response = None
|
111 |
for i, response in enumerate(iterator):
|
112 |
if response.token.special:
|
113 |
continue
|
|
|
124 |
history[-1] = partial_words
|
125 |
|
126 |
chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
|
127 |
+
chat_response = chat
|
128 |
yield chat, history, None, None, None, []
|
129 |
|
130 |
+
cleaned_final_chat_response = clean_chat_response(chat_response)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
131 |
# Pinecone context retrieval
|
132 |
top_context_list = ta.retrieve_contexts_from_pinecone(user_question=inputs, topk=NUM_ANSWERS_GENERATED)
|
133 |
# yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], []
|
134 |
yield cleaned_final_chat_response, history, top_context_list[0], top_context_list[1], top_context_list[2], []
|
135 |
+
|
136 |
+
cleaned_final_chat_response = clean_chat_response(chat_response)
|
137 |
|
138 |
# run CLIP
|
139 |
images_list = ta.clip_text_to_image(inputs)
|
140 |
# yield chat, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
|
141 |
yield cleaned_final_chat_response, history, top_context_list[0], top_context_list[1], top_context_list[2], images_list
|
142 |
|
143 |
+
def clean_chat_response(chat: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
    """Collapse runs of consecutive newlines in every chat turn.

    ``<br>`` tags are deliberately preserved (they are needed for
    rendering in the chat widget); only newlines are reduced.

    Args:
        chat: List of ``(human_message, bot_message)`` tuples, or
            ``None``/empty when the generation stream produced nothing.

    Returns:
        A new list of cleaned ``(human, bot)`` tuples; ``[]`` when
        *chat* is ``None`` or empty.
    """
    # predict() initializes its buffer to None before streaming; guard
    # so an empty stream doesn't crash the loop below.
    if not chat:
        return []

    def _squeeze_newlines(text: str) -> str:
        # A single .replace("\n\n", "\n") pass only halves a run
        # ("\n\n\n\n" -> "\n\n"), so repeat until no doubles remain.
        while "\n\n" in text:
            text = text.replace("\n\n", "\n")
        return text

    return [
        (_squeeze_newlines(human_chat), _squeeze_newlines(bot_chat))
        for human_chat, bot_chat in chat
    ]
|
153 |
+
|
154 |
|
155 |
def reset_textbox():
    """Return a Gradio component update that blanks out the textbox."""
    return gr.update(value="")
|