Add search-only button
Browse files
- app.py +4 -4
- climateqa/engine/graph.py +12 -3
- style.css +4 -0
app.py
CHANGED
@@ -118,7 +118,7 @@ reranker = get_reranker("nano")
|
|
118 |
agent = make_graph_agent(llm=llm, vectorstore_ipcc=vectorstore, vectorstore_graphs=vectorstore_graphs, reranker=reranker)
|
119 |
|
120 |
|
121 |
-
async def chat(query, history, audience, sources, reports, relevant_content_sources):
|
122 |
"""taking a query and a message history, use a pipeline (reformulation, retriever, answering) to yield a tuple of:
|
123 |
(messages in gradio format, messages in langchain format, source documents)"""
|
124 |
|
@@ -134,7 +134,7 @@ async def chat(query, history, audience, sources, reports, relevant_content_sour
|
|
134 |
if reports is None or len(reports) == 0:
|
135 |
reports = []
|
136 |
|
137 |
-
inputs = {"user_input": query,"audience": audience_prompt,"sources_input":sources, "relevant_content_sources" : relevant_content_sources}
|
138 |
result = agent.astream_events(inputs,version = "v1")
|
139 |
|
140 |
|
@@ -569,14 +569,14 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
|
|
569 |
|
570 |
(textbox
|
571 |
.submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_textbox")
|
572 |
-
.then(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, dropdown_external_sources] ,[chatbot,sources_textbox,output_query,output_language, sources_raw, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
|
573 |
.then(finish_chat, None, [textbox],api_name = "finish_chat_textbox")
|
574 |
# .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_sources, tab_figures, tab_graphs, tab_papers] )
|
575 |
)
|
576 |
|
577 |
(examples_hidden
|
578 |
.change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
|
579 |
-
.then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, dropdown_external_sources] ,[chatbot,sources_textbox,output_query,output_language, sources_raw, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
|
580 |
.then(finish_chat, None, [textbox],api_name = "finish_chat_examples")
|
581 |
# .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_sources, tab_figures, tab_graphs, tab_papers] )
|
582 |
)
|
|
|
118 |
agent = make_graph_agent(llm=llm, vectorstore_ipcc=vectorstore, vectorstore_graphs=vectorstore_graphs, reranker=reranker)
|
119 |
|
120 |
|
121 |
+
async def chat(query, history, audience, sources, reports, relevant_content_sources, search_only):
|
122 |
"""taking a query and a message history, use a pipeline (reformulation, retriever, answering) to yield a tuple of:
|
123 |
(messages in gradio format, messages in langchain format, source documents)"""
|
124 |
|
|
|
134 |
if reports is None or len(reports) == 0:
|
135 |
reports = []
|
136 |
|
137 |
+
inputs = {"user_input": query,"audience": audience_prompt,"sources_input":sources, "relevant_content_sources" : relevant_content_sources, "search_only": search_only}
|
138 |
result = agent.astream_events(inputs,version = "v1")
|
139 |
|
140 |
|
|
|
569 |
|
570 |
(textbox
|
571 |
.submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_textbox")
|
572 |
+
.then(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, dropdown_external_sources, search_only] ,[chatbot,sources_textbox,output_query,output_language, sources_raw, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
|
573 |
.then(finish_chat, None, [textbox],api_name = "finish_chat_textbox")
|
574 |
# .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_sources, tab_figures, tab_graphs, tab_papers] )
|
575 |
)
|
576 |
|
577 |
(examples_hidden
|
578 |
.change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
|
579 |
+
.then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, dropdown_external_sources, search_only] ,[chatbot,sources_textbox,output_query,output_language, sources_raw, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
|
580 |
.then(finish_chat, None, [textbox],api_name = "finish_chat_examples")
|
581 |
# .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_sources, tab_figures, tab_graphs, tab_papers] )
|
582 |
)
|
climateqa/engine/graph.py
CHANGED
@@ -20,7 +20,7 @@ from .chains.retrieve_documents import make_retriever_node
|
|
20 |
from .chains.answer_rag import make_rag_node
|
21 |
from .chains.graph_retriever import make_graph_retriever_node
|
22 |
from .chains.chitchat_categorization import make_chitchat_intent_categorization_node
|
23 |
-
from .chains.set_defaults import set_defaults
|
24 |
|
25 |
class GraphState(TypedDict):
|
26 |
"""
|
@@ -43,6 +43,7 @@ class GraphState(TypedDict):
|
|
43 |
documents: List[Document]
|
44 |
related_contents : Dict[str,Document]
|
45 |
recommended_content : List[Document]
|
|
|
46 |
|
47 |
def search(state): #TODO
|
48 |
return state
|
@@ -80,6 +81,13 @@ def route_based_on_relevant_docs(state,threshold_docs=0.2):
|
|
80 |
else:
|
81 |
return "answer_rag_no_docs"
|
82 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
|
84 |
def make_id_dict(values):
|
85 |
return {k:k for k in values}
|
@@ -138,8 +146,9 @@ def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, reranker, thresh
|
|
138 |
)
|
139 |
workflow.add_conditional_edges(
|
140 |
"retrieve_documents",
|
141 |
-
lambda state : "retrieve_documents" if len(state["remaining_questions"]) > 0 else "answer_search",
|
142 |
-
|
|
|
143 |
)
|
144 |
|
145 |
workflow.add_conditional_edges(
|
|
|
20 |
from .chains.answer_rag import make_rag_node
|
21 |
from .chains.graph_retriever import make_graph_retriever_node
|
22 |
from .chains.chitchat_categorization import make_chitchat_intent_categorization_node
|
23 |
+
# from .chains.set_defaults import set_defaults
|
24 |
|
25 |
class GraphState(TypedDict):
|
26 |
"""
|
|
|
43 |
documents: List[Document]
|
44 |
related_contents : Dict[str,Document]
|
45 |
recommended_content : List[Document]
|
46 |
+
search_only : bool = False
|
47 |
|
48 |
def search(state): #TODO
|
49 |
return state
|
|
|
81 |
else:
|
82 |
return "answer_rag_no_docs"
|
83 |
|
84 |
+
def route_retrieve_documents(state):
|
85 |
+
if state["search_only"] :
|
86 |
+
return END
|
87 |
+
elif len(state["remaining_questions"]) > 0:
|
88 |
+
return "retrieve_documents"
|
89 |
+
else:
|
90 |
+
return "answer_search"
|
91 |
|
92 |
def make_id_dict(values):
|
93 |
return {k:k for k in values}
|
|
|
146 |
)
|
147 |
workflow.add_conditional_edges(
|
148 |
"retrieve_documents",
|
149 |
+
# lambda state : "retrieve_documents" if len(state["remaining_questions"]) > 0 else "answer_search",
|
150 |
+
route_retrieve_documents,
|
151 |
+
make_id_dict([END,"retrieve_documents","answer_search"])
|
152 |
)
|
153 |
|
154 |
workflow.add_conditional_edges(
|
style.css
CHANGED
@@ -561,6 +561,10 @@ span.chatbot > p > img{
|
|
561 |
input[type="checkbox"]:checked + .dropdown-content {
|
562 |
display: block;
|
563 |
}
|
|
|
|
|
|
|
|
|
564 |
|
565 |
.dropdown-content {
|
566 |
display: none;
|
|
|
561 |
input[type="checkbox"]:checked + .dropdown-content {
|
562 |
display: block;
|
563 |
}
|
564 |
+
|
565 |
+
#checkbox-chat input[type="checkbox"] {
|
566 |
+
display: flex !important;
|
567 |
+
}
|
568 |
|
569 |
.dropdown-content {
|
570 |
display: none;
|