m-ric (HF staff) committed
Commit e76df21 · 1 parent: b97f2de

Numerous fixes

Files changed (1): app.py (+43 -17)
app.py CHANGED
@@ -5,12 +5,14 @@ import threading
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from pathlib import Path
-from typing import List
+from typing import List, Optional
 
 import datasets
 import pandas as pd
 from dotenv import load_dotenv
 from huggingface_hub import login
+import gradio as gr
+
 from scripts.reformulator import prepare_response
 from scripts.run_agents import (
     get_single_file_description,
@@ -38,6 +40,7 @@ from smolagents import (
     Model,
     ToolCallingAgent,
 )
+from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types
 
 
 AUTHORIZED_IMPORTS = [
@@ -106,10 +109,10 @@ BROWSER_CONFIG = {
 
 os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)
 
-
-model = HfApiModel(
-    "Qwen/Qwen2.5-32B-Instruct",
+model = LiteLLMModel(
+    "gpt-4o",
     custom_role_conversions=custom_role_conversions,
+    api_key=os.getenv("OPENAI_API_KEY")
 )
 
 text_limit = 20000
@@ -176,17 +179,46 @@ document_inspection_tool = TextInspectorTool(model, 20000)
 
 # final_result = agent.run(augmented_question)
 
-import gradio as gr
-from smolagents.gradio_ui import stream_to_gradio
+
+def stream_to_gradio(
+    agent,
+    task: str,
+    reset_agent_memory: bool = False,
+    additional_args: Optional[dict] = None,
+):
+    """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
+    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+        for message in pull_messages_from_step(
+            step_log,
+        ):
+            yield message
+
+    final_answer = step_log  # Last log is the run's final_answer
+    final_answer = handle_agent_output_types(final_answer)
+
+    if isinstance(final_answer, AgentText):
+        yield gr.ChatMessage(
+            role="assistant",
+            content=f"**Final answer:**\n{final_answer.to_string()}\n",
+        )
+    elif isinstance(final_answer, AgentImage):
+        yield gr.ChatMessage(
+            role="assistant",
+            content={"path": final_answer.to_string(), "mime_type": "image/png"},
+        )
+    elif isinstance(final_answer, AgentAudio):
+        yield gr.ChatMessage(
+            role="assistant",
+            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
+        )
+    else:
+        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+
 
 class GradioUI:
     """A one-line interface to launch your agent in Gradio"""
 
-    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
-        if not _is_package_available("gradio"):
-            raise ModuleNotFoundError(
-                "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
-            )
+    def __init__(self, agent, file_upload_folder: str | None = None):
         self.agent = agent
         self.file_upload_folder = file_upload_folder
         if self.file_upload_folder is not None:
@@ -194,8 +226,6 @@ class GradioUI:
                 os.mkdir(file_upload_folder)
 
     def interact_with_agent(self, prompt, messages):
-        import gradio as gr
-
         messages.append(gr.ChatMessage(role="user", content=prompt))
         yield messages
         for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
@@ -216,8 +246,6 @@ class GradioUI:
         """
         Handle file uploads, default allowed types are .pdf, .docx, and .txt
         """
-        import gradio as gr
-
        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log
 
@@ -263,8 +291,6 @@ class GradioUI:
         )
 
     def launch(self, **kwargs):
-        import gradio as gr
-
         with gr.Blocks(fill_height=True) as demo:
             stored_messages = gr.State([])
             file_uploads_log = gr.State([])
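
Note, not part of the commit: a minimal usage sketch of the pieces touched above, assuming it runs at the bottom of app.py so that the model instance, the GradioUI class, and the local stream_to_gradio generator are in scope. The ToolCallingAgent with an empty tool list and the "./uploads" folder are illustrative assumptions; the agent actually built in the unchanged part of app.py may differ.

# Illustrative sketch, assumed to run inside app.py (not part of this diff).
from smolagents import ToolCallingAgent

# Hypothetical agent for demonstration only -- the real agent setup lives
# in the unchanged part of app.py and may use different tools and prompts.
demo_agent = ToolCallingAgent(tools=[], model=model)

# GradioUI.interact_with_agent() streams each step through the local
# stream_to_gradio() generator added in this commit, ending with a
# "Final answer" chat message.
GradioUI(demo_agent, file_upload_folder="./uploads").launch()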