updates

Changed files:
- app.py +23 -9
- app_agent_config.py +36 -8
- controller.py +62 -18
- model/conversation_chain_singleton.py +18 -31
- utils/tool_loader.py +35 -1
- view/app_chat.py +54 -31
- view/app_sidebar.py +44 -21
app.py
CHANGED
@@ -1,25 +1,39 @@
+"""
+Module: main
+
+This module initializes the Streamlit app and sets up the user interface for interacting with the chatbot.
+
+Dependencies:
+- streamlit: The Streamlit library for building web applications.
+- controller: Module providing the Controller class for handling user submissions and managing conversations.
+- view.app_header: Module providing the app_header function for displaying the header section of the app.
+- view.app_sidebar: Module providing the app_sidebar function for displaying the sidebar section of the app.
+- view.app_chat: Module providing the app_chat function for displaying the main chat interface.
+
+Usage:
+- Run the Streamlit app using 'streamlit run main.py' command in the terminal.
+"""
+
+import streamlit as st
+from controller import Controller
 from view.app_header import app_header
 from view.app_sidebar import app_sidebar
 from view.app_chat import app_chat
 
+# Streamlit configuration (holds the session and session history as well)
 st.set_page_config(
+    page_title="Custom Transformers can really do anything...",
     page_icon="👋"
 )
 
-# Create an instance of Controller with agentConfig ## holds all data, config and settings
+# Create an instance of Controller with agentConfig ## holds all data, config, and settings
 controller = Controller()
 
+# Sidebar for context & config
 app_sidebar(controller)
 
+# App header
 app_header(controller)
 
+# Main content - the chat interface
 app_chat(controller)
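Note on the new wiring: Streamlit re-runs main.py from top to bottom on every interaction, so Controller() is rebuilt on each rerun and only values stored in st.session_state (as app_chat does for the message history) survive between turns. A minimal sketch of that behaviour, using nothing beyond the Streamlit API:

import streamlit as st

# Everything at module level runs again on every interaction (chat input, button, ...).
if "messages" not in st.session_state:
    st.session_state.messages = []      # persists across reruns; plain variables do not

st.session_state.messages.append("one more rerun")
st.write(len(st.session_state.messages))  # grows with each interaction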
app_agent_config.py
CHANGED
@@ -1,9 +1,38 @@
+"""
+Module: app_agent_config
+
+This module defines the AgentConfig class, which holds configuration settings for the agent.
+
+Dependencies:
+- utils.tool_loader: Module providing the ToolLoader class for loading tools.
+- utils.tool_config: Module providing tool_names for configuration.
+
+Classes:
+- AgentConfig: A class for managing configuration settings for the agent.
+"""
+
+from utils.tool_loader import ToolLoader  # Importing ToolLoader class from utils.tool_loader module
+from utils.tool_config import tool_names  # Importing tool_names from utils.tool_config module
 
 class AgentConfig:
+    """
+    A class for managing configuration settings for the agent.
+    """
     def __init__(self):
+        """
+        Initializes an instance of the AgentConfig class.
+
+        Attributes:
+        - url_endpoint (str): The URL endpoint for the agent.
+        - tool_checkboxes (list): Checkboxes for available tools.
+        - s_tool_checkboxes (list): Selected checkboxes for tools.
+        - image (list): Image data.
+        - document (str): Document data.
+        - log_enabled (bool): Flag indicating whether logging is enabled.
+        - context (str): Context data.
+        - tool_loader (ToolLoader): Instance of ToolLoader class for loading tools.
+        - agent_urls (list): URLs for different agents.
+        """
         self.url_endpoint = ""
         self.tool_checkboxes = []
         self.s_tool_checkboxes = []
@@ -13,8 +42,7 @@ class AgentConfig:
         self.context = ""
         self.tool_loader = ToolLoader(tool_names)
         self.agent_urls = [
+            "https://api-inference.huggingface.co/models/bigcode/starcoder",
+            "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+            "https://api-inference.huggingface.co/models/gpt2"
+        ]
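For orientation, this is how the new configuration object is consumed elsewhere in this commit (a standalone sketch built only from the fields shown above, with the ToolLoader dependency left out so it runs on its own):

# Sketch: AgentConfig-like object without the ToolLoader dependency.
class AgentConfigSketch:
    def __init__(self):
        self.url_endpoint = ""
        self.s_tool_checkboxes = []
        self.agent_urls = [
            "https://api-inference.huggingface.co/models/bigcode/starcoder",
            "https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
            "https://api-inference.huggingface.co/models/gpt2",
        ]

cfg = AgentConfigSketch()
cfg.url_endpoint = cfg.agent_urls[0]          # app_sidebar's selectbox writes this field
cfg.s_tool_checkboxes = [True, False, True]   # one flag per tool shown in the sidebar
print(cfg.url_endpoint)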
controller.py
CHANGED
@@ -1,30 +1,67 @@
+"""
+Module: controller
+
+This module provides a Controller class for handling user submissions and managing conversations.
+
+Dependencies:
+- app_agent_config: Module providing the AgentConfig class for configuring agents.
+- utils.logger: Module providing logging functionalities.
+- model.custom_agent: Module providing the CustomHfAgent class for interacting with Hugging Face models.
+- model.conversation_chain_singleton: Module providing the ConversationChainSingleton class for managing conversation chains.
+
+Classes:
+- Controller: A class for handling user submissions and managing conversations.
+"""
 import os
-from app_agent_config import AgentConfig
-from utils.logger import log_response
-from model.custom_agent import CustomHfAgent
-from model.conversation_chain_singleton import ConversationChainSingleton
+from app_agent_config import AgentConfig  # Importing AgentConfig class from app_agent_config module
+from utils.logger import log_response  # Importing log_response function from utils.logger module
+from model.custom_agent import CustomHfAgent  # Importing CustomHfAgent class from model.custom_agent module
+from model.conversation_chain_singleton import ConversationChainSingleton  # Importing ConversationChainSingleton class from model.conversation_chain_singleton module
 
 def cut_text_after_keyword(text, keyword):
+    """
+    Cuts text after the occurrence of a keyword.
+
+    Args:
+    - text (str): The text to be processed.
+    - keyword (str): The keyword to search for in the text.
+
+    Returns:
+    - str: The processed text.
+    """
     index = text.find(keyword)
     if index != -1:
         return text[:index].strip()
     return text
 
 class Controller:
+    """
+    Controller class for handling user submissions and managing conversations.
+    """
     def __init__(self):
-        self.agent_config = AgentConfig()
-        #self.app_config = AppConfig()
+        self.agent_config = AgentConfig()  # Initialize AgentConfig instance
 
-    image = []
+    image = []  # Class attribute for storing image data
+
+    def handle_submission(self, user_message):
+        """
+        Handles user submission and interaction with the Hugging Face model.
+
+        Args:
+        - user_message (str): The message submitted by the user.
+
+        Returns:
+        - str: The response from the Hugging Face model.
+        """
         log_response("User input \n {}".format(user_message))
         log_response("selected_tools \n {}".format(self.agent_config.s_tool_checkboxes))
         log_response("url_endpoint \n {}".format(self.agent_config.url_endpoint))
         log_response("document \n {}".format(self.agent_config.document))
         log_response("image \n {}".format(self.agent_config.image))
         log_response("context \n {}".format(self.agent_config.context))
+
         selected_tools = [self.agent_config.tool_loader.tools[idx] for idx, checkbox in enumerate(self.agent_config.s_tool_checkboxes) if checkbox]
+
         agent = CustomHfAgent(
             url_endpoint=self.agent_config.url_endpoint,
             token=os.environ['HF_token'],
@@ -32,22 +69,29 @@
             input_params={"max_new_tokens": 192},
         )
 
+        agent_response = agent.chat(user_message, document=self.agent_config.document, image=self.agent_config.image, context=self.agent_config.context)
+
+        log_response("Agent Response\n {}".format(agent_response))
+
+        return agent_response
+
+    def handle_submission_chat(self, user_message, agent_response):
+        """
+        Handles user messages and responses in a conversation chain.
+
+        Args:
+        - user_message (str): The message submitted by the user.
+        - agent_response (str): The response from the agent.
+
+        Returns:
+        - str: The response from the conversation chain.
+        """
         agent_chat_bot = ConversationChainSingleton().get_conversation_chain()
+        if agent_response is not None:
+            text = agent_chat_bot.predict(input=user_message + agent_response)
         else:
             text = agent_chat_bot.predict(input=user_message)
 
         result = cut_text_after_keyword(text, "Human:")
         print(result)
         return result
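Two details of the new Controller are easy to miss: tools are matched to sidebar checkboxes purely by position, and cut_text_after_keyword trims everything from the first "Human:" onward so the chain's echo of the prompt is not displayed. A self-contained sketch of both, with plain lists standing in for the real ToolLoader and conversation chain:

def cut_text_after_keyword(text, keyword):
    # Same logic as the helper above: keep only the text before the keyword.
    index = text.find(keyword)
    if index != -1:
        return text[:index].strip()
    return text

tools = ["image_generator", "text_classifier", "speech_to_text"]   # stand-in for tool_loader.tools
s_tool_checkboxes = [True, False, True]                            # one checkbox state per tool

# Positional match: checkbox i enables tools[i].
selected_tools = [tools[idx] for idx, checkbox in enumerate(s_tool_checkboxes) if checkbox]
print(selected_tools)                                    # ['image_generator', 'speech_to_text']

raw = "Sure, here is the answer. Human: and what about ..."
print(cut_text_after_keyword(raw, "Human:"))             # 'Sure, here is the answer.'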
model/conversation_chain_singleton.py
CHANGED
@@ -35,43 +35,30 @@ class ConversationChainSingleton:
     _instance = None
 
     def __new__(cls, *args, **kwargs):
-        """
-        Create a new instance of the ConversationChainSingleton class if it doesn't exist.
-
-        Returns:
-        - ConversationChainSingleton: The singleton instance.
-        """
         if not cls._instance:
             cls._instance = super(ConversationChainSingleton, cls).__new__(cls)
             # Initialize your conversation chain here
-            cls._instance.conversation_chain = cls.get_conversation_chain()
+            cls._instance.conversation_chain = cls.get_conversation_chain(cls._instance)
         return cls._instance
 
+    @staticmethod
+    def get_conversation_chain(instance):
         """
+        Create a conversational retrieval chain and a language model.
+
+        Args:
+        - instance: The instance of the ConversationChainSingleton class.
 
         Returns:
-        - ConversationChain: The conversation chain
+        - ConversationChain: The initialized conversation chain.
         """
         llm = HuggingFaceHub(
             repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
             model_kwargs={"max_length": 1048, "temperature": 0.2, "max_new_tokens": 256, "top_p": 0.95, "repetition_penalty": 1.0},
         )
-        # llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
-
         memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
         conversation_chain = ConversationChain(
             llm=llm, verbose=True, memory=memory
         )
         return conversation_chain
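The refactor keeps the singleton behaviour: every ConversationChainSingleton() call returns the same object, so the ConversationChain and its ConversationBufferMemory are built once per process and shared across Streamlit reruns. A dependency-free sketch of the same __new__ pattern, with a dict standing in for the LangChain chain:

class SingletonSketch:
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(SingletonSketch, cls).__new__(cls)
            # Built exactly once; a dict stands in for the real ConversationChain.
            cls._instance.conversation_chain = {"memory": []}
        return cls._instance

a = SingletonSketch()
b = SingletonSketch()
print(a is b)                                        # True: one shared instance
print(a.conversation_chain is b.conversation_chain)  # True: one shared chain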
utils/tool_loader.py
CHANGED
@@ -1,4 +1,17 @@
+"""
+Module: tool_loader
+
+This module defines the ToolLoader class for loading tools.
+
+Dependencies:
+- logging: Standard Python logging library for logging messages.
+- transformers: Library for natural language processing with pre-trained models.
+- utils.logger: Module providing logging functionalities.
+- utils.tool_config: Module providing tool_names for configuration.
+
+Classes:
+- ToolLoader: A class for loading tools.
+"""
 
 import logging
 from transformers import load_tool
@@ -6,10 +19,31 @@ from utils.logger import log_response  # Import the logger
 from utils.tool_config import tool_names
 
 class ToolLoader:
+    """
+    A class for loading tools.
+    """
     def __init__(self, tool_names):
+        """
+        Initializes an instance of the ToolLoader class.
+
+        Args:
+        - tool_names (list): A list of tool names to load.
+
+        Returns:
+        - None
+        """
         self.tools = self.load_tools(tool_names)
 
     def load_tools(self, tool_names):
+        """
+        Loads tools based on the provided tool names.
+
+        Args:
+        - tool_names (list): A list of tool names to load.
+
+        Returns:
+        - list: A list of loaded tools.
+        """
         loaded_tools = []
         for tool_name in tool_names:
             try:
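The hunk above stops at the try: block, so the loop body is not shown in this view. A plausible completion, assuming load_tool failures are meant to be logged and skipped rather than abort the loader (hypothetical sketch, not the committed code):

import logging
from transformers import load_tool

def load_tools_sketch(tool_names):
    # Hypothetical body for ToolLoader.load_tools: skip tools that fail to load.
    loaded_tools = []
    for tool_name in tool_names:
        try:
            loaded_tools.append(load_tool(tool_name))
        except Exception as err:  # assumption: broad catch, log and continue
            logging.warning("Could not load tool %s: %s", tool_name, err)
    return loaded_tools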
view/app_chat.py
CHANGED
@@ -1,82 +1,105 @@
+"""
+Module: app_chat
+
+This module defines the app_chat function for managing user interactions in the chat interface.
+
+Dependencies:
+- streamlit: The Streamlit library for building web applications.
+- pandas: Library for data manipulation and analysis.
+- PIL: Python Imaging Library for image processing.
+- pydub: Library for audio manipulation.
+- controller: Module providing the Controller class for handling user submissions and managing conversations.
+
+Functions:
+- app_chat: Function for managing user interactions in the chat interface.
+"""
+
 import streamlit as st
 import pandas as pd
-import matplotlib.figure
-
 from PIL import Image
 from pydub import AudioSegment
+from controller import Controller
 
 def app_chat(controller):
+    """
+    Function for managing user interactions in the chat interface.
+
+    Args:
+    - controller (Controller): An instance of the Controller class for handling user submissions and managing conversations.
+
+    Returns:
+    - None
+    """
     agent_config = controller.agent_config
+
     # Chat code (user input, agent responses, etc.)
     if "messages" not in st.session_state:
         st.session_state.messages = []
         st.markdown("Hello there! How can I assist you today?")
 
     for message in st.session_state.messages:
         with st.chat_message(message["role"]):
             st.markdown(message["content"])
 
     if user_message := st.chat_input("Enter message"):
         st.chat_message("user").markdown(user_message)
         st.session_state.messages.append({"role": "user", "content": user_message, "avatar": "🤗"})
 
         response = ""
+        chat_response = ""
         with st.spinner('Please stand by ...'):
             response = controller.handle_submission(user_message)
 
         with st.chat_message("assistant"):
             if response is None:
-                # st.warning("The agent's response is None. Please try again. Generate an image of a flying uncormn.")
+                chat_response = controller.handle_submission_chat(user_message, response)
+                st.write(chat_response)
             elif isinstance(response, Image.Image):
                 agent_config.image = response
+                chat_response = controller.handle_submission_chat(user_message, "No context. Created an image.")
+                st.write(chat_response)
                 st.image(response)
             elif isinstance(response, AudioSegment):
                 agent_config.audio = response
+                chat_response = controller.handle_submission_chat(user_message, "Agent Tool created audio file.")
+                st.write(chat_response)
                 st.audio(response)
             elif isinstance(response, int):
+                chat_response = controller.handle_submission_chat(user_message, response)
+                st.write(chat_response)
                 st.markdown(response)
             elif isinstance(response, str):
                 if "emojified_text" in response:
+                    chat_response = controller.handle_submission_chat(user_message, "Agent Tool created the text with emojis.")
+                    st.write(chat_response)
                    st.markdown(f"{response['emojified_text']}")
                 else:
+                    chat_response = controller.handle_submission_chat(user_message, response)
+                    st.write(chat_response)
                    st.markdown(response)
             elif isinstance(response, list):
+                chat_response = controller.handle_submission_chat(user_message, "Agent Tool produced a list")
                 for item in response:
                     st.markdown(item)  # Assuming the list contains strings
+                st.write(chat_response)
             elif isinstance(response, pd.DataFrame):
+                chat_response = controller.handle_submission_chat(user_message, "Agent Tool produced a pd.DataFrame")
+                st.write(chat_response)
                 st.dataframe(response)
             elif isinstance(response, pd.Series):
+                chat_response = controller.handle_submission_chat(user_message, "Agent Tool produced a pd.Series")
+                st.write(chat_response)
                 st.table(response.iloc[0:10])
             elif isinstance(response, dict):
+                chat_response = controller.handle_submission_chat(user_message, "Agent Tool produced a dict")
+                st.write(chat_response)
                 st.json(response)
             else:
                 st.warning("Unrecognized response type. Please try again. e.g. Generate an image of a flying horse.")
 
+            st.session_state.messages.append({"role": "assistant", "content": chat_response, "avatar": "🦖"})
             st.session_state.messages.append({"role": "assistant", "content": response, "avatar": "🤖"})
+
     """ elif isinstance(response, st.graphics_altair.AltairChart):
         chat_respone = controller.handle_submission_chat(user_message, "Agent Tool produced a st.graphics_altair.AltairChart")
         st.write(chat_respone)
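Every branch of the new dispatch follows the same three steps: summarise the tool output for the conversation chain via handle_submission_chat, write the chain's reply, then render the raw response with the matching Streamlit widget. A compact sketch of the render step only (hypothetical helper name; the committed module keeps the explicit elif chain):

import streamlit as st
import pandas as pd

def render_response(response):
    # Pick the Streamlit widget that matches the tool output type.
    if isinstance(response, pd.DataFrame):
        st.dataframe(response)
    elif isinstance(response, pd.Series):
        st.table(response.iloc[0:10])
    elif isinstance(response, dict):
        st.json(response)
    elif isinstance(response, list):
        for item in response:
            st.markdown(item)
    else:
        st.markdown(response)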
view/app_sidebar.py
CHANGED
@@ -1,3 +1,19 @@
+"""
+Module: app_sidebar
+
+This module defines the app_sidebar function for managing the sidebar interface.
+
+Dependencies:
+- streamlit: The Streamlit library for building web applications.
+- PIL: Python Imaging Library for image processing.
+- numpy: Library for numerical computing.
+- pandas: Library for data manipulation and analysis.
+
+Functions:
+- app_sidebar: Function for managing the sidebar interface.
+- configure: Function for configuring the agent and tools.
+- content_and_context: Function for setting the content and context.
+"""
 
 import streamlit as st
 from PIL import Image
@@ -5,15 +21,32 @@ import numpy as np
 import pandas as pd
 
 def app_sidebar(controller):
+    """
+    Function for managing the sidebar interface.
+
+    Args:
+    - controller (Controller): An instance of the Controller class for handling user submissions and managing conversations.
+
+    Returns:
+    - None
+    """
     with st.sidebar:
         st.header("Set Tools and Option. ")
         with st.expander("Configure the agent and tools"):
+            configure(controller.agent_config)
         with st.expander("Set the Content and Context"):
+            content_and_context(controller.agent_config)
 
 def configure(agent_config):
+    """
+    Function for configuring the agent and tools.
+
+    Args:
+    - agent_config (AgentConfig): An instance of the AgentConfig class for managing configuration settings for the agent.
+
+    Returns:
+    - None
+    """
     st.markdown("Change the agent's configuration here.")
 
     agent_config.url_endpoint = st.selectbox("Select Inference URL", agent_config.agent_urls)
@@ -23,6 +56,15 @@ def configure(agent_config):
     agent_config.s_tool_checkboxes = [st.checkbox(f"{tool.name} --- {tool.description} ") for tool in agent_config.tool_loader.tools]
 
 def content_and_context(agent_config):
+    """
+    Function for setting the content and context.
+
+    Args:
+    - agent_config (AgentConfig): An instance of the AgentConfig class for managing configuration settings for the agent.
+
+    Returns:
+    - None
+    """
     agent_config.context = st.text_area("Context")
 
     agent_config.image = st.camera_input("Take a picture")
@@ -31,50 +73,31 @@ def content_and_context(agent_config):
 
     if img_file_buffer is not None:
         image_raw = Image.open(img_file_buffer)
-        #global image
         agent_config.image = np.array(image_raw)
-        ########
         st.image(agent_config.image)
 
     uploaded_file = st.file_uploader("Choose a pdf", type='pdf')
     if uploaded_file is not None:
-        # To read file as bytes:
         agent_config.document = uploaded_file.getvalue()
         st.write(agent_config.document)
 
     uploaded_txt_file = st.file_uploader("Choose a txt", type='txt')
     if uploaded_txt_file is not None:
-        # To read file as bytes:
         agent_config.document = uploaded_txt_file.getvalue()
         st.write(agent_config.document)
 
     uploaded_csv_file = st.file_uploader("Choose a csv", type='csv')
     if uploaded_csv_file is not None:
-        # To read file as bytes:
         agent_config.document = uploaded_csv_file.getvalue()
         st.write(agent_config.document)
 
     uploaded_csv_file = st.file_uploader("Choose audio", type='wav')
     if uploaded_csv_file is not None:
-        # To read file as bytes:
         agent_config.document = uploaded_csv_file.getvalue()
         st.write(agent_config.document)
 
     uploaded_csv_file = st.file_uploader("Choose video", type='avi')
     if uploaded_csv_file is not None:
-        # To read file as bytes:
         agent_config.document = uploaded_csv_file.getvalue()
         st.write(agent_config.document)
 
-    # To convert to a string based IO:
-    #stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
-    #st.write(stringio)
-
-    # To read file as string:
-    #string_data = stringio.read()
-    #st.write(string_data)
-
-    # Can be used wherever a "file-like" object is accepted:
-    dataframe = pd.read_csv(uploaded_file)
-    st.write(dataframe)
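The deleted comment block listed the other ways an st.file_uploader value can be consumed. For reference, a short sketch of those options for a CSV upload (bytes, decoded string, and a pandas DataFrame), independent of the committed code:

from io import StringIO

import pandas as pd
import streamlit as st

uploaded_csv_file = st.file_uploader("Choose a csv", type='csv')
if uploaded_csv_file is not None:
    raw_bytes = uploaded_csv_file.getvalue()                # as bytes (what content_and_context stores)
    text = StringIO(raw_bytes.decode("utf-8")).read()       # as a string
    dataframe = pd.read_csv(uploaded_csv_file)              # as a "file-like" object
    st.write(dataframe)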