Commit 3cf5b2b · Parent(s): 1a2c78c
Create app.py
app.py
ADDED
@@ -0,0 +1,144 @@
import gradio as gr
import streamlit as st
import openai
from llama_index import VectorStoreIndex, ServiceContext, Document, LangchainEmbedding
from llama_index.llms import OpenAI
from llama_index.llm_predictor import LLMPredictor
from llama_hub.youtube_transcript import YoutubeTranscriptReader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import LlamaCpp


## For embedding the video transcript, we use a Hugging Face Sentence Transformers model
model_name = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=model_name)

### We use LlamaCpp to load the CodeUp-Llama-2-13B 8-bit quantised model in GGUF format
llm = LlamaCpp(
    model_path="codeup-llama-2-13b-chat-hf.Q8_0.gguf",
    n_gpu_layers=-1,
    n_batch=512,
    temperature=0.1,
    max_tokens=256,
    top_p=1,
    verbose=True,
    f16_kv=True,
    n_ctx=4096,
    use_mlock=True,
    n_threads=4,
    stop=["Human:", "User:"],
)

## Create a service context so the Hugging Face embeddings and the Llama 2 model are used as our language model
llm_predictor = LLMPredictor(llm=llm)
embed_model = LangchainEmbedding(hf)
service_context = ServiceContext.from_defaults(embed_model=embed_model, llm_predictor=llm_predictor)


def load_data(youtube_url):
    """Take a YouTube URL, fetch the video transcript and build a vector index over it."""
    print("In Load Data")

    if youtube_url.strip() == "":
        st.error("Enter a YouTube URL")
        return None
    try:
        loader = YoutubeTranscriptReader()
        documents = loader.load_data(ytlinks=[youtube_url])
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        return index
    except Exception:
        print("Enter a valid YouTube URL")
        st.error("Enter a valid YouTube URL")
        return None


#### The user enters the youtube_url and presses Submit, which loads the index
index = None
chat_engine = None

### We initialise two session_state entries: clicked and index.
### clicked: set to True when the Submit button is pressed.
### index: stores the vector index, so it persists until a new YouTube URL is entered.
if 'clicked' not in st.session_state:
    st.session_state.clicked = False
if 'index' not in st.session_state:
    st.session_state.index = None


### click_button -> sets clicked to True when the Submit button is pressed
def click_button():
    st.session_state.clicked = True


with st.sidebar:
    st.title("Youtube QA with Llama 2 Bot")
    st.subheader("Upload Documents/URL")
    youtube_url = st.sidebar.text_input('Enter Youtube URL', '')
    submit_btn = st.sidebar.button('Submit', on_click=click_button)

## When the Submit button is clicked, load the data and store the index in session_state
if st.session_state.clicked:
    print("Going to Load Data")
    index = load_data(youtube_url)
    st.session_state.index = index
    print("Index ", index)
    st.session_state.clicked = False  # reset, so load_data is not called for every user message

print("Index State ", st.session_state.index)

### If the index has been loaded, create the chat_engine object
if st.session_state.index is not None:
    chat_engine = st.session_state.index.as_chat_engine(verbose=True, chat_mode="context", service_context=service_context)
    print("Chat engine", chat_engine)

if "messages" not in st.session_state.keys():
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


def clear_chat_history():
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]


st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    full_response = ''
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            print("Calling Chat Engine")
            if chat_engine is not None:
                response = chat_engine.stream_chat(prompt)
                placeholder = st.empty()
                for item in response.response_gen:
                    full_response += item
                    # drop a leading "Assistant:" label if the model emits one
                    placeholder.markdown(full_response.removeprefix("Assistant:"))
                placeholder.markdown(full_response)
    if full_response != "":
        message = {"role": "assistant", "content": full_response}
        st.session_state.messages.append(message)
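
The file above wires the transcript index into a Streamlit chat UI. As a quick way to sanity-check the retrieval flow on its own, the following is a minimal sketch of the same pipeline (transcript loader, vector index, context chat engine) outside Streamlit. It only reuses calls already present in app.py; the YouTube URL and the question are placeholders, and the GGUF model file is assumed to be available locally. Treat it as an illustration, not part of the commit.

# Minimal standalone sketch (assumptions: same library versions as app.py,
# the GGUF model file in the working directory, placeholder URL and question).
from llama_hub.youtube_transcript import YoutubeTranscriptReader
from llama_index import VectorStoreIndex, ServiceContext, LangchainEmbedding
from llama_index.llm_predictor import LLMPredictor
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import LlamaCpp

# Same embedding model and quantised Llama 2 model as app.py.
embed_model = LangchainEmbedding(
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)
llm = LlamaCpp(model_path="codeup-llama-2-13b-chat-hf.Q8_0.gguf", n_ctx=4096, temperature=0.1)
service_context = ServiceContext.from_defaults(
    embed_model=embed_model, llm_predictor=LLMPredictor(llm=llm)
)

# Index a single transcript and stream one answer (URL and question are placeholders).
documents = YoutubeTranscriptReader().load_data(ytlinks=["https://www.youtube.com/watch?v=PLACEHOLDER"])
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
chat_engine = index.as_chat_engine(chat_mode="context", service_context=service_context)
response = chat_engine.stream_chat("What is this video about?")
for token in response.response_gen:
    print(token, end="", flush=True)

In the Space itself the app would typically be served with streamlit run app.py, with the sidebar Submit button driving load_data and the chat input driving stream_chat exactly as in the file above.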