Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ from haystack.schema import Document
 
 from typing import Optional, List
 
-from huggingface_hub import get_inference_endpoint
+# from huggingface_hub import get_inference_endpoint
 from datasets import load_dataset
 from time import perf_counter
 import gradio as gr
@@ -16,18 +16,27 @@ import numpy as np
 import requests
 import os
 
+TOP_K = 2
+BATCH_SIZE = 16
 
-RETRIEVER_URL = os.getenv("RETRIEVER_URL")
-RANKER_URL = os.getenv("RANKER_URL")
 HF_TOKEN = os.getenv("HF_TOKEN")
+RANKER_URL = os.getenv("RANKER_URL")
+RETRIEVER_URL = os.getenv("RETRIEVER_URL")
 
 
-RETRIEVER_IE = get_inference_endpoint(
-    "fastrag-retriever", namespace="optimum-intel", token=HF_TOKEN
-)
-RANKER_IE = get_inference_endpoint(
-    "fastrag-ranker", namespace="optimum-intel", token=HF_TOKEN
-)
+# RETRIEVER_IE = get_inference_endpoint(
+#     "fastrag-retriever", namespace="optimum-intel", token=HF_TOKEN
+# )
+# RANKER_IE = get_inference_endpoint(
+#     "fastrag-ranker", namespace="optimum-intel", token=HF_TOKEN
+# )
+
+# if RETRIEVER_IE.status != "running":
+#     RETRIEVER_IE.resume()
+#     RETRIEVER_IE.wait()
+
+# if RANKER_IE.status != "running":
+#     RANKER_IE.resume()
+#     RANKER_IE.wait()
 
 
 def post(url, payload):
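This commit stops managing the endpoints through `huggingface_hub` objects and instead calls fixed `RETRIEVER_URL`/`RANKER_URL` endpoints through the existing `post(url, payload)` helper. The helper's body sits outside this diff; the sketch below is a minimal guess at its typical shape for Inference Endpoints, with the auth header and error handling as assumptions rather than the Space's actual code.

# Hypothetical shape of the unchanged post() helper: send a JSON payload to an
# Inference Endpoint URL and return the decoded JSON response.
def post(url, payload):
    response = requests.post(
        url,
        json=payload,
        headers={"Authorization": f"Bearer {HF_TOKEN}"},  # token read from the env vars above
    )
    response.raise_for_status()  # surface endpoint errors instead of returning partial output
    return response.json()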
@@ -137,10 +146,6 @@ class Ranker(BaseRanker):
         return [[Document.from_dict(d) for d in docs] for docs in response]
 
 
-TOP_K = 2
-BATCH_SIZE = 16
-
-
 if (
     os.path.exists("/data/faiss_document_store.db")
     and os.path.exists("/data/faiss_index.json")
@@ -152,21 +157,27 @@ if (
     )
     document_store.save(index_path="/data/faiss_index")
 else:
-
-
-
-
-
-
+    for file in [
+        "/data/faiss_document_store.db",
+        "/data/faiss_index.json",
+        "/data/faiss_index",
+    ]:
+        try:
+            os.remove(file)
+        except FileNotFoundError:
+            pass
 
     document_store = FAISSDocumentStore(
         sql_url="sqlite:////data/faiss_document_store.db",
         return_embedding=True,
         embedding_dim=384,
     )
-
-
-
+    document_store.write_documents(
+        load_dataset("bilgeyucel/seven-wonders", split="train")
+    )
+    retriever = Retriever(
+        document_store=document_store, top_k=TOP_K, batch_size=BATCH_SIZE
+    )
     document_store.update_embeddings(retriever=retriever)
     document_store.save(index_path="/data/faiss_index")
 
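Only the rebuild branch is reworked here; the branch taken when the FAISS files already exist lies outside this hunk and presumably just reloads the persisted index. The sketch below shows what that warm-start path usually looks like with haystack 1.x; the `load` call and paths are assumptions mirroring the files saved above, not lines from this commit.

# Hypothetical warm-start path (not part of this diff): reuse the FAISS index and
# SQLite document store persisted by document_store.save("/data/faiss_index").
from haystack.document_stores import FAISSDocumentStore  # haystack 1.x import, assumed

document_store = FAISSDocumentStore.load(
    index_path="/data/faiss_index",        # binary FAISS index written by save()
    config_path="/data/faiss_index.json",  # JSON config that save() writes alongside it
)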
@@ -178,27 +189,9 @@ pipe.add_node(component=ranker, name="Ranker", inputs=["Retriever"])
 
 
 def run(query: str) -> dict:
-    if RETRIEVER_IE.status != "running":
-        RETRIEVER_IE.resume()
-        raise gr.Error(
-            "Retriever Inference Endpoint is not running. "
-            "Sent a request to resume it. Please try again in a few minutes."
-        )
-
-    if RANKER_IE.status != "running":
-        RANKER_IE.resume()
-        raise gr.Error(
-            "Ranker Inference Endpoint is not running. "
-            "Sent a request to resume it. Please try again in a few minutes."
-        )
-
     pipe_output = pipe.run(query=query)
 
-    output = f"""
-    <h2>Query</h2>
-    <p>{query}</p>
-    <h2>Top {TOP_K} Documents</h2>
-    """
+    output = f"""<h2>Top {TOP_K} Documents</h2>"""
 
     for i, doc in enumerate(pipe_output["documents"]):
         output += f"""
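The loop body continues beyond this hunk and is unchanged by the commit. The sketch below is only a hedged guess at the kind of statement it builds per document: the field names come from haystack's Document schema, while the markup itself is an assumption.

# Hypothetical shape of the unchanged loop body (outside this diff): haystack
# Documents expose .content and .score, which is all such a snippet needs.
        output += f"""
        <h3>Document {i + 1}</h3>
        <p>{doc.content}</p>
        <p>Score: {doc.score}</p>
        """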
@@ -221,23 +214,24 @@ examples = [
     "How did Colossus of Rhodes collapse?",
 ]
 
-
 input_text = gr.components.Textbox(
-    label="Query",
-    placeholder="Enter a query",
-    value=examples[0],
-    lines=3,
+    label="Query", placeholder="Enter a query", value=examples[0], lines=1
 )
-output_html = gr.components.HTML(label="
+output_html = gr.components.HTML(label="Documents")
 
 gr.Interface(
     fn=run,
     inputs=input_text,
     outputs=output_html,
-    title="End-to-End Retrieval & Ranking",
     examples=examples,
-
-    "
-    "
-    "
+    cache_examples=False,
+    allow_flagging="never",
+    title="End-to-End Retrieval & Ranking with Hugging Face Inference Endpoints and Spaces",
+    description="""## A [haystack](https://haystack.deepset.ai/) pipeline with the following components
+- <strong>Retriever</strong>: [Quantized FastRAG Retriever](https://huggingface.co/optimum-intel/fastrag-retriever) deployed on [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index) + Intel Sapphire Rapids CPU.
+- <strong>Ranker</strong>: [Quantized FastRAG Ranker](https://huggingface.co/optimum-intel/fastrag-ranker) deployed on [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index) + Intel Sapphire Rapids CPU.
+- <strong>Document Store</strong>: A [FAISS document store](https://github.com/facebookresearch/faiss/tree/main) containing the [`seven-wonders` dataset](https://huggingface.co/datasets/bilgeyucel/seven-wonders), created on this Space's [persistent storage](https://huggingface.co/docs/hub/en/spaces-storage).
+
+This Space is based on the optimizations demonstrated in the blog [CPU Optimized Embeddings with 🤗 Optimum Intel and fastRAG](https://huggingface.co/blog/intel-fast-embedding)
+    """,
 ).launch()
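With endpoint management removed, the app only reads three environment variables. The sketch below is a hedged example of providing them locally; every value is a placeholder, not anything taken from this Space.

# Hypothetical local setup (placeholder values): export the variables app.py reads,
# then run it; executing the file builds the FAISS store and calls .launch().
import os

os.environ["RETRIEVER_URL"] = "https://<retriever-endpoint>.endpoints.huggingface.cloud"
os.environ["RANKER_URL"] = "https://<ranker-endpoint>.endpoints.huggingface.cloud"
os.environ["HF_TOKEN"] = "hf_..."  # a Hugging Face access token

# e.g. put this in run_local.py next to app.py, then:
# import app  # starts the Gradio UI via the .launch() call above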