cheesyFishes committed on
Commit 3ae2199 · 1 Parent(s): 094d16a

hardcode CPU only

Files changed (1)
  1. app.py +5 -19
app.py CHANGED
@@ -14,11 +14,11 @@ example_indexes = {
     "Uber 10k 2021": "./uber_index",
 }
 
-device = "cpu"
-if torch.cuda.is_available():
-    device = "cuda"
-elif torch.backends.mps.is_available():
-    device = "mps"
+# device = "cpu"
+# if torch.cuda.is_available():
+#     device = "cuda"
+# elif torch.backends.mps.is_available():
+#     device = "mps"
 
 image_embed_model = HuggingFaceEmbedding(
     model_name="llamaindex/vdr-2b-multi-v1",
@@ -80,9 +80,6 @@ def create_index(file, llama_parse_key, progress=gr.Progress()):
         image_docs.append(ImageDocument(text=image_dict["name"], image_path=image_dict["path"]))
 
         # Create index
-        # move models back to CPU
-        index._image_embed_model._model.to(device)
-        index._embed_model._model.to(device)
         progress(0.9, desc="Creating final index...")
         index = MultiModalVectorStoreIndex.from_documents(
             text_docs + image_docs,
@@ -95,17 +92,10 @@ def create_index(file, llama_parse_key, progress=gr.Progress()):
 
     except Exception as e:
         return None, f"Error creating index: {str(e)}"
-    finally:
-        # move models back to CPU
-        index._image_embed_model._model.to("cpu")
-        index._embed_model._model.to("cpu")
 
 def run_search(index, query, text_top_k, image_top_k):
     if not index:
         return "Please create or select an index first.", [], []
-    # move models back to CPU
-    index._image_embed_model._model.to(device)
-    index._embed_model._model.to(device)
     retriever = index.as_retriever(
         similarity_top_k=text_top_k,
         image_similarity_top_k=image_top_k,
@@ -114,10 +104,6 @@ def run_search(index, query, text_top_k, image_top_k):
     image_nodes = retriever.text_to_image_retrieve(query)
     text_nodes = retriever.text_retrieve(query)
 
-    # move models back to CPU
-    index._image_embed_model._model.to("cpu")
-    index._embed_model._model.to("cpu")
-
     # Extract text and scores from nodes
     text_results = [{"text": node.text, "score": f"{node.score:.3f}"} for node in text_nodes]
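
Note on the change: with the torch device-detection block commented out and every .to(device) / .to("cpu") call removed, the embedding models simply stay wherever HuggingFaceEmbedding loads them, which on a CPU-only Space is the CPU. Below is a minimal sketch of an equivalent explicit setup; the device="cpu" and trust_remote_code=True arguments are assumptions about the HuggingFaceEmbedding constructor and are not part of this commit.

# Sketch only: pin the multimodal embedding model to CPU explicitly instead of
# relying on the commented-out torch device detection. `device="cpu"` and
# `trust_remote_code=True` are assumed constructor arguments, not taken from this diff.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

image_embed_model = HuggingFaceEmbedding(
    model_name="llamaindex/vdr-2b-multi-v1",
    device="cpu",            # force CPU regardless of available accelerators
    trust_remote_code=True,  # assumed: the model may ship custom code
)

Keeping the model on CPU from the start avoids the cross-device .to(...) shuffling that the deleted lines were doing around indexing and retrieval.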