fredaddy committed
Commit a4ca6f4 · verified · 1 Parent(s): 193dc22

Update handler.py

Files changed (1):
  handler.py (+7 -8)
handler.py CHANGED
@@ -1,6 +1,6 @@
 import torch
 from PIL import Image
-import requests
+import base64
 from io import BytesIO
 from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
 from qwen_vl_utils import process_vision_info
@@ -17,19 +17,18 @@ class EndpointHandler:
 
     def __call__(self, data):
         # Extract image and text from the input data
-        image_url = data.get("inputs", {}).get("image", "")
+        image_data = data.get("inputs", {}).get("image", "")
         text_prompt = data.get("inputs", {}).get("text", "")
 
-        if not image_url or not text_prompt:
+        if not image_data or not text_prompt:
             return {"error": "Both 'image' and 'text' must be provided in the input data."}
 
-        # Download and process the image
+        # Process the image data
         try:
-            response = requests.get(image_url)
-            response.raise_for_status()
-            image = Image.open(BytesIO(response.content)).convert("RGB")
+            image_bytes = base64.b64decode(image_data)
+            image = Image.open(BytesIO(image_bytes)).convert("RGB")
         except Exception as e:
-            return {"error": f"Failed to load image from URL: {e}"}
+            return {"error": f"Failed to process image data: {e}"}
 
         # Prepare the input in the format expected by the model
         messages = [
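
With this change the handler expects the image as a base64-encoded string rather than a URL, so callers now have to encode the file themselves. Below is a minimal client sketch under that assumption; ENDPOINT_URL and HF_TOKEN are hypothetical placeholders, and only the {"inputs": {"image": ..., "text": ...}} payload shape is taken from the handler's data.get("inputs", {}) access above.

import base64
import requests

# Hypothetical endpoint URL and access token; replace with real values.
ENDPOINT_URL = "https://your-endpoint.example.com"
HF_TOKEN = "hf_..."

# Read the image file and base64-encode it, matching the server-side
# base64.b64decode(image_data) in the updated handler.
with open("example.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "inputs": {
        "image": image_b64,
        "text": "Describe this image.",
    }
}

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json=payload,
)
print(response.json())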