Update handler.py
handler.py  CHANGED  +7 -8
@@ -1,6 +1,6 @@
 import torch
 from PIL import Image
-import requests
+import base64
 from io import BytesIO
 from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
 from qwen_vl_utils import process_vision_info
@@ -17,19 +17,18 @@ class EndpointHandler:
 
     def __call__(self, data):
         # Extract image and text from the input data
-        image_url = data.get("inputs", {}).get("image", "")
+        image_data = data.get("inputs", {}).get("image", "")
         text_prompt = data.get("inputs", {}).get("text", "")
 
-        if not image_url or not text_prompt:
+        if not image_data or not text_prompt:
             return {"error": "Both 'image' and 'text' must be provided in the input data."}
 
-        # Fetch the image from the URL
+        # Process the image data
        try:
-            response = requests.get(image_url)
-            response.raise_for_status()
-            image = Image.open(BytesIO(response.content)).convert("RGB")
+            image_bytes = base64.b64decode(image_data)
+            image = Image.open(BytesIO(image_bytes)).convert("RGB")
         except Exception as e:
-            return {"error": f"Failed to fetch image from URL: {e}"}
+            return {"error": f"Failed to process image data: {e}"}
 
         # Prepare the input in the format expected by the model
         messages = [
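The hunk cuts off at the opening of the messages list. For context, the standard Qwen2-VL inference pattern that typically follows this point (as documented for Qwen2VLForConditionalGeneration and qwen_vl_utils) is sketched below. The self.model and self.processor attributes and the max_new_tokens value are assumptions about the unchanged remainder of handler.py, not part of this commit.

# Sketch of the usual continuation, assuming the handler stores the model and
# processor as self.model / self.processor in __init__ (not shown in this diff).
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image},       # PIL image decoded above
            {"type": "text", "text": text_prompt},
        ],
    }
]

# Render the chat template, gather vision inputs, and generate a response.
text = self.processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
inputs = self.processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
).to(self.model.device)

generated_ids = self.model.generate(**inputs, max_new_tokens=128)  # assumed limit
trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
output_text = self.processor.batch_decode(trimmed, skip_special_tokens=True)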
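Because the handler now expects the image as a base64 string rather than a URL, callers must encode the file before posting. A minimal client sketch follows, assuming a deployed Hugging Face Inference Endpoint; the endpoint URL, token, and file name are placeholders.

import base64
import requests

# Placeholders: substitute your own endpoint URL, token, and image file.
API_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HEADERS = {"Authorization": "Bearer <hf_token>"}

with open("example.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "inputs": {
        "image": image_b64,              # decoded by base64.b64decode in the handler
        "text": "Describe this image.",
    }
}

response = requests.post(API_URL, headers=HEADERS, json=payload)
print(response.json())

Sending the bytes inline means the handler no longer makes an outbound HTTP request per call, which is presumably the motivation for the change.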