Sunsimiao-V
| Model Name | Parameters | 🤗 HuggingFace Download | 🤖 ModelScope Download | ✡️ WiseModel Download |
|---|---|---|---|---|
| Sunsimiao-V-Phi3 | 4B | thomas-yanxin/Sunsimiao-V-Phi3 | thomas/Sunsimiao-V-Phi3 | thomas/Sunsimiao-V-Phi3 |
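
To fetch the weights ahead of time from the 🤗 Hub, here is a minimal download sketch using `huggingface_hub` (the ModelScope and WiseModel mirrors ship their own download utilities):

```python
# Minimal download sketch; assumes `huggingface_hub` is installed.
from huggingface_hub import snapshot_download

# Downloads the full model repository and returns the local cache path
local_dir = snapshot_download("thomas-yanxin/Sunsimiao-V-Phi3")
print(local_dir)
```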
Inference with the 🤗 Transformers `pipeline`:

```python
from transformers import pipeline
from PIL import Image

model_id = "thomas-yanxin/Sunsimiao-V-Phi3"

# Build an image-to-text pipeline on the first GPU
pipe = pipeline("image-to-text", model=model_id, device=0)

# Load a local test image and wrap the question in the Phi-3 chat template
image = Image.open("./images/test.png")
prompt = "<|user|>\n<image>\nWhat appears unusual in the image?<|end|>\n<|assistant|>\n"

outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(outputs)
# >>> What appears unusual in the image? Airspace opacity
```
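
The `image-to-text` pipeline normally returns a list of `generated_text` dicts, so the answer string can be pulled out directly. This is a short continuation of the snippet above, not part of the original example:

```python
# Continuing from `outputs` above: take the text of the first candidate.
answer = outputs[0]["generated_text"]
print(answer)
```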
The model can also be loaded directly, without the `pipeline` abstraction:

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "xtuner/llava-phi-3-mini-hf"
prompt = "<|user|>\n<image>\nWhat are these?<|end|>\n<|assistant|>\n"
image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"

# Load the model in fp16 on the first GPU
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).to(0)
processor = AutoProcessor.from_pretrained(model_id)

# Fetch the image and prepare model inputs
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(prompt, raw_image, return_tensors="pt").to(0, torch.float16)

# Greedy decoding, then strip special tokens when printing
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True))
```
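
For GPUs with limited memory, the same direct-loading path can use 4-bit quantization. This is a minimal sketch, assuming `bitsandbytes` and `accelerate` are installed and a CUDA GPU is available; it is not part of the original usage example:

```python
import torch
from transformers import AutoProcessor, BitsAndBytesConfig, LlavaForConditionalGeneration

model_id = "xtuner/llava-phi-3-mini-hf"

# NF4 4-bit weights with fp16 compute to cut VRAM usage
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",
    low_cpu_mem_usage=True,
)
processor = AutoProcessor.from_pretrained(model_id)
```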
If you use this model, please cite XTuner:

```bibtex
@misc{2023xtuner,
    title={XTuner: A Toolkit for Efficiently Fine-tuning LLM},
    author={XTuner Contributors},
    howpublished={\url{https://github.com/InternLM/xtuner}},
    year={2023}
}
```