add app
Files changed:
- README.md (+2 -2)
- app.py (+83 -0)
- requirements.txt (+7 -0)
README.md
CHANGED

@@ -1,6 +1,6 @@
 ---
-title:
-emoji:
+title: CogVLMv1 Captionner
+emoji: ⚙️
 colorFrom: gray
 colorTo: red
 sdk: gradio
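The YAML front matter edited here is the Hugging Face Spaces configuration card: `title` and `emoji` set how the Space is listed, `colorFrom`/`colorTo` control the thumbnail gradient, and `sdk: gradio` tells Spaces to serve the app with the Gradio runtime. This commit only fills in the title and emoji; the rest of the card is unchanged.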
app.py
ADDED

@@ -0,0 +1,83 @@
# type: ignore
from typing import Any

import gradio as gr
import spaces
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, LlamaTokenizer


DEFAULT_PARAMS = {
    "do_sample": False,
    "max_new_tokens": 256,
}
DEFAULT_QUERY = (
    "Provide a factual description of this image in up to two paragraphs. "
    "Include details on objects, background, scenery, interactions, gestures, poses, and any visible text content. "
    "Specify the number of repeated objects. "
    "Describe the dominant colors, color contrasts, textures, and materials. "
    "Mention the composition, including the arrangement of elements and focus points. "
    "Note the camera angle or perspective, and provide any identifiable contextual information. "
    "Include details on the style, lighting, and shadows. "
    "Avoid subjective interpretations or speculation."
)

DTYPE = torch.bfloat16
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = LlamaTokenizer.from_pretrained(
    pretrained_model_name_or_path="lmsys/vicuna-7b-v1.5",
)
model = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path="THUDM/cogvlm-chat-hf",
    torch_dtype=DTYPE,
    trust_remote_code=True,
    low_cpu_mem_usage=True,
)

model = model.to(device=DEVICE)


@spaces.GPU
@torch.no_grad()
def generate_caption(
    image: Image.Image,
    query: str = DEFAULT_QUERY,
    params: dict[str, Any] = DEFAULT_PARAMS,
) -> str:
    inputs = model.build_conversation_input_ids(
        tokenizer=tokenizer,
        query=query,
        history=[],
        images=[image],
    )
    inputs = {
        "input_ids": inputs["input_ids"].unsqueeze(0).to(device=DEVICE),
        "token_type_ids": inputs["token_type_ids"].unsqueeze(0).to(device=DEVICE),
        "attention_mask": inputs["attention_mask"].unsqueeze(0).to(device=DEVICE),
        "images": [[inputs["images"][0].to(device=DEVICE, dtype=DTYPE)]],
    }

    outputs = model.generate(**inputs, **params)
    outputs = outputs[:, inputs["input_ids"].shape[1] :]
    result = tokenizer.decode(outputs[0])

    result = result.replace("This image showcases", "").lstrip()
    return result


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")
            input_query = gr.Textbox(lines=5, label="Prompt", value=DEFAULT_QUERY)
            run_button = gr.Button(value="Generate Caption")
        with gr.Column():
            output_caption = gr.Textbox(label="Generated Caption", show_copy_button=True)

    run_button.click(
        fn=generate_caption,
        inputs=[input_image, input_query],
        outputs=output_caption,
    )

demo.launch(share=False)
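In `generate_caption`, CogVLM's `build_conversation_input_ids` helper (exposed via `trust_remote_code=True`) tokenizes the query together with the image; the tensors are then unsqueezed to batch size 1 and moved to the device, `model.generate` produces the full sequence, and the prompt tokens are sliced off before decoding so only the newly generated caption remains. The trailing `replace` strips the model's habitual "This image showcases" opener, and `@spaces.GPU` requests a ZeroGPU device for the duration of each call. A minimal smoke test might look like the sketch below; it assumes `generate_caption` and its globals are already defined in the current session (importing `app.py` directly would block on `demo.launch()`), and "example.jpg" is a hypothetical local file.

# A minimal smoke test (sketch): assumes generate_caption and its globals are
# already defined in this session; "example.jpg" is a hypothetical local file.
from PIL import Image

image = Image.open("example.jpg").convert("RGB")
caption = generate_caption(image)  # uses DEFAULT_QUERY and DEFAULT_PARAMS
print(caption)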
requirements.txt
ADDED

@@ -0,0 +1,7 @@
transformers==4.41.2
xformers==0.0.27
sentencepiece==0.2.0
bitsandbytes==0.43.1
einops==0.8.0
torchvision==0.18.1
accelerate==0.31.0
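Note that `gradio`, `spaces`, and `torch` are absent from these pins, presumably because the Spaces runtime preinstalls them: the `sdk: gradio` front matter selects the Gradio image, and `torchvision==0.18.1` pulls in a matching `torch==2.3.1` as a dependency. A local reproduction would be roughly `pip install -r requirements.txt gradio spaces` followed by `python app.py`, though exact torch/xformers wheel compatibility on a given machine is an assumption.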