cherrytest committed on
Commit
1625ded
1 Parent(s): 4c73aae
Files changed (9) hide show
  1. app.py +249 -0
  2. app_bak.py +236 -0
  3. cutelogo.jpg +0 -0
  4. examples/1.webp +0 -0
  5. examples/2.png +0 -0
  6. examples/3.png +0 -0
  7. examples/4.png +0 -0
  8. logo.png +0 -0
  9. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Alibaba, Inc. and its affiliates.
import os

import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.base as ms
from PIL import Image
import secrets
import tempfile
from http import HTTPStatus
from urllib3.exceptions import HTTPError

from pathlib import Path

# Point the DashScope SDK at the public API endpoint. Set before the
# `import dashscope` below (presumably read at import time — TODO confirm).
os.environ['DASHSCOPE_HTTP_BASE_URL'] = 'https://dashscope.aliyuncs.com/api/v1'
# os.environ['DASHSCOPE_WEBSOCKET_BASE_URL'] = 'https://poc-dashscope.aliyuncs.com/api-ws/v1/inference'

import dashscope
from dashscope import MultiModalConversation

# DashScope credential; raises KeyError at startup if API_KEY is not set.
API_KEY = os.environ['API_KEY']
dashscope.api_key = API_KEY

# True when running inside ModelScope Studio; used to pick Chinese UI text.
is_modelscope_studio = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
24
+
25
def get_text(text: str, cn_text: str):
    """Select the localized UI string.

    Returns ``cn_text`` when running inside ModelScope Studio,
    otherwise the English ``text``.
    """
    return cn_text if is_modelscope_studio else text
29
+
30
def resolve_image(filename):
    """Return *filename* resolved relative to this source file's directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, filename)
32
+
33
# Example prompts shown as clickable cards in the UI. Each entry pairs a
# question with a bundled demo image (resolved relative to this file).
DEMO_LIST = [
    {
        "description": "Evaluate the integral of the functions graphed using the formula for circles: ",
        "image": resolve_image("./examples/1.webp")
    },
    {
        "description": "回答图中问题",
        "image": resolve_image("./examples/2.png")
    },
    {
        "description": "图片中的滤液E是什么化学物质?",
        "image": resolve_image("./examples/3.png")
    },
    {
        "description": "I want to know the volume of this sofa",
        "image": resolve_image("./examples/4.png")
    },
]
51
+
52
def process_image(image, shouldConvert=False):
    """Persist a PIL image as a temporary JPEG and return its path.

    Args:
        image: the PIL.Image to save.
        shouldConvert: when True, flatten the image onto a white RGB
            background first (using its alpha channel as the paste mask,
            if it has one).

    Returns:
        Path of the written ``.jpg`` file inside the Gradio temp dir.
    """
    # Directory Gradio uses for uploads; fall back to <system tmp>/gradio.
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio")
    os.makedirs(uploaded_file_dir, exist_ok=True)

    # Unguessable temporary file name.
    name = f"tmp{secrets.token_hex(20)}.jpg"
    filename = os.path.join(uploaded_file_dir, name)

    if shouldConvert:
        # Flatten onto a white background so transparent areas become white.
        new_img = Image.new('RGB',
                            size=(image.width, image.height),
                            color=(255, 255, 255))
        # Only use the image as its own paste mask when it carries an alpha
        # band; PIL raises ValueError for mask-less modes such as plain RGB.
        mask = image if 'A' in image.getbands() else None
        new_img.paste(image, (0, 0), mask=mask)
        image = new_img

    # JPEG cannot encode alpha/palette modes (RGBA, LA, P, ...); saving such
    # an image to a .jpg path raises OSError, so normalize to RGB first.
    if image.mode not in ('RGB', 'L'):
        image = image.convert('RGB')
    image.save(filename)

    return filename
71
+
72
+
73
+
74
def generate(image, query):
    """Stream the model's answer for one image+text question.

    Saves the uploaded image to a temp file, issues a single-turn
    streaming request to the ``qvq-72b-preview`` model, and yields the
    text of each streamed chunk.

    Raises:
        HTTPError: when a streamed chunk returns a non-OK status code.
    """
    saved_path = process_image(image)
    user_message = {
        'role': 'user',
        'content': [
            {'image': f'file://{saved_path}'},
            {'text': query},
        ],
    }
    messages = [user_message]
    print('messages:', messages)

    stream = MultiModalConversation.call(
        model='qvq-72b-preview', messages=messages, stream=True,
    )
    for chunk in stream:
        if chunk.status_code != HTTPStatus.OK:
            raise HTTPError(
                f'response.code: {chunk.code}\nresponse.message: {chunk.message}')
        parts = chunk.output.choices[0].message.content
        if len(parts) > 0 and parts[0]['text']:
            print(parts[0]['text'])
            yield parts[0]['text']
94
+
95
+
96
if __name__ == "__main__":

    def on_clear():
        # Reset the question textarea and every image input to empty.
        return {
            input: gr.update(value=None),
            **{
                item: gr.update(value=None)
                for item in input_image
            },
        }

    with gr.Blocks() as demo:
        with ms.Application() as app:
            with antd.ConfigProvider(
                    locale="zh_CN" if is_modelscope_studio else None,
                    theme=dict(token=dict(colorPrimary="#a855f7"))):
                # Header card: logo + title.
                with antd.Card(elem_style=dict(marginBottom=12),
                               styles=dict(body=dict(padding=4))):
                    with antd.Flex(elem_style=dict(width="100%"),
                                   justify="center",
                                   align="center",
                                   gap=14):
                        with ms.Div(elem_style=dict(flexShrink=0)):
                            antd.Image(
                                resolve_image("./cutelogo.jpg"),
                                preview=False,
                                height=60)
                        with ms.Div():
                            antd.Typography.Title(
                                "QVQ-72B-Preview",
                                elem_style=dict(margin=0, fontSize=24),
                                level=1)
                with ms.AutoLoading():
                    with antd.Row(gutter=[8, 8], align="stretch"):
                        # Left column: inputs, action buttons, examples.
                        with antd.Col(xs=24, md=8):
                            with antd.Space(direction="vertical",
                                            elem_style=dict(width="100%")):
                                with antd.Space(direction="vertical",
                                                elem_style=dict(width="100%"),
                                                elem_id="input-container"):
                                    with ms.Fragment():
                                        # NOTE(review): the trailing comma makes
                                        # input_image a 1-tuple; downstream code
                                        # consistently uses input_image[0] and
                                        # *input_image, so it is kept as-is.
                                        input_image = gr.Image(
                                            type="pil",
                                            label="Upload",
                                            sources=["upload"]),
                                    # `input` shadows the builtin; kept for
                                    # consistency with the rest of this file.
                                    input = antd.Input.Textarea(
                                        placeholder=get_text("Ask a question", "输入一个问题"),
                                        auto_size=dict(maxRows=6, minRows=2),
                                        allow_clear=True)

                                with antd.Flex(align="center",
                                               justify="space-between"):
                                    antd.Typography.Text(
                                        get_text("Warning: This model only supports single-turn dialogue.", "注:当前模型只支持单轮对话,如需中文回答,提示词加“用中文回答”"), type="warning")
                                    tour_btn = antd.Button(get_text("Tour", "使用指引"),
                                                           variant="filled",
                                                           color="default")

                                with antd.Row(gutter=8):
                                    with antd.Col(span=12):
                                        clear_btn = antd.Button(get_text("Clear", "清除"),
                                                                block=True)
                                    with antd.Col(span=12):
                                        submit_btn = antd.Button(
                                            get_text("Submit", "提交"),
                                            type="primary",
                                            block=True,
                                            elem_id="submit-btn")

                                antd.Divider(get_text("Example", "示例"))

                                # Clickable example cards that prefill inputs.
                                with antd.Flex(gap="small", wrap=True):
                                    for item in DEMO_LIST:

                                        def bind_on_example(_item):
                                            # Bind _item now to avoid the
                                            # late-binding closure pitfall
                                            # inside this loop.
                                            def on_example():
                                                return gr.update(
                                                    value=_item[
                                                        'description']
                                                ), gr.update(
                                                    value=_item['image'])

                                            return on_example

                                        with antd.Card(
                                                hoverable=True,
                                                elem_style=dict(
                                                    width="100%")) as example:
                                            if "description" in item:
                                                antd.Typography.Text(
                                                    item["description"])
                                            if "image" in item:
                                                antd.Image(item["image"],
                                                           preview=False)
                                        example.click(
                                            fn=bind_on_example(item),
                                            outputs=[input, input_image[0]])

                        # Right column: rendered Markdown/LaTeX answer.
                        with antd.Col(xs=24, md=16):
                            with antd.Card(title=get_text("Answer", "答案"),
                                           elem_style=dict(height="100%"),
                                           elem_id="output-container"):
                                output = gr.Markdown(
                                    show_copy_button=True,
                                    latex_delimiters=[{
                                        "left": '$$',
                                        "right": '$$',
                                        "display": True
                                    }, {
                                        "left": '$',
                                        "right": '$',
                                        "display": False,
                                    }, {
                                        "left": '\\(',
                                        "right": '\\)',
                                        "display": False,
                                    }, {
                                        "left": '\\[',
                                        "right": '\\]',
                                        "display": True
                                    }])
                # Three-step onboarding tour, opened via tour_btn.
                with antd.Tour(props=dict(open=False)) as tour:
                    antd.Tour.Step(
                        title=get_text("Step 1", "步骤 1"),
                        description=get_text("Upload image and enter text", "传入图片和文本"),
                        get_target=
                        "() => document.querySelector('#input-container')")
                    antd.Tour.Step(
                        title=get_text("Step 2","步骤 2"),
                        description=get_text("Click submit button", "点击提交按钮"),
                        get_target=
                        "() => document.querySelector('#submit-btn')")
                    antd.Tour.Step(
                        title=get_text("Step 3","步骤 3"),
                        description=get_text("Wait for result", "等待结果返回"),
                        get_target=
                        "() => document.querySelector('#output-container')"
                    )

                # Event wiring: tour open/close, submit, clear.
                tour_btn.click(fn=lambda: gr.update(props=dict(open=True)),
                               outputs=[tour])
                gr.on([tour.finish, tour.close],
                      fn=lambda: gr.update(props=dict(open=False)),
                      outputs=[tour])

                submit_btn.click(
                    fn=generate,
                    inputs=[*input_image, input],
                    outputs=[output])
                clear_btn.click(
                    fn=on_clear,
                    outputs=[*input_image, input])

    demo.queue(default_concurrency_limit=50).launch()
app_bak.py ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
import os
import numpy as np
from urllib3.exceptions import HTTPError
# Point the DashScope SDK at the PoC endpoints. Set before importing
# dashscope below (presumably read at import time — TODO confirm).
os.environ['DASHSCOPE_HTTP_BASE_URL'] = 'https://poc-dashscope.aliyuncs.com/api/v1'
os.environ['DASHSCOPE_WEBSOCKET_BASE_URL'] = 'https://poc-dashscope.aliyuncs.com/api-ws/v1/inference'

from argparse import ArgumentParser
from pathlib import Path

import copy
import gradio as gr
import oss2
import os
import re
import secrets
import tempfile
import requests
from http import HTTPStatus
from dashscope import MultiModalConversation
import dashscope
# DashScope credential; raises KeyError at startup if API_KEY is unset.
API_KEY = os.environ['API_KEY']
dashscope.api_key = API_KEY

# Default value for the --revision CLI flag.
REVISION = 'v1.0.4'
# Regex for <box>...</box> spans (not referenced elsewhere in this chunk).
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
# ASCII + fullwidth punctuation set (not referenced elsewhere in this chunk).
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
31
+
32
+
33
def _get_args():
    """Build the CLI parser for the demo server and return parsed args."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--revision", type=str, default=REVISION)
    arg_parser.add_argument("--cpu-only", action="store_true",
                            help="Run demo with CPU only")

    arg_parser.add_argument("--share", action="store_true", default=False,
                            help="Create a publicly shareable link for the interface.")
    arg_parser.add_argument("--inbrowser", action="store_true", default=False,
                            help="Automatically launch the interface in a new tab on the default browser.")
    arg_parser.add_argument("--server-port", type=int, default=7860,
                            help="Demo server port.")
    arg_parser.add_argument("--server-name", type=str, default="127.0.0.1",
                            help="Demo server name.")

    return arg_parser.parse_args()
49
+
50
+ def _parse_text(text):
51
+ lines = text.split("\n")
52
+ lines = [line for line in lines if line != ""]
53
+ count = 0
54
+ for i, line in enumerate(lines):
55
+ if "```" in line:
56
+ count += 1
57
+ items = line.split("`")
58
+ if count % 2 == 1:
59
+ lines[i] = f'<pre><code class="language-{items[-1]}">'
60
+ else:
61
+ lines[i] = f"<br></code></pre>"
62
+ else:
63
+ if i > 0:
64
+ if count % 2 == 1:
65
+ line = line.replace("`", r"\`")
66
+ line = line.replace("<", "&lt;")
67
+ line = line.replace(">", "&gt;")
68
+ line = line.replace(" ", "&nbsp;")
69
+ line = line.replace("*", "&ast;")
70
+ line = line.replace("_", "&lowbar;")
71
+ line = line.replace("-", "&#45;")
72
+ line = line.replace(".", "&#46;")
73
+ line = line.replace("!", "&#33;")
74
+ line = line.replace("(", "&#40;")
75
+ line = line.replace(")", "&#41;")
76
+ line = line.replace("$", "&#36;")
77
+ lines[i] = "<br>" + line
78
+ text = "".join(lines)
79
+ return text
80
+
81
+
82
+ def _remove_image_special(text):
83
+ text = text.replace('<ref>', '').replace('</ref>', '')
84
+ return re.sub(r'<box>.*?(</box>|$)', '', text)
85
+
86
def _launch_demo(args):
    # Directory for files written during chat (downloaded result images);
    # honors GRADIO_TEMP_DIR, falls back to <system tmp>/gradio.
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio"
    )
90
+
91
def predict(_chatbot, task_history):
    """Stream the model's reply for the latest user turn.

    Mutates ``task_history`` in place and yields updated ``_chatbot``
    lists so Gradio can render partial output as it streams.
    """
    chat_query = _chatbot[-1][0]
    query = task_history[-1][0]
    # Empty input: drop the placeholder turn and bail out.
    if len(chat_query) == 0:
        _chatbot.pop()
        task_history.pop()
        return _chatbot
    print("User: " + _parse_text(query))
    history_cp = copy.deepcopy(task_history)
    full_response = ""
    messages = []
    content = []
    # Convert (query, answer) history pairs into DashScope message format.
    # Tuple/list queries are uploaded files (answer is None for those turns);
    # a text query closes out the accumulated content as one user message.
    for q, a in history_cp:
        if isinstance(q, (tuple, list)):
            content.append({'image': f'file://{q[0]}'})
        else:
            content.append({'text': q})
            messages.append({'role': 'user', 'content': content})
            messages.append({'role': 'assistant', 'content': [{'text': a}]})
            content = []
    # Drop the placeholder assistant message for the still-pending turn.
    messages.pop()
    responses = MultiModalConversation.call(
        model='pre-qvq-72b-preview-1219', messages=messages, stream=True,
    )
    for response in responses:
        if not response.status_code == HTTPStatus.OK:
            raise HTTPError(f'response.code: {response.code}\nresponse.message: {response.message}')
        # Rebind `response` to the content list of the streamed chunk.
        response = response.output.choices[0].message.content
        response_text = []
        for ele in response:
            if 'text' in ele:
                response_text.append(ele['text'])
            elif 'box' in ele:
                response_text.append(ele['box'])
        response_text = ''.join(response_text)
        _chatbot[-1] = (_parse_text(chat_query), _remove_image_special(response_text))
        yield _chatbot

    # After streaming ends, `response` holds the final chunk's content.
    # More than one element means the last element carries a result image.
    if len(response) > 1:
        result_image = response[-1]['result_image']
        resp = requests.get(result_image)
        os.makedirs(uploaded_file_dir, exist_ok=True)
        name = f"tmp{secrets.token_hex(20)}.jpg"
        filename = os.path.join(uploaded_file_dir, name)
        with open(filename, 'wb') as f:
            f.write(resp.content)
        response = ''.join(r['box'] if 'box' in r else r['text'] for r in response[:-1])
        # Show the downloaded image as a standalone chatbot row.
        _chatbot.append((None, (filename,)))
    else:
        response = response[0]['text']
        _chatbot[-1] = (_parse_text(chat_query), response)
    full_response = _parse_text(response)

    task_history[-1] = (query, full_response)
    print("Qwen2-VL-Chat: " + _parse_text(full_response))
    yield _chatbot
147
+
148
+
149
def regenerate(_chatbot, task_history):
    """Re-run the model for the most recent answered turn."""
    if not task_history:
        return _chatbot
    item = task_history[-1]
    # Nothing to regenerate while the last turn is still unanswered.
    if item[1] is None:
        return _chatbot
    # Clear the stored answer so predict() produces a new one.
    task_history[-1] = (item[0], None)
    chatbot_item = _chatbot.pop(-1)
    if chatbot_item[0] is None:
        # Last chatbot row was a standalone image answer; clear the answer
        # on the preceding row instead of re-appending the image row.
        _chatbot[-1] = (_chatbot[-1][0], None)
    else:
        _chatbot.append((chatbot_item[0], None))
    _chatbot_gen = predict(_chatbot, task_history)
    for _chatbot in _chatbot_gen:
        yield _chatbot
164
+
165
def add_text(history, task_history, text):
    """Append the user's text as a new pending turn.

    Returns the updated chatbot history (HTML display form), the updated
    task history (raw form), and ``""`` to clear the input box.
    """
    shown = [] if history is None else history
    raw = [] if task_history is None else task_history
    shown = shown + [(_parse_text(text), None)]
    raw = raw + [(text, None)]
    return shown, raw, ""
172
+
173
def add_file(history, task_history, file):
    """Record an uploaded file as a new pending turn in both histories."""
    entry = ((file.name,), None)
    new_history = list(history) if history is not None else []
    new_task_history = list(task_history) if task_history is not None else []
    new_history.append(entry)
    new_task_history.append(entry)
    return new_history, new_task_history
179
+
180
def reset_user_input():
    """Clear the query textbox."""
    return gr.update(value="")
182
+
183
def reset_state(task_history):
    """Empty the stored task history in place and return a cleared chatbot."""
    del task_history[:]
    return []
186
+
187
with gr.Blocks() as demo:
    # Header banner and titles.
    gr.Markdown("""\
<p align="center"><img src="https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png" style="height: 80px"/><p>""")
    gr.Markdown("""<center><font size=8>QVQ-72B-preview</center>""")
    gr.Markdown(
        """\
<center><font size=3>This WebUI is based on QVQ-72B-preview, developed by Alibaba Cloud.</center>""")
    gr.Markdown("""<center><font size=3>本WebUI基于Qwen2-VL-Max。</center>""")

    chatbot = gr.Chatbot(label='QVQ-72B-preview', elem_classes="control-height", height=500)
    query = gr.Textbox(label='Input', )
    # Raw conversation history (parallel to the displayed chatbot rows).
    task_history = gr.State([])

    with gr.Row():
        addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image"])
        submit_btn = gr.Button("🚀 Submit (发送)")
        regen_btn = gr.Button("🤔️ Regenerate (重试)")

    # Event wiring: Enter or Submit records the text, then streams the reply.
    query.submit(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
        predict, [chatbot, task_history], [chatbot], show_progress=True
    )
    submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
        predict, [chatbot, task_history], [chatbot], show_progress=True
    )
    submit_btn.click(reset_user_input, [], [query])
    regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
    addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)

    gr.Markdown("""\
<font size=2>Note: This demo is governed by the original license of Qwen2-VL. \
We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, \
including hate speech, violence, pornography, deception, etc. \
(注:本演示受Qwen2-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")

demo.queue().launch(
    share=args.share,
    # inbrowser=args.inbrowser,
    # server_port=args.server_port,
    # server_name=args.server_name,
)
228
+
229
+
230
def main():
    """Entry point: parse CLI arguments and launch the Gradio demo."""
    args = _get_args()
    _launch_demo(args)


if __name__ == '__main__':
    main()
cutelogo.jpg ADDED
examples/1.webp ADDED
examples/2.png ADDED
examples/3.png ADDED
examples/4.png ADDED
logo.png ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ dashscope
2
+ oss2
3
+ modelscope_studio