minor
app.py
CHANGED
@@ -1,64 +1,211 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
     demo.launch()
+import os
+import torch
+from threading import Thread
+from typing import List, Optional, Tuple, Dict
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+import spaces
+from pathlib import Path
+from huggingface_hub import CommitScheduler
+import uuid
+import json
+
+# Constants
+SYSTEM_PROMPT = """You are SmallThinker-3B, a helpful AI assistant. You try to follow instructions as much as possible while being accurate and brief."""
+device = "cuda" if torch.cuda.is_available() else "cpu"
+TITLE = "<h1><center>SmallThinker-3B Chat</center></h1>"
+MODEL_PATH = "PowerInfer/SmallThinker-3B-Preview"
+
+# Custom CSS with dark theme
+CSS = """
+.duplicate-button {
+    margin: auto !important;
+    color: white !important;
+    background: black !important;
+    border-radius: 100vh !important;
+}
+
+h3 {
+    text-align: center;
+}
+
+.chat-container {
+    height: 500px !important;
+    overflow-y: auto !important;
+    flex-direction: column !important;
+}
+
+.messages-container {
+    flex-grow: 1 !important;
+    overflow-y: auto !important;
+    padding-right: 10px !important;
+}
+
+.contain {
+    height: 100% !important;
+}
+
+button {
+    border-radius: 8px !important;
+}
+"""
+
+# Load model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_PATH,
+    torch_dtype=torch.bfloat16,
+).to(device)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+
+logs_id = os.getenv("LOGS_ID")
+logs_token = os.getenv("HF_LOGS_TOKEN")
+
+logs_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
+logs_folder = logs_file.parent
+
+scheduler = CommitScheduler(
+    repo_id=logs_id,
+    repo_type="dataset",
+    folder_path=logs_folder,
+    path_in_repo="data",
+    every=5,
+    token=logs_token,
+    private=True,
+)
+
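Note on this block: `CommitScheduler(every=5)` pushes the contents of logs/ to the dataset repo named by LOGS_ID roughly every five minutes (the `every` parameter is expressed in minutes), committing whatever new lines have been appended since the last push. The tokenizer loaded here also carries the checkpoint's chat template, which `stream_chat` below applies. As a rough illustration, a hedged sketch of what that templating produces; the exact special tokens come from the tokenizer config, so the ChatML-style rendering in the comments is an assumption rather than verified output:

    # Illustrative only: how a conversation is rendered before tokenization.
    conversation = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": "Hi"},
    ]
    text = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    # For a Qwen-style template, `text` is roughly (assumption):
    #   <|im_start|>system\n<SYSTEM_PROMPT><|im_end|>\n
    #   <|im_start|>user\nHi<|im_end|>\n
    #   <|im_start|>assistant\n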
+@spaces.GPU
+def stream_chat(
+    message: str,
+    history: list,
+    temperature: float = 0.3,
+    max_new_tokens: int = 1024,
+    top_p: float = 1.0,
+    top_k: int = 20,
+    repetition_penalty: float = 1.2,
+):
+    # Create new history list with current message
+    new_history = history + [[message, ""]]
+
+    conversation = []
+    # Only include previous messages in the conversation
+    for prompt, answer in history:
+        conversation.extend([
+            {"role": "system", "content": SYSTEM_PROMPT},
+            {"role": "user", "content": prompt},
+            {"role": "assistant", "content": answer},
+        ])
+
+    conversation.append({"role": "user", "content": message})
+
+    input_text = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
+    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
+    streamer = TextIteratorStreamer(tokenizer, timeout=40.0, skip_prompt=True, skip_special_tokens=True)
+
+    generate_kwargs = dict(
+        input_ids=inputs,
+        max_new_tokens=max_new_tokens,
+        do_sample=False if temperature == 0 else True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        repetition_penalty=repetition_penalty,
+        streamer=streamer,
+        pad_token_id=tokenizer.pad_token_id,
+    )
+
+    with torch.no_grad():
+        thread = Thread(target=model.generate, kwargs=generate_kwargs)
+        thread.start()
+
+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text
+        buffer = buffer.replace("\nUser", "")
+        buffer = buffer.replace("\nSystem", "")
+        new_history[-1][1] = buffer
+        yield new_history
+
+    with scheduler.lock:
+        with logs_file.open("a") as f:
+            f.write(json.dumps({"input": input_text.replace(SYSTEM_PROMPT, ""), "output": buffer.replace(SYSTEM_PROMPT, ""), "model": "SmallThinker-3B"}))
+            f.write("\n")
+
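`stream_chat` follows the standard transformers streaming pattern: `model.generate` blocks until generation finishes, so it runs on a worker thread while the caller drains the `TextIteratorStreamer`. A minimal, self-contained sketch of the same pattern, with a small placeholder checkpoint standing in for the real model:

    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")   # placeholder checkpoint
    lm = AutoModelForCausalLM.from_pretrained("gpt2")

    enc = tok("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so it runs on a worker thread; the main thread
    # receives decoded text chunks as soon as they become available.
    Thread(target=lm.generate, kwargs=dict(**enc, max_new_tokens=32, streamer=streamer)).start()
    for piece in streamer:
        print(piece, end="", flush=True)

One detail: grad mode is thread-local, so the `torch.no_grad()` above only covers thread creation, not the generation itself; that is harmless here because `generate` already runs without gradients internally.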
+def clear_input():
+    return ""
+
+def add_message(message: str, history: list):
+    if message.strip() != "":
+        history = history + [[message, ""]]
+    return history
+
+def clear_session() -> Tuple[str, List]:
+    return '', []
+
+def main():
+    with gr.Blocks(css=CSS, theme="soft") as demo:
+        gr.HTML(TITLE)
+        gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
+
+        with gr.Row():
+            with gr.Accordion(label="Chat Interface", open=True):
+                chatbot = gr.Chatbot(
+                    label='SmallThinker-3B',
+                    height=500,
+                    container=True,
+                    elem_classes=["chat-container"]
+                )
+
+        with gr.Accordion(label="⚙️ Parameters", open=False):
+            temperature = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.3, label="Temperature")
+            max_new_tokens = gr.Slider(minimum=128, maximum=32768, step=128, value=16384, label="Max new tokens")
+            top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="Top-p")
+            top_k = gr.Slider(minimum=1, maximum=100, step=1, value=20, label="Top-k")
+            repetition_penalty = gr.Slider(minimum=1.0, maximum=2.0, step=0.1, value=1.1, label="Repetition penalty")
+
+        textbox = gr.Textbox(lines=1, label='Input')
+
+        with gr.Row():
+            clear_history = gr.Button("🧹 Clear History")
+            submit = gr.Button("🚀 Send")
+
+        # Chain of events for submit button
+        submit_event = submit.click(
+            fn=add_message,
+            inputs=[textbox, chatbot],
+            outputs=chatbot,
+            queue=False
+        ).then(
+            fn=clear_input,
+            outputs=textbox,
+            queue=False
+        ).then(
+            fn=stream_chat,
+            inputs=[textbox, chatbot, temperature, max_new_tokens, top_p, top_k, repetition_penalty],
+            outputs=chatbot,
+            show_progress=True
+        )
+
+        # Chain of events for enter key
+        enter_event = textbox.submit(
+            fn=add_message,
+            inputs=[textbox, chatbot],
+            outputs=chatbot,
+            queue=False
+        ).then(
+            fn=clear_input,
+            outputs=textbox,
+            queue=False
+        ).then(
+            fn=stream_chat,
+            inputs=[textbox, chatbot, temperature, max_new_tokens, top_p, top_k, repetition_penalty],
+            outputs=chatbot,
+            show_progress=True
+        )
+
+        clear_history.click(fn=clear_session,
+                            outputs=[textbox, chatbot])
+
     demo.launch()
+
+if __name__ == "__main__":
+    main()
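Each app process writes one JSON Lines shard that the scheduler uploads under data/ in the (private) logs dataset. Reading the logs back later could look like the hedged sketch below; the repo id is a placeholder for whatever LOGS_ID holds, not a value taken from this Space:

    import json
    from huggingface_hub import hf_hub_download, list_repo_files

    repo = "your-org/your-logs-dataset"  # assumption: the repo named by LOGS_ID
    shards = [f for f in list_repo_files(repo, repo_type="dataset") if f.startswith("data/")]
    path = hf_hub_download(repo, filename=shards[0], repo_type="dataset")
    with open(path) as fh:
        records = [json.loads(line) for line in fh if line.strip()]
    print(records[0]["model"], records[0]["output"][:80])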