import spaces
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer locally
model_name = "kz919/QwQ-0.5B-Distilled-SFT"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda")


# Define the function to handle chat responses
@spaces.GPU
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Prepare the prompt by combining the system message and the chat history
    prompt = system_message + "\n"
    for user_input, assistant_response in history:
        prompt += f"User: {user_input}\nAssistant: {assistant_response}\n"
    prompt += f"User: {message}\nAssistant:"

    # Tokenize the input prompt and move it to the GPU
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")

    # Generate a response. max_new_tokens (not max_length) caps only the newly
    # generated tokens, so a long history can't eat the output budget, and
    # do_sample=True is required for temperature/top_p to take effect.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens (generate echoes the prompt back
    # in outputs[0]) and yield the response
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
    yield response.strip()


# Create the Gradio interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()
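

# --- Aside: prompt building via the tokenizer's chat template (sketch) ---
# Qwen-family checkpoints typically ship a chat template, so building the
# prompt with tokenizer.apply_chat_template would match the model's training
# format more closely than the hand-rolled "User:/Assistant:" prompt used in
# respond() above. The helper below is a minimal sketch of that approach
# (assuming this checkpoint's tokenizer defines a chat template); it is kept
# here as an appendix and is NOT wired into the app.
def build_chat_prompt(message, history, system_message):
    messages = [{"role": "system", "content": system_message}]
    for user_input, assistant_response in history:
        messages.append({"role": "user", "content": user_input})
        messages.append({"role": "assistant", "content": assistant_response})
    messages.append({"role": "user", "content": message})
    # add_generation_prompt=True appends the assistant-turn header so the
    # model continues as the assistant rather than predicting a user turn.
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)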