import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer from HuggingFace
model_name = "HuggingFaceTB/SmolLM2-135M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate(prompt, max_length=50, temperature=0.7):
    """Generate text from a prompt."""
    inputs = tokenizer(prompt, return_tensors="pt")
    # Sample up to max_length new tokens with nucleus sampling
    # and a mild repetition penalty
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_length,
        temperature=temperature,
        do_sample=True,
        top_p=0.9,
        repetition_penalty=1.1,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Create Gradio interface
demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Enter your prompt", value="Once upon a time"),
        gr.Slider(minimum=10, maximum=200, value=50, label="Maximum length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="SmolLM2 Text Generation",
    description="Text generation with the SmolLM2-135M small language model.",
)

if __name__ == "__main__":
    demo.launch()
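
# Once the app is running, it can also be queried programmatically. A minimal
# sketch using the gradio_client package (assuming the demo is served locally
# on Gradio's default port, 7860; the inputs map positionally to the
# Interface components above):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")
#     result = client.predict(
#         "Once upon a time",  # prompt
#         50,                  # maximum length
#         0.7,                 # temperature
#         api_name="/predict",
#     )
#     print(result)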