pradeep6kumar2024 committed
Commit 2a981df · 0 parent(s)

Initial space setup without model weights

Files changed (3):
  1. README.md +24 -0
  2. app.py +40 -0
  3. requirements.txt +3 -0
README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ title: SmolLM2 Demo
+ emoji: 🤖
+ colorFrom: blue
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.12.0
+ app_file: app.py
+ pinned: false
+ ---
+
+ # SmolLM2 Demo
+
+ This is a demo of the SmolLM2 language model, a small transformer-based model trained on custom text data.
+
+ ## Features
+ - Text generation with adjustable parameters
+ - Temperature control for creativity
+ - Configurable output length
+
+ ## Usage
+ 1. Enter your prompt text
+ 2. Adjust the maximum length and temperature
+ 3. Click generate to see the model's output
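
Beyond the web UI described in the README, a running Space can also be queried programmatically with the `gradio_client` package. The sketch below is an assumption-laden example, not part of this commit: the Space ID `pradeep6kumar2024/smollm2-demo` is a guess at the eventual name, and `/predict` is the default endpoint a single `gr.Interface` exposes.

```python
# Hypothetical programmatic call to the Space via gradio_client.
# The Space ID below is an assumption; replace it with the real "user/space" name.
from gradio_client import Client

client = Client("pradeep6kumar2024/smollm2-demo")  # assumed Space ID
result = client.predict(
    "Once upon a time",   # prompt
    50,                   # maximum length (new tokens)
    0.7,                  # temperature
    api_name="/predict",  # default endpoint for a single gr.Interface
)
print(result)
```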
app.py ADDED
@@ -0,0 +1,40 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load model and tokenizer from HuggingFace
+ model_name = "HuggingFaceTB/SmolLM2-135M"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ def generate(prompt, max_length=50, temperature=0.7):
+     """Generate text from prompt"""
+     inputs = tokenizer(prompt, return_tensors="pt")
+
+     # Generate text
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=max_length,
+         temperature=temperature,
+         do_sample=True,
+         top_p=0.9,
+         repetition_penalty=1.1
+     )
+
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Create Gradio interface
+ demo = gr.Interface(
+     fn=generate,
+     inputs=[
+         gr.Textbox(label="Enter your prompt", value="Once upon a time"),
+         gr.Slider(minimum=10, maximum=200, value=50, label="Maximum length"),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
+     ],
+     outputs=gr.Textbox(label="Generated Text"),
+     title="SmolLM2 Text Generation",
+     description="A small language model based on SmolLM2-135M architecture."
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
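
Because `demo.launch()` is guarded by `if __name__ == "__main__":`, the `generate` function can be imported and exercised without starting the Gradio UI. The snippet below is a minimal local smoke-test sketch, not part of this commit; it assumes it runs from the same directory as app.py and that the SmolLM2-135M weights can be downloaded from the Hub on first import.

```python
# Minimal local smoke test for generate() from app.py (assumed to be in the
# working directory). Importing app.py loads the model and tokenizer but does
# not launch the Gradio server.
from app import generate

sample = generate("Once upon a time", max_length=30, temperature=0.8)
print(sample)
```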
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch
+ transformers
+ gradio
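
The unpinned requirements above will install whatever releases are current at build time, which can drift from the `sdk_version: 4.12.0` declared in the README. A pinned variant is sketched below; the exact versions are illustrative assumptions, not taken from this commit.

```text
# requirements.txt (pinned variant, versions are assumptions)
torch==2.1.2
transformers==4.38.2
gradio==4.12.0
```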