import gradio as gr
import spaces
import torch
import faiss
import numpy as np

from datasets import load_dataset
from transformers import (
    AutoConfig,
    AutoTokenizer,
    AutoModelForCausalLM,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
    pipeline,
    BitsAndBytesConfig,
)

from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training, PeftModel

from sentence_transformers import SentenceTransformer

# Global variables for pipelines and settings.
TEXT_PIPELINE = None
COMPARISON_PIPELINE = None
NUM_EXAMPLES = 50

@spaces.GPU(duration=300)
def finetune_small_subset():
    """
    Fine-tunes the custom R1 model on a small subset of the ServiceNow-AI/R1-Distill-SFT dataset.
    Steps:
      1) Loads the model from "wuhp/myr1" (using files from the "myr1" subfolder via trust_remote_code).
      2) Applies 4-bit quantization and prepares for QLoRA training.
      3) Fine-tunes on the dataset (mapping "problem" to prompt and "solution" to target).
      4) Saves the LoRA adapter to "finetuned_myr1".
      5) Reloads the adapter for inference.
    """
    # Specify the configuration ("v0" or "v1") explicitly.
    ds = load_dataset("ServiceNow-AI/R1-Distill-SFT", "v0", split="train")
    ds = ds.select(range(min(NUM_EXAMPLES, len(ds))))

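    # 4-bit NF4 quantization with double quantization and bfloat16 compute: the standard QLoRA setup.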
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
    )

    # Load the custom model configuration from the repository.
    base_config = AutoConfig.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        trust_remote_code=True,
    )
    # (Optionally apply local overrides here if needed.)

    tokenizer = AutoTokenizer.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        trust_remote_code=True
    )
    # Safeguard: some causal-LM tokenizers ship without a pad token, which the data collator needs for padding.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    base_model = AutoModelForCausalLM.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        config=base_config,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True
    )

    base_model = prepare_model_for_kbit_training(base_model)

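    # LoRA adapter: rank-16 updates on the attention q/v projections only, so only a small
    # fraction of parameters is trained on top of the frozen 4-bit base model.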
    lora_config = LoraConfig(
        r=16,
        lora_alpha=32,
        lora_dropout=0.05,
        bias="none",
        target_modules=["q_proj", "v_proj"],
        task_type=TaskType.CAUSAL_LM,
    )
    lora_model = get_peft_model(base_model, lora_config)

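    # Flatten each example into a single "Problem: ... Solution: ..." string for causal LM training.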
    def tokenize_fn(ex):
        text = (
            f"Problem: {ex['problem']}\n\n"
            f"Solution: {ex['solution']}"
        )
        return tokenizer(text, truncation=True, max_length=512)

    ds = ds.map(tokenize_fn, batched=False, remove_columns=ds.column_names)
    ds.set_format("torch")

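    # mlm=False means causal-LM collation: labels are the input ids themselves (shifted inside the model).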
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

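    # Deliberately tiny run (1 epoch, effective batch size 2) so it fits the 5-minute GPU allocation above.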
    training_args = TrainingArguments(
        output_dir="finetuned_myr1",
        num_train_epochs=1,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=2,
        logging_steps=5,
        save_steps=999999,
        save_total_limit=1,
        fp16=False,
    )

    trainer = Trainer(
        model=lora_model,
        args=training_args,
        train_dataset=ds,
        data_collator=collator,
    )
    trainer.train()

    trainer.model.save_pretrained("finetuned_myr1")
    tokenizer.save_pretrained("finetuned_myr1")

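    # Reload a fresh 4-bit base model and attach the just-saved LoRA adapter for inference.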
    base_model_2 = AutoModelForCausalLM.from_pretrained(
        "wuhp/myr1",
        subfolder="myr1",
        config=base_config,
        quantization_config=bnb_config,
        device_map="auto",
        trust_remote_code=True
    )
    base_model_2 = prepare_model_for_kbit_training(base_model_2)

    lora_model_2 = PeftModel.from_pretrained(
        base_model_2,
        "finetuned_myr1",
    )

    global TEXT_PIPELINE
    TEXT_PIPELINE = pipeline("text-generation", model=lora_model_2, tokenizer=tokenizer)

    return "Finetuning complete. Model loaded for inference."

def ensure_pipeline():
    """
    Loads the base model (without LoRA) if no fine-tuned model is available.
    """
    global TEXT_PIPELINE
    if TEXT_PIPELINE is None:
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
        )
        base_config = AutoConfig.from_pretrained("wuhp/myr1", subfolder="myr1", trust_remote_code=True)
        tokenizer = AutoTokenizer.from_pretrained("wuhp/myr1", subfolder="myr1", trust_remote_code=True)
        base_model = AutoModelForCausalLM.from_pretrained(
            "wuhp/myr1",
            subfolder="myr1",
            config=base_config,
            quantization_config=bnb_config,
            device_map="auto",
            trust_remote_code=True
        )
        TEXT_PIPELINE = pipeline("text-generation", model=base_model, tokenizer=tokenizer)
    return TEXT_PIPELINE

def ensure_comparison_pipeline():
    """
    Loads the official R1 model pipeline if not already loaded.
    """
    global COMPARISON_PIPELINE
    if COMPARISON_PIPELINE is None:
        config = AutoConfig.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Llama-8B")
        tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Llama-8B")
        model = AutoModelForCausalLM.from_pretrained(
            "deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
            config=config,
            torch_dtype=torch.bfloat16,  # avoid loading the 8B comparison model in full fp32
            device_map="auto"
        )
        COMPARISON_PIPELINE = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return COMPARISON_PIPELINE

@spaces.GPU(duration=120)
def predict(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
    """
    Direct generation without retrieval using the custom R1 model.
    """
    pipe = ensure_pipeline()
    out = pipe(
        prompt,
        temperature=float(temperature),
        top_p=float(top_p),
        min_new_tokens=int(min_new_tokens),
        max_new_tokens=int(max_new_tokens),
        do_sample=True
    )
    return out[0]["generated_text"]

@spaces.GPU(duration=120)
def compare_models(prompt, temperature, top_p, min_new_tokens, max_new_tokens):
    """
    Compare outputs between your custom R1 model and the official R1 model.
    """
    local_pipe = ensure_pipeline()
    comp_pipe = ensure_comparison_pipeline()

    local_out = local_pipe(
        prompt,
        temperature=float(temperature),
        top_p=float(top_p),
        min_new_tokens=int(min_new_tokens),
        max_new_tokens=int(max_new_tokens),
        do_sample=True
    )
    comp_out = comp_pipe(
        prompt,
        temperature=float(temperature),
        top_p=float(top_p),
        min_new_tokens=int(min_new_tokens),
        max_new_tokens=int(max_new_tokens),
        do_sample=True
    )
    return local_out[0]["generated_text"], comp_out[0]["generated_text"]

class ConversationRetriever:
    """
    A FAISS-based retriever using SentenceTransformer for embedding.
    """
    def __init__(self, model_name="sentence-transformers/all-MiniLM-L6-v2", embed_dim=384):
        self.embed_model = SentenceTransformer(model_name)
        self.embed_dim = embed_dim
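        # 384 matches the output dimension of all-MiniLM-L6-v2; adjust if a different embedding model is used.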
        self.index = faiss.IndexFlatL2(embed_dim)
        self.texts = []
        self.vectors = []
        self.ids = []
        self.id_counter = 0

    def add_text(self, text):
        if not text.strip():
            return
        emb = self.embed_model.encode([text], convert_to_numpy=True)
        vec = emb[0].astype(np.float32)
        self.index.add(vec.reshape(1, -1))
        self.texts.append(text)
        self.vectors.append(vec)
        self.ids.append(self.id_counter)
        self.id_counter += 1

    def search(self, query, top_k=3):
        q_emb = self.embed_model.encode([query], convert_to_numpy=True).astype(np.float32)
        q_vec = q_emb[0].reshape(1, -1)
        distances, indices = self.index.search(q_vec, top_k)
        results = []
        for dist, idx in zip(distances[0], indices[0]):
            # FAISS pads missing hits with -1 when the index holds fewer than top_k vectors.
            if 0 <= idx < len(self.texts):
                results.append((self.texts[idx], dist))
        return results

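# One module-level retriever shared across requests, so chat memory persists for the lifetime of the app.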
retriever = ConversationRetriever()

def build_rag_prompt(user_query, retrieved_chunks):
    """
    Builds a prompt for retrieval-augmented generation.
    """
    context_str = ""
    for i, (chunk, dist) in enumerate(retrieved_chunks):
        # dist is an L2 distance from FAISS, so smaller values mean a closer match.
        context_str += f"Chunk #{i+1} (L2 distance {dist:.2f}):\n{chunk}\n\n"
    prompt = (
        f"User's Query:\n{user_query}\n\n"
        f"Relevant Context:\n{context_str}"
        "Assistant:"
    )
    return prompt

@spaces.GPU(duration=120)
def chat_rag(user_input, history, temperature, top_p, min_new_tokens, max_new_tokens):
    """
    Chat with retrieval augmentation.
    """
    pipe = ensure_pipeline()
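    # Index the user turn first so it (and earlier turns) can be retrieved as context.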
    retriever.add_text(f"User: {user_input}")
    top_k = 3
    results = retriever.search(user_input, top_k=top_k)
    prompt = build_rag_prompt(user_input, results)
    output = pipe(
        prompt,
        temperature=float(temperature),
        top_p=float(top_p),
        min_new_tokens=int(min_new_tokens),
        max_new_tokens=int(max_new_tokens),
        do_sample=True
    )[0]["generated_text"]

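    # text-generation pipelines echo the prompt; strip it so only the newly generated reply is kept.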
    if output.startswith(prompt):
        assistant_reply = output[len(prompt):].strip()
    else:
        assistant_reply = output.strip()

    retriever.add_text(f"Assistant: {assistant_reply}")
    history.append([user_input, assistant_reply])
    return history, history

# Build the Gradio interface.
with gr.Blocks() as demo:
    gr.Markdown("# QLoRA Fine-tuning & RAG-based Chat Demo using Custom R1 Model")

    finetune_btn = gr.Button("Finetune 4-bit (QLoRA) on ServiceNow-AI/R1-Distill-SFT subset (up to 5 min)")
    status_box = gr.Textbox(label="Finetune Status")
    finetune_btn.click(fn=finetune_small_subset, outputs=status_box)

    gr.Markdown("## Direct Generation (No Retrieval) using Custom R1")
    prompt_in = gr.Textbox(lines=3, label="Prompt")
    temperature = gr.Slider(0.0, 1.5, step=0.1, value=0.7, label="Temperature")
    top_p = gr.Slider(0.0, 1.0, step=0.05, value=0.9, label="Top-p")
    min_tokens = gr.Slider(1, 2500, value=50, step=10, label="Min New Tokens")
    max_tokens = gr.Slider(1, 2500, value=200, step=50, label="Max New Tokens")
    output_box = gr.Textbox(label="Custom R1 Output", lines=8)
    gen_btn = gr.Button("Generate with Custom R1")
    gen_btn.click(
        fn=predict,
        inputs=[prompt_in, temperature, top_p, min_tokens, max_tokens],
        outputs=output_box
    )

    gr.Markdown("## Compare Custom R1 vs Official R1")
    compare_btn = gr.Button("Compare")
    out_custom = gr.Textbox(label="Custom R1 Output", lines=6)
    out_official = gr.Textbox(label="Official R1 Output", lines=6)
    compare_btn.click(
        fn=compare_models,
        inputs=[prompt_in, temperature, top_p, min_tokens, max_tokens],
        outputs=[out_custom, out_official]
    )

    gr.Markdown("## Chat with Retrieval-Augmented Memory")
    with gr.Row():
        with gr.Column():
            chatbot = gr.Chatbot(label="RAG Chat")
            chat_state = gr.State([])
            user_input = gr.Textbox(
                show_label=False,
                placeholder="Ask a question...",
                lines=2
            )
            send_btn = gr.Button("Send")
    user_input.submit(
        fn=chat_rag,
        inputs=[user_input, chat_state, temperature, top_p, min_tokens, max_tokens],
        outputs=[chat_state, chatbot]
    )
    send_btn.click(
        fn=chat_rag,
        inputs=[user_input, chat_state, temperature, top_p, min_tokens, max_tokens],
        outputs=[chat_state, chatbot]
    )

demo.launch()