anilbhatt1 committed on
Commit
722f993
1 Parent(s): ac092a1

Initial commit

Browse files
Files changed (2) hide show
  1. app.py +35 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
+
4
# Hugging Face Hub repo holding the fine-tuned phi-2 checkpoint.
model_path = "anilbhatt1/phi2-oasst-guanaco-bf16-custom"

# Tokenizer and model come from the same repo so vocab/config stay in sync.
tokenizer = AutoTokenizer.from_pretrained(model_path)
# NOTE: trust_remote_code=True executes model code shipped with the repo —
# acceptable here because the repo is the author's own.
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
8
# Cached text-generation pipeline; built lazily on the first request and
# reused afterwards (the original rebuilt it on every call, which re-wires
# the whole generation stack per request for no benefit).
_generator = None


def _get_generator():
    """Build the text-generation pipeline once and reuse it on later calls."""
    global _generator
    if _generator is None:
        _generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
    return _generator


def generate_text(prompt, response_length):
    """Generate a completion for *prompt* with the cached phi-2 pipeline.

    Args:
        prompt: user query; coerced to str to tolerate non-string widgets.
        response_length: maximum generation length; coerced to int. This is
            the transformers ``max_length`` — counted in tokens, not chars.

    Returns:
        The generated text of the first (only) candidate returned by the
        pipeline; per pipeline default it includes the prompt itself.
    """
    text = str(prompt)
    max_len = int(response_length)
    # max_length is supplied per call so the pipeline object can be cached.
    result = _get_generator()(text, max_length=max_len)
    return result[0]['generated_text']
17
+
18
def gradio_fn(prompt, response_length):
    """Gradio callback: delegate straight to generate_text."""
    return generate_text(prompt, response_length)
21
+
22
# Shown under the app title; rendered as Markdown by gr.Interface.
markdown_description = """
- This is a Gradio app that answers the query you ask it
- Uses **microsoft/phi-2 qlora** optimized model finetuned on **timdettmers/openassistant-guanaco** dataset
"""

# UI: one prompt textbox plus a length slider, both fed to gradio_fn.
demo = gr.Interface(
    fn=gradio_fn,
    inputs=[
        gr.Textbox(info="How may I help you ? please enter your prompt here..."),
        # The slider value becomes the transformers max_length, which is
        # measured in tokens — the original label said "chars", which was
        # misleading to users.
        gr.Slider(value=50, minimum=50, maximum=200,
                  info="Choose a response length min tokens=50, max=200"),
    ],
    outputs=gr.Textbox(),
    title="phi2 - Dialog Partner",
    description=markdown_description,
    article=" **Credits** : https://github.com/mshumer/gpt-llm-trainer ",
)

# queue() serializes requests through the single model; share=True asks
# Gradio for a public link when run outside a hosted Space.
demo.queue().launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio==3.50.2
2
+ torch>=2.1.0
3
+ transformers
4
+ tokenizers
5
+ einops