# Source snapshot metadata (recovered from file-viewer scrape residue):
#   file size: 736 bytes
#   commits: e7b7459, 17b826e
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Set the title of the app
st.title('LLaMA2Glenda')


@st.cache_resource
def _load_pipeline():
    """Load the model and tokenizer once and build the generation pipeline.

    Streamlit re-executes this script on every user interaction; without
    ``st.cache_resource`` the 7B model would be reloaded on each rerun,
    which is prohibitively slow and memory-hungry.

    Returns:
        A ``transformers`` text-generation pipeline capped at 200 tokens.
    """
    model = AutoModelForCausalLM.from_pretrained("tminh/llama-2-7b-glenda")
    tokenizer = AutoTokenizer.from_pretrained("TinyPixel/Llama-2-7B-bf16-sharded")
    return pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)


# Create a text input for the prompt
prompt = st.text_input('Enter your prompt:')

# Create a button to trigger the inference
if st.button('Generate Answer'):
    if not prompt.strip():
        # Guard against running a 7B model on an empty prompt
        st.warning('Please enter a prompt first.')
    else:
        pipe = _load_pipeline()
        # Llama-2 chat prompt template: "<s>[INST] ... [/INST]"
        result = pipe(f"<s>[INST] {prompt} [/INST]")
        # Display the generated text (pipeline returns a list of dicts)
        st.write(result[0]['generated_text'])