muhammedAdnan3 committed
Commit 870a44e · verified · 1 Parent(s): 6989da3

Create Llama.py

Files changed (1)
  1. Llama.py +31 -0
Llama.py ADDED
@@ -0,0 +1,31 @@
+ import pyttsx3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Load the pre-trained model and tokenizer from Hugging Face
+ model_name = "decapoda-research/llama-7b-hf"  # Placeholder, replace with an actual model if available
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Initialize the text-to-speech engine
+ tts_engine = pyttsx3.init()
+
+ def generate_text(prompt):
+     # Tokenize the prompt and generate a continuation with the model
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=50)
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return generated_text
+
+ def text_to_speech(text):
+     # Use the TTS engine to speak the text aloud
+     tts_engine.say(text)
+     tts_engine.runAndWait()
+
+ def main():
+     prompt = "Once upon a time"  # Replace with your desired prompt
+     generated_text = generate_text(prompt)
+     print(f"Generated Text: {generated_text}")
+     text_to_speech(generated_text)
+
+ if __name__ == "__main__":
+     main()