llama_ui_test / test.py
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("martinbravo/llama_finetuned_test")
# Load the fine-tuned model with its causal language-modeling head
# (AutoModel alone would return hidden states rather than generated text)
model = AutoModelForCausalLM.from_pretrained("martinbravo/llama_finetuned_test")
# Test the model with a simple prompt
input_text = "What is the capital of France?"
inputs = tokenizer(input_text, return_tensors="pt")
# Perform inference: generate a completion and decode it back to text
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
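# A minimal alternative sketch, assuming the checkpoint includes a causal-LM
# head: the high-level text-generation pipeline bundles tokenization,
# generation, and decoding into a single call.
from transformers import pipeline
generator = pipeline("text-generation", model="martinbravo/llama_finetuned_test")
result = generator("What is the capital of France?", max_new_tokens=50)
print(result[0]["generated_text"])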