Tags: Text Generation · Transformers · Safetensors · llama · text-generation-inference · Inference Endpoints
danielsteinigen committed
Commit 9a67d89 · verified · 1 parent: e652889

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -115,7 +115,7 @@ tokenizer = AutoTokenizer.from_pretrained(
     trust_remote_code=True,
 )
 
-messages = [{"role": "User", "content": "Wer bist du?"}]
+messages = [{"role": "User", "content": "Hallo"}]
 prompt_ids = tokenizer.apply_chat_template(messages, chat_template="DE", tokenize=True, add_generation_prompt=True, return_tensors="pt")
 prediction = model.generate(
     prompt_ids.to(model.device),
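
For context, below is a minimal, self-contained sketch of the README snippet this hunk edits. Only the tokenizer loading with trust_remote_code, the messages list, the apply_chat_template call with chat_template="DE", and the model.generate call come from the diff; the model ID, dtype, device placement, generation settings, and decoding step are assumptions added here for illustration.

```python
# Sketch only: "org/model-name" is a placeholder, not the repository's actual model ID.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "org/model-name"  # placeholder; replace with the actual model ID

# Assumed loading settings (dtype/device are not shown in the diff)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    trust_remote_code=True,
)

# Updated example prompt from this commit
messages = [{"role": "User", "content": "Hallo"}]
prompt_ids = tokenizer.apply_chat_template(
    messages,
    chat_template="DE",
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)
prediction = model.generate(
    prompt_ids.to(model.device),
    max_new_tokens=256,  # assumed; the diff does not show generation settings
)
print(tokenizer.decode(prediction[0], skip_special_tokens=True))
```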