macadeliccc committed
Commit 4913a05 · verified · 1 Parent(s): bc73189

Update README.md

Files changed (1):
  1. README.md +34 -13
README.md CHANGED
@@ -14,27 +14,48 @@ Merge of two SOLAR models. This is an experiment to improve models' ability to le
 
 ## 🌅 Code Example
 
+Example also available in [colab](https://colab.research.google.com/drive/10FWCLODU_EFclVOFOlxNYMmSiLilGMBZ?usp=sharing)
+
 ```python
-import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-tokenizer = AutoTokenizer.from_pretrained("macadeliccc/SOLAR-math-2x10.7b", load_in_4bit=True)
-model = AutoModelForCausalLM.from_pretrained(
-    "macadeliccc/SOLAR-math-2x10.7b",
-    device_map="auto",
-    torch_dtype=torch.float16,
-)
-
-conversation = [{'role': 'user', 'content': 'A rectangle has a length that is twice its width and its area is 50 square meters. Find the dimensions of the rectangle.'}]
-
-prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
-
-inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-outputs = model.generate(**inputs, use_cache=True, max_length=4096)
-output_text = tokenizer.decode(outputs[0])
-print(output_text)
+def generate_response(prompt):
+    """
+    Generate a response from the model based on the input prompt.
+
+    Args:
+        prompt (str): Prompt for the model.
+
+    Returns:
+        str: The generated response from the model.
+    """
+    # Tokenize the input prompt
+    inputs = tokenizer(prompt, return_tensors="pt")
+
+    # Generate output tokens
+    outputs = model.generate(**inputs, max_new_tokens=512, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id)
+
+    # Decode the generated tokens to a string
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    return response
+
+
+# Load the model and tokenizer
+model_id = "macadeliccc/SOLAR-math-2x10.7b"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True)
+
+prompt = "Explain the proof of Fermat's Last Theorem and its implications in number theory."
+
+print("Response:")
+print(generate_response(prompt), "\n")
 ```
 
 ## Evaluations
 
 TODO
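
The added example passes `load_in_4bit=True` directly to `from_pretrained`, which requires the `bitsandbytes` package and a CUDA GPU. On recent `transformers` releases the same quantized load is usually spelled through a `BitsAndBytesConfig`; a minimal sketch of that variant, not part of the commit itself:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "macadeliccc/SOLAR-math-2x10.7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Same 4-bit quantized load as load_in_4bit=True, expressed via the config
# object; requires bitsandbytes and a CUDA device.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
```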
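Note also that the added example feeds the raw prompt string to the tokenizer, whereas the removed example wrapped the prompt with `apply_chat_template`. For an instruction-tuned merge, the templated form may track the training format more closely. A minimal sketch reusing the `model` and `tokenizer` loaded above, assuming the repo ships a chat template (the removed snippet relied on one):

```python
# Wrap the user message in the tokenizer's chat template before generating.
conversation = [{"role": "user", "content": "A rectangle has a length that is twice its width and its area is 50 square meters. Find the dimensions of the rectangle."}]
prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512, pad_token_id=tokenizer.pad_token_id)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```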