Torch import was missing on line 120
#5 opened by Speedsy

README.md CHANGED
@@ -117,6 +117,7 @@ You may also construct the pipeline from the loaded model and tokenizer yourself
 
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
 model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3"  # either local folder or huggingface model name
 # Important: The prompt needs to be in the same format the model was trained with.
```
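For reference, this is how the corrected README snippet reads once the hunk is applied. The hunk cuts off before the model is actually loaded, so the loading lines below are a hedged sketch: the `torch_dtype` and `device_map` arguments are assumptions about how the README continues, included only to illustrate why the `torch` import is needed.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_name = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3"  # either local folder or huggingface model name
# Important: The prompt needs to be in the same format the model was trained with.

# Assumed continuation (not part of the shown hunk): loading the model with a
# half-precision dtype references the torch module directly, which is what
# makes the added `import torch` necessary.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # `torch` is used here, hence the import
    device_map="auto",
)
```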