psinger committed on
Commit
a11dcab
1 Parent(s): e01e806

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -6
README.md CHANGED
@@ -49,14 +49,14 @@ quantization_config = BitsAndBytesConfig(
49
  model_kwargs["quantization_config"] = quantization_config
50
 
51
  tokenizer = AutoTokenizer.from_pretrained(
52
- "psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
53
  use_fast=False,
54
  padding_side="left",
55
  trust_remote_code=True,
56
  )
57
 
58
  generate_text = pipeline(
59
- model="psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
60
  tokenizer=tokenizer,
61
  torch_dtype=torch.float16,
62
  trust_remote_code=True,
@@ -103,13 +103,13 @@ quantization_config = BitsAndBytesConfig(
103
  )
104
 
105
  tokenizer = AutoTokenizer.from_pretrained(
106
- "psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
107
  use_fast=False,
108
  padding_side="left",
109
  trust_remote_code=True,
110
  )
111
  model = AutoModelForCausalLM.from_pretrained(
112
- "psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
113
  trust_remote_code=True,
114
  torch_dtype=torch.float16,
115
  device_map={"": "cuda:0"},
@@ -148,13 +148,13 @@ quantization_config = BitsAndBytesConfig(
148
  )
149
 
150
  tokenizer = AutoTokenizer.from_pretrained(
151
- "psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
152
  use_fast=False,
153
  padding_side="left",
154
  trust_remote_code=True,
155
  )
156
  model = AutoModelForCausalLM.from_pretrained(
157
- "psinger/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
158
  trust_remote_code=True,
159
  torch_dtype=torch.float16,
160
  device_map={"": "cuda:0"},
 
49
  model_kwargs["quantization_config"] = quantization_config
50
 
51
  tokenizer = AutoTokenizer.from_pretrained(
52
+ "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
53
  use_fast=False,
54
  padding_side="left",
55
  trust_remote_code=True,
56
  )
57
 
58
  generate_text = pipeline(
59
+ model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
60
  tokenizer=tokenizer,
61
  torch_dtype=torch.float16,
62
  trust_remote_code=True,
 
103
  )
104
 
105
  tokenizer = AutoTokenizer.from_pretrained(
106
+ "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
107
  use_fast=False,
108
  padding_side="left",
109
  trust_remote_code=True,
110
  )
111
  model = AutoModelForCausalLM.from_pretrained(
112
+ "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
113
  trust_remote_code=True,
114
  torch_dtype=torch.float16,
115
  device_map={"": "cuda:0"},
 
148
  )
149
 
150
  tokenizer = AutoTokenizer.from_pretrained(
151
+ "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
152
  use_fast=False,
153
  padding_side="left",
154
  trust_remote_code=True,
155
  )
156
  model = AutoModelForCausalLM.from_pretrained(
157
+ "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
158
  trust_remote_code=True,
159
  torch_dtype=torch.float16,
160
  device_map={"": "cuda:0"},