raygx committed
Commit
5730e48
Parent: ea0b5b0

Upload model

Files changed (4)
  1. README.md +2 -2
  2. config.json +5 -5
  3. generation_config.json +4 -4
  4. tf_model.h5 +2 -2
README.md CHANGED
@@ -34,7 +34,7 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 41670, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.02}
+ - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 41322, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 2000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.03}
  - training_precision: mixed_bfloat16
 
  ### Training results
@@ -43,7 +43,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - Transformers 4.31.0
+ - Transformers 4.32.0
  - TensorFlow 2.12.0
  - Datasets 2.14.4
  - Tokenizers 0.13.3
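The optimizer dict in the README is the standard Keras serialization emitted by the `create_optimizer` helper in transformers. A minimal sketch of rebuilding the new settings, assuming that helper was used; `num_train_steps` is inferred as `warmup_steps + decay_steps` (2000 + 41322), since `create_optimizer` sets `decay_steps = num_train_steps - num_warmup_steps`, and the actual training script is not part of this commit:

```python
from transformers import create_optimizer

# Rebuilds AdamWeightDecay with a WarmUp-wrapped PolynomialDecay schedule,
# matching the serialized dict in the updated README.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,            # initial_learning_rate
    num_train_steps=43322,   # assumption: warmup_steps (2000) + decay_steps (41322)
    num_warmup_steps=2000,   # warmup_steps in the new config
    weight_decay_rate=0.03,  # weight_decay_rate in the new config
)
```

The remaining fields (`beta_1=0.9`, `beta_2=0.999`, `epsilon=1e-08`, linear `power=1.0` decay to 0.0) are the helper's defaults, which is consistent with the serialized dict above.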
config.json CHANGED
@@ -6,9 +6,9 @@
     "GPT2LMHeadModel"
   ],
   "attn_pdrop": 0.1,
-  "bos_token_id": 0,
+  "bos_token_id": 1,
   "embd_pdrop": 0.1,
-  "eos_token_id": 0,
+  "eos_token_id": 2,
   "id2label": {
     "0": "LABEL_0"
   },
@@ -24,7 +24,7 @@
   "n_inner": null,
   "n_layer": 6,
   "n_positions": 1024,
-  "pad_token_id": 50002,
+  "pad_token_id": 3,
   "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
   "scale_attn_by_inverse_layer_idx": false,
@@ -40,7 +40,7 @@
       "max_length": 50
     }
   },
-  "transformers_version": "4.31.0",
+  "transformers_version": "4.32.0",
   "use_cache": true,
-  "vocab_size": 50003
+  "vocab_size": 50000
 }
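The substantive change: the special tokens move from the tail of the old 50,003-entry vocabulary (pad was 50002, and bos/eos shared id 0) to dedicated low ids (bos=1, eos=2, pad=3) in a trimmed 50,000-entry vocabulary. A quick sanity check, as a sketch; `raygx/<model>` is a placeholder, since the repo id is not shown on this page:

```python
from transformers import AutoConfig

# Pin to this commit so the check reflects exactly the config uploaded here.
config = AutoConfig.from_pretrained("raygx/<model>", revision="5730e48")
assert (config.bos_token_id, config.eos_token_id, config.pad_token_id) == (1, 2, 3)
assert config.vocab_size == 50000
```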
generation_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "_from_model_config": true,
-  "bos_token_id": 0,
-  "eos_token_id": 0,
-  "pad_token_id": 50002,
-  "transformers_version": "4.31.0"
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 3,
+  "transformers_version": "4.32.0"
 }
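Because generation_config.json is updated in lockstep with config.json, `generate()` under Transformers 4.32 picks up the new ids with no extra arguments. A sketch of confirming that, again with the placeholder repo id:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("raygx/<model>", revision="5730e48")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)
# Expected with this commit: 1 2 3
```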
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:45c7beec991e6a29d32863c136116dd37bed53440e955f44d098994a1f362d3d
-size 326965192
+oid sha256:754f87b2937af6b51ff75476368851c0f5869692a890f456bda5221bb42955a2
+size 326955976
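tf_model.h5 is stored with Git LFS, so the diff shows only the pointer file; the oid is the SHA-256 of the new weights blob. The file also shrank by exactly 9,216 bytes, consistent with three dropped float32 embedding rows at the usual GPT-2 n_embd of 768 (3 × 768 × 4 bytes), though n_embd itself is not shown in this diff. A sketch of verifying a download against the pointer, assuming the huggingface_hub client and the same placeholder repo id:

```python
import hashlib

from huggingface_hub import hf_hub_download

path = hf_hub_download("raygx/<model>", "tf_model.h5", revision="5730e48")

# Hash in 1 MiB chunks; the digest should equal the oid in the LFS pointer.
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "754f87b2937af6b51ff75476368851c0f5869692a890f456bda5221bb42955a2"
```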