gxy committed
Commit afd09d2
1 Parent(s): 866e84e

FEAT: first commit
.gitattributes CHANGED
@@ -30,3 +30,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
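The added line is exactly what `git lfs track "pytorch_model.bin"` writes into .gitattributes, routing the checkpoint through Git LFS rather than the regular object database. A minimal sketch, assuming it runs from the root of a local clone of this repo, that lists every LFS-tracked pattern:

```python
# List the patterns in .gitattributes that are routed through Git LFS.
# Assumes the script runs from the root of a local clone of this repo.
from pathlib import Path

for line in Path(".gitattributes").read_text().splitlines():
    parts = line.split()
    # An LFS rule looks like: <pattern> filter=lfs diff=lfs merge=lfs -text
    if len(parts) > 1 and "filter=lfs" in parts[1:]:
        print(parts[0])  # e.g. pytorch_model.bin
```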
config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "architectures": [
+    "MegatronBertForMaskedLM"
+  ],
+  "vocab_size": 21248,
+  "hidden_size": 2560,
+  "num_hidden_layers": 48,
+  "num_attention_heads": 40,
+  "hidden_act": "gelu",
+  "intermediate_size": 10240,
+  "hidden_dropout_prob": 0.1,
+  "attention_probs_dropout_prob": 0.1,
+  "max_position_embeddings": 512,
+  "type_vocab_size": 2,
+  "initializer_range": 0.02,
+  "layer_norm_eps": 1e-12,
+  "gradient_checkpointing": false,
+  "position_embedding_type": "absolute",
+  "use_cache": false,
+  "model_type": "megatron-bert"
+}
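These hyperparameters (48 layers, hidden size 2560, 40 attention heads, 10240-wide MLP) pin down the model size. A minimal sketch, assuming a `transformers` version that ships megatron-bert support, that rebuilds the config locally and estimates the parameter count it implies, without downloading the checkpoint:

```python
# Rebuild this config and estimate the parameter count it implies.
# Assumes a transformers version with megatron-bert support.
from transformers import MegatronBertConfig

config = MegatronBertConfig(
    vocab_size=21248,
    hidden_size=2560,
    num_hidden_layers=48,
    num_attention_heads=40,
    intermediate_size=10240,
    max_position_embeddings=512,
    type_vocab_size=2,
    use_cache=False,
)

# Rough count: token/position/type embeddings, plus per layer ~4*h^2 for the
# attention projections and ~2*h*i for the MLP (biases/LayerNorms ignored).
h, i, n = config.hidden_size, config.intermediate_size, config.num_hidden_layers
emb = (config.vocab_size + config.max_position_embeddings + config.type_vocab_size) * h
per_layer = 4 * h * h + 2 * h * i
print(f"~{(emb + n * per_layer) / 1e9:.2f}B parameters")  # ~3.8B
```

The estimate lands near the 3.9B in the repo name once biases, LayerNorms, and the MLM head are counted, and the ~7.7 GB checkpoint below is consistent with roughly that many parameters stored in 16-bit precision.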
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a7bbc7c726ebbe1f7a260171f1190f16ba1fff40610b059ddc8b1c9e1244b45
+size 7690876653
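This is a Git LFS pointer file, not the weights themselves: the three lines record the LFS spec version, the SHA-256 of the real payload, and its size in bytes (about 7.7 GB). A minimal sketch that verifies a downloaded checkpoint against the pointer; the local path is an assumption:

```python
# Verify a downloaded pytorch_model.bin against the oid/size recorded in
# the LFS pointer above. The path is a hypothetical download location.
import hashlib
from pathlib import Path

EXPECTED_OID = "8a7bbc7c726ebbe1f7a260171f1190f16ba1fff40610b059ddc8b1c9e1244b45"
EXPECTED_SIZE = 7690876653

path = Path("pytorch_model.bin")  # adjust to where the checkpoint was saved
assert path.stat().st_size == EXPECTED_SIZE, "size mismatch"

sha = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```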
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
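These are the five standard BERT special tokens. A minimal sketch, assuming a local clone of this repo in the current directory, showing that the tokenizer picks them up:

```python
# Load the tokenizer from a local clone of this repo and confirm the
# special tokens declared above are wired in.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")  # path to the local clone
print(tokenizer.special_tokens_map)
# -> {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
#     'cls_token': '[CLS]', 'mask_token': '[MASK]'}
```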
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+{
+
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "name_or_path": "/cognitive_comp/gaoxinyu/hf_hub/Erlangshen-MegatronBert-3.9B",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": null,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
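In practice these settings mean each CJK character becomes its own token (`tokenize_chinese_chars`) and Latin text is folded to lowercase (`do_lower_case`) before WordPiece runs. A minimal sketch, again assuming a local clone of this repo:

```python
# Illustrate the basic-tokenization settings above: Chinese characters are
# split one per token and Latin text is lower-cased. The exact subword
# pieces depend on vocab.txt.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained(".")
print(tokenizer.tokenize("Erlangshen 是一个 MegatronBert 模型"))
# e.g. lower-cased Latin pieces followed by '是', '一', '个', ... --
# character-split Chinese; actual pieces depend on the vocabulary.
```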
vocab.txt ADDED
The diff for this file is too large to render. See raw diff