Hack90 committed
Commit 14a772e · verified · 1 Parent(s): fdb75e2

Upload folder using huggingface_hub

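The commit message is the default one written by `huggingface_hub`'s `upload_folder`. A minimal sketch of how a commit like this is typically produced (the local folder path and repo id below are placeholders, not taken from this commit):

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Upload every file in a local folder as a single commit. The large files
# (tokenizer.json, vocab.txt) end up stored via Git LFS, matching the
# `filter=lfs` rules added to .gitattributes in the diff below.
api.upload_folder(
    folder_path="./custom_tokenizer",  # placeholder local path
    repo_id="user/repo",               # placeholder repo id
    repo_type="model",
)
```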
.gitattributes CHANGED
@@ -61,3 +61,5 @@ DNA-LLM/taxa_tokenizer/custom_tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 DNA-LLM/taxa_tokenizer/custom_tokenizer/vocab.txt filter=lfs diff=lfs merge=lfs -text
 custom_tokenizer/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 custom_tokenizer/vocab.txt filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+vocab.txt filter=lfs diff=lfs merge=lfs -text
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33364a21dda031340a010692ccb33a940ed2308725dcd63b909e1f9c05f5409b
+size 56212000
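Only the Git LFS pointer (oid and size, roughly 56 MB for the real file) is committed here; the actual tokenizer.json lives in LFS storage. A minimal sketch of fetching the resolved file with `hf_hub_download`, assuming a placeholder repo id:

```python
from huggingface_hub import hf_hub_download

# Downloads the real ~56 MB tokenizer.json (the LFS pointer above is
# resolved by the Hub) and returns the local cache path.
path = hf_hub_download(repo_id="user/repo", filename="tokenizer.json")
print(path)
```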
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "added_tokens_decoder": {},
+  "clean_up_tokenization_spaces": false,
+  "model_max_length": 1000000000000000019884624838656,
+  "model_type": "wordlevel",
+  "special_tokens": {
+    "pad_token": "<pad>",
+    "unk_token": "<unk>"
+  },
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "vocab_size": 2231728
+}
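The config declares a word-level `PreTrainedTokenizerFast` with `<pad>`/`<unk>` special tokens and a vocabulary of 2,231,728 entries. A minimal sketch of loading and using it, assuming a placeholder repo id and an example input string (not verified against this exact repo):

```python
from transformers import AutoTokenizer

# `tokenizer_class` in tokenizer_config.json resolves to PreTrainedTokenizerFast,
# which is backed by the uploaded tokenizer.json.
tokenizer = AutoTokenizer.from_pretrained("user/repo")  # placeholder repo id

print(tokenizer.vocab_size)   # expected: 2231728, per the config above
enc = tokenizer("ACGT ACGT")  # placeholder input for a word-level DNA tokenizer
print(enc["input_ids"])
```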
vocab.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a11df56d03eb6e98062870c98358b17bdf153061b65e33d62b0d459a052f9c88
+size 17151043