add tokenizer
- added_tokens.json +1 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
added_tokens.json ADDED
@@ -0,0 +1 @@
+{"<s>": 67, "</s>": 68, "[PAD]": 69}
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "[PAD]", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
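The config above selects the `Wav2Vec2CTCTokenizer` class and points `name_or_path` at `./`, so the files in this commit can be loaded straight from the repository directory. A minimal sketch, assuming the four committed files sit in the working directory:

```python
# A minimal sketch: load the tokenizer files committed above, assuming they
# sit in the current directory (the "./" that name_or_path refers to).
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# The special tokens resolve to the ids registered in added_tokens.json.
print(tokenizer.bos_token, tokenizer.bos_token_id)  # <s> 67
print(tokenizer.eos_token, tokenizer.eos_token_id)  # </s> 68
print(tokenizer.pad_token, tokenizer.pad_token_id)  # [PAD] 69
print(tokenizer.word_delimiter_token)               # | (id 0 in vocab.json)
```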
vocab.json ADDED
@@ -0,0 +1 @@
+{"_": 1, "e": 2, "g": 3, "t": 4, "«": 5, "»": 6, "،": 7, "؛": 8, "؟": 9, "ء": 10, "آ": 11, "أ": 12, "ؤ": 13, "إ": 14, "ئ": 15, "ا": 16, "ب": 17, "ة": 18, "ت": 19, "ث": 20, "ج": 21, "ح": 22, "خ": 23, "د": 24, "ذ": 25, "ر": 26, "ز": 27, "س": 28, "ش": 29, "ص": 30, "ض": 31, "ط": 32, "ظ": 33, "ع": 34, "غ": 35, "ـ": 36, "ف": 37, "ق": 38, "ك": 39, "ل": 40, "م": 41, "ن": 42, "ه": 43, "و": 44, "ى": 45, "ي": 46, "ً": 47, "ٌ": 48, "ٍ": 49, "َ": 50, "ُ": 51, "ِ": 52, "ّ": 53, "ْ": 54, "ٰ": 55, "چ": 56, "ڨ": 57, "ک": 58, "ھ": 59, "ی": 60, "ۖ": 61, "ۚ": 62, "—": 63, "☭": 64, "ﺃ": 65, "ﻻ": 66, "|": 0}
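This is a character-level CTC vocabulary (mostly Arabic letters and diacritics, ids 0–66, extended to 70 by the added special tokens), with `|` at id 0 standing in for the space between words. Continuing from the loading sketch above, a small decoding example with a hypothetical id sequence built from this mapping:

```python
# Hypothetical id sequence spelling "السلام" ("peace") character by character
# using the vocab.json mapping above: ا=16, ل=40, س=28, م=41.
ids = [16, 40, 28, 40, 16, 41]
print(tokenizer.decode(ids))  # السلام

# The word delimiter "|" (id 0) is rendered as a space between words
# (here followed by عليك: ع=34, ل=40, ي=46, ك=39).
print(tokenizer.decode([16, 40, 28, 40, 16, 41, 0, 34, 40, 46, 39]))  # السلام عليك
```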