iamTangsang committed on
Commit
8219ce8
1 Parent(s): 3acbfa0

Upload Final Tokenizer

added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "</s>": 66,
+   "<s>": 65
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "__PAD__",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "__UNK__",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": true,
+     "single_word": false
+   }
+ }
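
In a Wav2Vec2 CTC setup the pad token ("__PAD__", id 0 in vocab.json below) conventionally doubles as the CTC blank symbol, while "__UNK__" absorbs characters outside the vocabulary. A minimal sketch, assuming the four uploaded JSON files have been checked out into a local directory named ./final_tokenizer (the path is illustrative, not part of the commit), of how the special tokens resolve to ids:

import json

# Assumption: local checkout of the files added in this commit.
TOKENIZER_DIR = "./final_tokenizer"

with open(f"{TOKENIZER_DIR}/vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)
with open(f"{TOKENIZER_DIR}/added_tokens.json", encoding="utf-8") as f:
    vocab.update(json.load(f))  # "<s>" -> 65 and "</s>" -> 66 extend the 65-symbol base vocab
with open(f"{TOKENIZER_DIR}/special_tokens_map.json", encoding="utf-8") as f:
    special = json.load(f)

for name, tok in special.items():
    content = tok["content"] if isinstance(tok, dict) else tok
    print(name, content, vocab[content])
# Expected: bos_token <s> 65, eos_token </s> 66, pad_token __PAD__ 0, unk_token __UNK__ 1
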
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "__PAD__",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "1": {
+       "content": "__UNK__",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "65": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "66": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": false,
+   "eos_token": "</s>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "__PAD__",
+   "processor_class": "Wav2Vec2Processor",
+   "replace_word_delimiter_char": " ",
+   "target_lang": null,
+   "tokenizer_class": "Wav2Vec2CTCTokenizer",
+   "unk_token": "__UNK__",
+   "word_delimiter_token": "|"
+ }
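
tokenizer_config.json declares Wav2Vec2CTCTokenizer as the tokenizer class, Wav2Vec2Processor as the wrapping processor, and "|" as the word-delimiter token that stands in for spaces during CTC decoding. A hedged loading sketch with Hugging Face transformers, again assuming the files sit in a local ./final_tokenizer directory (the repo id is not named in this commit, so the path is an assumption):

from transformers import Wav2Vec2CTCTokenizer

# Assumption: ./final_tokenizer holds the four JSON files from this commit.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./final_tokenizer")

print(tokenizer.vocab_size)                          # 65 base symbols from vocab.json
print(tokenizer.pad_token, tokenizer.pad_token_id)   # __PAD__ 0, also used as the CTC blank
print(tokenizer.unk_token, tokenizer.unk_token_id)   # __UNK__ 1
print(tokenizer.word_delimiter_token)                # "|", mapped back to " " when decoding
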
vocab.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "__PAD__": 0,
+   "__UNK__": 1,
+   "|": 2,
+   "ँ": 3,
+   "ं": 4,
+   "ः": 5,
+   "अ": 6,
+   "आ": 7,
+   "इ": 8,
+   "ई": 9,
+   "उ": 10,
+   "ऊ": 11,
+   "ऋ": 12,
+   "ए": 13,
+   "ऐ": 14,
+   "ओ": 15,
+   "औ": 16,
+   "क": 17,
+   "ख": 18,
+   "ग": 19,
+   "घ": 20,
+   "ङ": 21,
+   "च": 22,
+   "छ": 23,
+   "ज": 24,
+   "झ": 25,
+   "ञ": 26,
+   "ट": 27,
+   "ठ": 28,
+   "ड": 29,
+   "ढ": 30,
+   "ण": 31,
+   "त": 32,
+   "थ": 33,
+   "द": 34,
+   "ध": 35,
+   "न": 36,
+   "प": 37,
+   "फ": 38,
+   "ब": 39,
+   "भ": 40,
+   "म": 41,
+   "य": 42,
+   "र": 43,
+   "ल": 44,
+   "व": 45,
+   "श": 46,
+   "ष": 47,
+   "स": 48,
+   "ह": 49,
+   "़": 50,
+   "ा": 51,
+   "ि": 52,
+   "ी": 53,
+   "ु": 54,
+   "ू": 55,
+   "ृ": 56,
+   "े": 57,
+   "ै": 58,
+   "ॉ": 59,
+   "ो": 60,
+   "ौ": 61,
+   "्": 62,
+   "ॐ": 63,
+   "ॠ": 64
+ }
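
vocab.json is a character-level Devanagari inventory: 65 symbols covering the pad/blank and unknown tokens, the "|" word delimiter, and 62 Devanagari letters and signs, which suits character-level CTC training. A small round-trip sketch under the same ./final_tokenizer assumption (the sample phrase is illustrative only; note that Wav2Vec2CTCTokenizer decodes CTC-style and collapses consecutive repeated ids):

from transformers import Wav2Vec2CTCTokenizer

# Assumption: local directory containing the files uploaded in this commit.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./final_tokenizer")

text = "नमस्ते संसार"             # illustrative phrase with no consecutive repeated characters
ids = tokenizer(text).input_ids
print(ids)                        # the space is encoded as the "|" delimiter, id 2
print(tokenizer.decode(ids))      # "|" is turned back into a space -> "नमस्ते संसार"
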