melissasanabria committed
Commit
587ca32
1 Parent(s): ae84d2c

Upload tokenizer

Files changed (3):
  1. special_tokens_map.json +1 -0
  2. tokenizer.json +18 -17
  3. tokenizer_config.json +4 -16
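
For context, a commit titled "Upload tokenizer" that touches exactly these three files is typically what transformers' push_to_hub produces when a tokenizer object is pushed. A minimal sketch assuming that workflow; the local path and repo id below are placeholders, not the actual repository:

from transformers import AutoTokenizer

# Load the tokenizer from a local directory (placeholder path).
tok = AutoTokenizer.from_pretrained("./local-tokenizer")

# push_to_hub serializes tokenizer.json, tokenizer_config.json and
# special_tokens_map.json and commits them to the Hub; "Upload tokenizer"
# is the default commit message transformers uses for tokenizer objects.
tok.push_to_hub("username/repo-name")  # placeholder repo id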
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{}
tokenizer.json CHANGED
@@ -3,50 +3,50 @@
   "truncation": null,
   "padding": null,
   "added_tokens": [
+    {
+      "id": 0,
+      "content": "[PAD]",
+      "single_word": false,
+      "lstrip": false,
+      "rstrip": false,
+      "normalized": false,
+      "special": true
+    },
     {
       "id": 1,
-      "special": true,
       "content": "[UNK]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 2,
-      "special": true,
       "content": "[CLS]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 3,
-      "special": true,
       "content": "[SEP]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
-    },
-    {
-      "id": 0,
-      "special": true,
-      "content": "[PAD]",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     },
     {
       "id": 4,
-      "special": true,
       "content": "[MASK]",
       "single_word": false,
       "lstrip": false,
       "rstrip": false,
-      "normalized": false
+      "normalized": false,
+      "special": true
     }
   ],
   "normalizer": null,
@@ -136,6 +136,7 @@
   "continuing_subword_prefix": null,
   "end_of_word_suffix": null,
   "fuse_unk": false,
+  "byte_fallback": false,
   "vocab": {
     "[PAD]": 0,
     "[UNK]": 1,
tokenizer_config.json CHANGED
@@ -1,17 +1,5 @@
 {
-  "clean_up_tokenization_spaces": true,
-  "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
-  "do_lower_case": true,
-  "mask_token": "[MASK]",
-  "max_len": 512,
-  "model_max_length": 512,
-  "never_split": null,
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "unk_token": "[UNK]"
-}
-
+  "clean_up_tokenization_spaces": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "tokenizer_class": "PreTrainedTokenizerFast"
+}
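
After this commit, tokenizer_config.json no longer names the special tokens or a real length limit: model_max_length is set to 1000000000000000019884624838656, the "effectively unlimited" sentinel transformers writes when no limit is configured, and special_tokens_map.json is an empty mapping. A minimal loading sketch, assuming a downstream model that still expects BERT-style special tokens and a 512-token limit (the repo id is a placeholder):

from transformers import AutoTokenizer

# The special tokens still exist in tokenizer.json's added_tokens, but the
# config no longer assigns them roles, so they can be passed explicitly.
tok = AutoTokenizer.from_pretrained(
    "username/repo-name",   # placeholder repo id
    model_max_length=512,   # the limit the previous config declared
    unk_token="[UNK]", pad_token="[PAD]",
    cls_token="[CLS]", sep_token="[SEP]", mask_token="[MASK]",
)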