RaphaelMourad committed on
Commit 7499bab
1 Parent(s): a17ceeb

Upload 10 files

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_name_or_path": "data/models/Mixtral-8x7B-v0.2-dna",
+ "architectures": [
+ "MixtralForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 768,
+ "max_position_embeddings": 512,
+ "model_type": "mixtral",
+ "num_attention_heads": 8,
+ "num_experts_per_tok": 1,
+ "num_hidden_layers": 8,
+ "num_key_value_heads": 8,
+ "num_local_experts": 8,
+ "output_router_logits": false,
+ "rms_norm_eps": 1e-05,
+ "rope_theta": 1000000.0,
+ "router_aux_loss_coef": 0.02,
+ "router_jitter_noise": 0.0,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.42.3",
+ "use_cache": true,
+ "vocab_size": 69
+ }
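
The config above describes a small Mixtral-style mixture-of-experts causal LM adapted to DNA: 8 layers, hidden size 768, 8 local experts with 1 expert routed per token, and a 69-token vocabulary. A minimal loading sketch with transformers, assuming the files end up in a Hub repo or local directory; the identifier "RaphaelMourad/Mixtral-8x7B-v0.2-dna" is a placeholder, not taken from this commit:

import torch
from transformers import AutoConfig, AutoModelForCausalLM

# Placeholder identifier: substitute the actual Hub repo id or a local
# directory containing config.json and model.safetensors from this commit.
model_id = "RaphaelMourad/Mixtral-8x7B-v0.2-dna"

config = AutoConfig.from_pretrained(model_id)
print(config.model_type, config.hidden_size, config.num_local_experts)  # mixtral 768 8

# torch_dtype matches the "torch_dtype": "bfloat16" entry above.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
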
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.42.3"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb71712478ca13d905b781ccc20f01cd790ecfe6009e0988185e6f260721751
+ size 264608168
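
The three lines above are a Git LFS pointer, not the weights themselves: the ~264 MB model.safetensors blob lives in LFS storage and is identified by its SHA-256. A sketch of fetching the real file and checking it against the recorded oid, using the huggingface_hub client and the same placeholder repo id as above:

import hashlib
from huggingface_hub import hf_hub_download

# Placeholder repo id; see the note under config.json.
path = hf_hub_download("RaphaelMourad/Mixtral-8x7B-v0.2-dna", "model.safetensors")

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
# Should equal the oid sha256 recorded in the LFS pointer above.
print(digest.hexdigest())
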
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3976e39bd4bbac0908cb211384118099c4e48af715c726ef4b6d1cbf191bbcd3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab05a4708e4ff22127c526406c3ddcaf2a88a18fede6043f2ade0d78b3ee2566
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[EOS]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
@@ -0,0 +1,219 @@
+ {
+ "version": "1.0",
+ "truncation": {
+ "direction": "Right",
+ "max_length": 100,
+ "strategy": "LongestFirst",
+ "stride": 0
+ },
+ "padding": {
+ "strategy": "BatchLongest",
+ "direction": "Right",
+ "pad_to_multiple_of": null,
+ "pad_id": 64,
+ "pad_type_id": 0,
+ "pad_token": "[EOS]"
+ },
+ "added_tokens": [
+ {
+ "id": 64,
+ "content": "[UNK]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 65,
+ "content": "[CLS]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 66,
+ "content": "[SEP]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 67,
+ "content": "[PAD]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 68,
+ "content": "[MASK]",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": null,
+ "pre_tokenizer": {
+ "type": "Whitespace"
+ },
+ "post_processor": {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "[CLS]",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "[SEP]",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "[CLS]",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "[SEP]",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "[SEP]",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "[CLS]": {
+ "id": "[CLS]",
+ "ids": [
+ 65
+ ],
+ "tokens": [
+ "[CLS]"
+ ]
+ },
+ "[SEP]": {
+ "id": "[SEP]",
+ "ids": [
+ 66
+ ],
+ "tokens": [
+ "[SEP]"
+ ]
+ }
+ }
+ },
+ "decoder": null,
+ "model": {
+ "type": "WordLevel",
+ "vocab": {
+ "AAA": 0,
+ "AAT": 1,
+ "AAG": 2,
+ "AAC": 3,
+ "ATA": 4,
+ "ATT": 5,
+ "ATG": 6,
+ "ATC": 7,
+ "AGA": 8,
+ "AGT": 9,
+ "AGG": 10,
+ "AGC": 11,
+ "ACA": 12,
+ "ACT": 13,
+ "ACG": 14,
+ "ACC": 15,
+ "TAA": 16,
+ "TAT": 17,
+ "TAG": 18,
+ "TAC": 19,
+ "TTA": 20,
+ "TTT": 21,
+ "TTG": 22,
+ "TTC": 23,
+ "TGA": 24,
+ "TGT": 25,
+ "TGG": 26,
+ "TGC": 27,
+ "TCA": 28,
+ "TCT": 29,
+ "TCG": 30,
+ "TCC": 31,
+ "GAA": 32,
+ "GAT": 33,
+ "GAG": 34,
+ "GAC": 35,
+ "GTA": 36,
+ "GTT": 37,
+ "GTG": 38,
+ "GTC": 39,
+ "GGA": 40,
+ "GGT": 41,
+ "GGG": 42,
+ "GGC": 43,
+ "GCA": 44,
+ "GCT": 45,
+ "GCG": 46,
+ "GCC": 47,
+ "CAA": 48,
+ "CAT": 49,
+ "CAG": 50,
+ "CAC": 51,
+ "CTA": 52,
+ "CTT": 53,
+ "CTG": 54,
+ "CTC": 55,
+ "CGA": 56,
+ "CGT": 57,
+ "CGG": 58,
+ "CGC": 59,
+ "CCA": 60,
+ "CCT": 61,
+ "CCG": 62,
+ "CCC": 63,
+ "[UNK]": 64,
+ "[CLS]": 65,
+ "[SEP]": 66,
+ "[PAD]": 67,
+ "[MASK]": 68
+ },
+ "unk_token": "[UNK]"
+ }
+ }
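
tokenizer.json defines a WordLevel vocabulary over the 64 DNA 3-mers plus five special tokens, with a Whitespace pre-tokenizer and a [CLS] ... [SEP] template post-processor, so sequences must be passed in as space-separated 3-mers. A small sketch with the tokenizers library; the non-overlapping split below is an assumption (overlapping 3-mers would also be valid input), and the path is whatever local copy of this tokenizer.json you have:

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # local copy of the file above

# The Whitespace pre-tokenizer expects space-separated 3-mers.
seq = "ATGCGTACCTGA"
kmers = " ".join(seq[i:i + 3] for i in range(0, len(seq) - 2, 3))

enc = tok.encode(kmers)
print(enc.tokens)  # ['[CLS]', 'ATG', 'CGT', 'ACC', 'TGA', '[SEP]'] via TemplateProcessing
print(enc.ids)     # ids follow the vocab above, e.g. ATG -> 6, [CLS] -> 65

Note that the padding block above names pad_token "[EOS]" with pad_id 64, which is [UNK] in this vocabulary, while [PAD] sits at id 67, so batched padding behaviour is worth verifying before relying on it.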
tokenizer_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "added_tokens_decoder": {
+ "64": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "65": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "66": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "67": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "68": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "[EOS]",
+ "sep_token": "[SEP]",
+ "tokenizer_class": "PreTrainedTokenizerFast",
+ "unk_token": "[UNK]"
+ }
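
tokenizer_config.json exposes the same special tokens through PreTrainedTokenizerFast, so tokenizer and model can be used end to end from transformers. A minimal sketch that pools the last hidden state into a per-sequence embedding; the repo id is the same placeholder as above, and mean pooling is just one reasonable choice, not something prescribed by this commit:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "RaphaelMourad/Mixtral-8x7B-v0.2-dna"  # placeholder, see above

tokenizer = AutoTokenizer.from_pretrained(model_id)  # PreTrainedTokenizerFast per tokenizer_class
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
model.eval()

# Space-separated 3-mers, matching the Whitespace pre-tokenizer.
inputs = tokenizer("ATG CGT ACC TGA", return_tensors="pt")

with torch.no_grad():
    out = model(**inputs, output_hidden_states=True)

embedding = out.hidden_states[-1].mean(dim=1)  # shape (1, hidden_size) == (1, 768)
print(embedding.shape)
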
trainer_state.json ADDED
@@ -0,0 +1,1137 @@
1
+ {
2
+ "best_metric": 2.51826810836792,
3
+ "best_model_checkpoint": "./results/models/checkpoint-60605",
4
+ "epoch": 31.0,
5
+ "eval_steps": 500,
6
+ "global_step": 60605,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.2557544757033248,
13
+ "grad_norm": 0.228515625,
14
+ "learning_rate": 0.0009948849104859335,
15
+ "loss": 3.8573,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.5115089514066496,
20
+ "grad_norm": 0.27734375,
21
+ "learning_rate": 0.000989769820971867,
22
+ "loss": 3.7698,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 0.7672634271099744,
27
+ "grad_norm": 0.26171875,
28
+ "learning_rate": 0.0009846547314578005,
29
+ "loss": 3.7481,
30
+ "step": 1500
31
+ },
32
+ {
33
+ "epoch": 1.0,
34
+ "eval_loss": 3.7266085147857666,
35
+ "eval_runtime": 1.0357,
36
+ "eval_samples_per_second": 482.766,
37
+ "eval_steps_per_second": 0.966,
38
+ "step": 1955
39
+ },
40
+ {
41
+ "epoch": 1.0230179028132993,
42
+ "grad_norm": 0.376953125,
43
+ "learning_rate": 0.000979539641943734,
44
+ "loss": 3.7319,
45
+ "step": 2000
46
+ },
47
+ {
48
+ "epoch": 1.278772378516624,
49
+ "grad_norm": 1.28125,
50
+ "learning_rate": 0.0009744245524296675,
51
+ "loss": 3.7118,
52
+ "step": 2500
53
+ },
54
+ {
55
+ "epoch": 1.5345268542199488,
56
+ "grad_norm": 0.25,
57
+ "learning_rate": 0.000969309462915601,
58
+ "loss": 3.6851,
59
+ "step": 3000
60
+ },
61
+ {
62
+ "epoch": 1.7902813299232738,
63
+ "grad_norm": 0.279296875,
64
+ "learning_rate": 0.0009641943734015346,
65
+ "loss": 3.628,
66
+ "step": 3500
67
+ },
68
+ {
69
+ "epoch": 2.0,
70
+ "eval_loss": 3.5190136432647705,
71
+ "eval_runtime": 1.0477,
72
+ "eval_samples_per_second": 477.242,
73
+ "eval_steps_per_second": 0.954,
74
+ "step": 3910
75
+ },
76
+ {
77
+ "epoch": 2.0460358056265986,
78
+ "grad_norm": 0.345703125,
79
+ "learning_rate": 0.0009590792838874681,
80
+ "loss": 3.5437,
81
+ "step": 4000
82
+ },
83
+ {
84
+ "epoch": 2.3017902813299234,
85
+ "grad_norm": 0.279296875,
86
+ "learning_rate": 0.0009539641943734016,
87
+ "loss": 3.456,
88
+ "step": 4500
89
+ },
90
+ {
91
+ "epoch": 2.557544757033248,
92
+ "grad_norm": 0.275390625,
93
+ "learning_rate": 0.0009488491048593351,
94
+ "loss": 3.3486,
95
+ "step": 5000
96
+ },
97
+ {
98
+ "epoch": 2.813299232736573,
99
+ "grad_norm": 0.306640625,
100
+ "learning_rate": 0.0009437340153452686,
101
+ "loss": 3.2918,
102
+ "step": 5500
103
+ },
104
+ {
105
+ "epoch": 3.0,
106
+ "eval_loss": 3.189316511154175,
107
+ "eval_runtime": 1.0569,
108
+ "eval_samples_per_second": 473.077,
109
+ "eval_steps_per_second": 0.946,
110
+ "step": 5865
111
+ },
112
+ {
113
+ "epoch": 3.0690537084398977,
114
+ "grad_norm": 0.27734375,
115
+ "learning_rate": 0.0009386189258312021,
116
+ "loss": 3.2061,
117
+ "step": 6000
118
+ },
119
+ {
120
+ "epoch": 3.3248081841432224,
121
+ "grad_norm": 0.326171875,
122
+ "learning_rate": 0.0009335038363171356,
123
+ "loss": 3.1412,
124
+ "step": 6500
125
+ },
126
+ {
127
+ "epoch": 3.580562659846547,
128
+ "grad_norm": 0.26171875,
129
+ "learning_rate": 0.0009283887468030691,
130
+ "loss": 3.0956,
131
+ "step": 7000
132
+ },
133
+ {
134
+ "epoch": 3.836317135549872,
135
+ "grad_norm": 0.271484375,
136
+ "learning_rate": 0.0009232736572890026,
137
+ "loss": 3.0555,
138
+ "step": 7500
139
+ },
140
+ {
141
+ "epoch": 4.0,
142
+ "eval_loss": 3.0206379890441895,
143
+ "eval_runtime": 1.0312,
144
+ "eval_samples_per_second": 484.854,
145
+ "eval_steps_per_second": 0.97,
146
+ "step": 7820
147
+ },
148
+ {
149
+ "epoch": 4.092071611253197,
150
+ "grad_norm": 0.2490234375,
151
+ "learning_rate": 0.0009181585677749361,
152
+ "loss": 3.014,
153
+ "step": 8000
154
+ },
155
+ {
156
+ "epoch": 4.3478260869565215,
157
+ "grad_norm": 0.287109375,
158
+ "learning_rate": 0.0009130434782608695,
159
+ "loss": 2.9683,
160
+ "step": 8500
161
+ },
162
+ {
163
+ "epoch": 4.603580562659847,
164
+ "grad_norm": 0.265625,
165
+ "learning_rate": 0.0009079283887468031,
166
+ "loss": 2.9468,
167
+ "step": 9000
168
+ },
169
+ {
170
+ "epoch": 4.859335038363171,
171
+ "grad_norm": 0.267578125,
172
+ "learning_rate": 0.0009028132992327366,
173
+ "loss": 2.9253,
174
+ "step": 9500
175
+ },
176
+ {
177
+ "epoch": 5.0,
178
+ "eval_loss": 2.9162545204162598,
179
+ "eval_runtime": 1.0514,
180
+ "eval_samples_per_second": 475.536,
181
+ "eval_steps_per_second": 0.951,
182
+ "step": 9775
183
+ },
184
+ {
185
+ "epoch": 5.115089514066496,
186
+ "grad_norm": 0.3203125,
187
+ "learning_rate": 0.0008976982097186701,
188
+ "loss": 2.8851,
189
+ "step": 10000
190
+ },
191
+ {
192
+ "epoch": 5.370843989769821,
193
+ "grad_norm": 0.275390625,
194
+ "learning_rate": 0.0008925831202046036,
195
+ "loss": 2.8639,
196
+ "step": 10500
197
+ },
198
+ {
199
+ "epoch": 5.626598465473146,
200
+ "grad_norm": 0.294921875,
201
+ "learning_rate": 0.000887468030690537,
202
+ "loss": 2.8469,
203
+ "step": 11000
204
+ },
205
+ {
206
+ "epoch": 5.882352941176471,
207
+ "grad_norm": 0.26171875,
208
+ "learning_rate": 0.0008823529411764706,
209
+ "loss": 2.8301,
210
+ "step": 11500
211
+ },
212
+ {
213
+ "epoch": 6.0,
214
+ "eval_loss": 2.8372726440429688,
215
+ "eval_runtime": 1.0374,
216
+ "eval_samples_per_second": 481.997,
217
+ "eval_steps_per_second": 0.964,
218
+ "step": 11730
219
+ },
220
+ {
221
+ "epoch": 6.138107416879795,
222
+ "grad_norm": 0.27734375,
223
+ "learning_rate": 0.0008772378516624041,
224
+ "loss": 2.7998,
225
+ "step": 12000
226
+ },
227
+ {
228
+ "epoch": 6.3938618925831205,
229
+ "grad_norm": 0.255859375,
230
+ "learning_rate": 0.0008721227621483376,
231
+ "loss": 2.7802,
232
+ "step": 12500
233
+ },
234
+ {
235
+ "epoch": 6.649616368286445,
236
+ "grad_norm": 0.294921875,
237
+ "learning_rate": 0.0008670076726342711,
238
+ "loss": 2.7704,
239
+ "step": 13000
240
+ },
241
+ {
242
+ "epoch": 6.90537084398977,
243
+ "grad_norm": 0.2890625,
244
+ "learning_rate": 0.0008618925831202045,
245
+ "loss": 2.7605,
246
+ "step": 13500
247
+ },
248
+ {
249
+ "epoch": 7.0,
250
+ "eval_loss": 2.7794747352600098,
251
+ "eval_runtime": 1.0401,
252
+ "eval_samples_per_second": 480.743,
253
+ "eval_steps_per_second": 0.961,
254
+ "step": 13685
255
+ },
256
+ {
257
+ "epoch": 7.161125319693094,
258
+ "grad_norm": 0.279296875,
259
+ "learning_rate": 0.0008567774936061381,
260
+ "loss": 2.7314,
261
+ "step": 14000
262
+ },
263
+ {
264
+ "epoch": 7.41687979539642,
265
+ "grad_norm": 0.345703125,
266
+ "learning_rate": 0.0008516624040920716,
267
+ "loss": 2.7203,
268
+ "step": 14500
269
+ },
270
+ {
271
+ "epoch": 7.672634271099744,
272
+ "grad_norm": 0.318359375,
273
+ "learning_rate": 0.0008465473145780051,
274
+ "loss": 2.713,
275
+ "step": 15000
276
+ },
277
+ {
278
+ "epoch": 7.928388746803069,
279
+ "grad_norm": 0.31640625,
280
+ "learning_rate": 0.0008414322250639387,
281
+ "loss": 2.7029,
282
+ "step": 15500
283
+ },
284
+ {
285
+ "epoch": 8.0,
286
+ "eval_loss": 2.7313828468322754,
287
+ "eval_runtime": 1.0364,
288
+ "eval_samples_per_second": 482.427,
289
+ "eval_steps_per_second": 0.965,
290
+ "step": 15640
291
+ },
292
+ {
293
+ "epoch": 8.184143222506394,
294
+ "grad_norm": 0.3046875,
295
+ "learning_rate": 0.000836317135549872,
296
+ "loss": 2.6727,
297
+ "step": 16000
298
+ },
299
+ {
300
+ "epoch": 8.43989769820972,
301
+ "grad_norm": 0.259765625,
302
+ "learning_rate": 0.0008312020460358057,
303
+ "loss": 2.6709,
304
+ "step": 16500
305
+ },
306
+ {
307
+ "epoch": 8.695652173913043,
308
+ "grad_norm": 0.28125,
309
+ "learning_rate": 0.0008260869565217392,
310
+ "loss": 2.6632,
311
+ "step": 17000
312
+ },
313
+ {
314
+ "epoch": 8.951406649616368,
315
+ "grad_norm": 0.298828125,
316
+ "learning_rate": 0.0008209718670076727,
317
+ "loss": 2.6586,
318
+ "step": 17500
319
+ },
320
+ {
321
+ "epoch": 9.0,
322
+ "eval_loss": 2.697967767715454,
323
+ "eval_runtime": 1.5778,
324
+ "eval_samples_per_second": 316.897,
325
+ "eval_steps_per_second": 0.634,
326
+ "step": 17595
327
+ },
328
+ {
329
+ "epoch": 9.207161125319693,
330
+ "grad_norm": 0.328125,
331
+ "learning_rate": 0.0008158567774936062,
332
+ "loss": 2.6339,
333
+ "step": 18000
334
+ },
335
+ {
336
+ "epoch": 9.462915601023019,
337
+ "grad_norm": 0.28515625,
338
+ "learning_rate": 0.0008107416879795396,
339
+ "loss": 2.6225,
340
+ "step": 18500
341
+ },
342
+ {
343
+ "epoch": 9.718670076726342,
344
+ "grad_norm": 0.361328125,
345
+ "learning_rate": 0.0008056265984654732,
346
+ "loss": 2.6241,
347
+ "step": 19000
348
+ },
349
+ {
350
+ "epoch": 9.974424552429667,
351
+ "grad_norm": 0.2890625,
352
+ "learning_rate": 0.0008005115089514067,
353
+ "loss": 2.6182,
354
+ "step": 19500
355
+ },
356
+ {
357
+ "epoch": 10.0,
358
+ "eval_loss": 2.673384189605713,
359
+ "eval_runtime": 1.0686,
360
+ "eval_samples_per_second": 467.914,
361
+ "eval_steps_per_second": 0.936,
362
+ "step": 19550
363
+ },
364
+ {
365
+ "epoch": 10.230179028132993,
366
+ "grad_norm": 0.28125,
367
+ "learning_rate": 0.0007953964194373402,
368
+ "loss": 2.5897,
369
+ "step": 20000
370
+ },
371
+ {
372
+ "epoch": 10.485933503836318,
373
+ "grad_norm": 0.28515625,
374
+ "learning_rate": 0.0007902813299232737,
375
+ "loss": 2.5888,
376
+ "step": 20500
377
+ },
378
+ {
379
+ "epoch": 10.741687979539641,
380
+ "grad_norm": 0.294921875,
381
+ "learning_rate": 0.0007851662404092071,
382
+ "loss": 2.5878,
383
+ "step": 21000
384
+ },
385
+ {
386
+ "epoch": 10.997442455242966,
387
+ "grad_norm": 0.298828125,
388
+ "learning_rate": 0.0007800511508951407,
389
+ "loss": 2.5869,
390
+ "step": 21500
391
+ },
392
+ {
393
+ "epoch": 11.0,
394
+ "eval_loss": 2.6476247310638428,
395
+ "eval_runtime": 1.0522,
396
+ "eval_samples_per_second": 475.199,
397
+ "eval_steps_per_second": 0.95,
398
+ "step": 21505
399
+ },
400
+ {
401
+ "epoch": 11.253196930946292,
402
+ "grad_norm": 0.318359375,
403
+ "learning_rate": 0.0007749360613810742,
404
+ "loss": 2.5571,
405
+ "step": 22000
406
+ },
407
+ {
408
+ "epoch": 11.508951406649617,
409
+ "grad_norm": 0.3828125,
410
+ "learning_rate": 0.0007698209718670077,
411
+ "loss": 2.5603,
412
+ "step": 22500
413
+ },
414
+ {
415
+ "epoch": 11.764705882352942,
416
+ "grad_norm": 0.2890625,
417
+ "learning_rate": 0.0007647058823529411,
418
+ "loss": 2.5602,
419
+ "step": 23000
420
+ },
421
+ {
422
+ "epoch": 12.0,
423
+ "eval_loss": 2.6259398460388184,
424
+ "eval_runtime": 1.0589,
425
+ "eval_samples_per_second": 472.168,
426
+ "eval_steps_per_second": 0.944,
427
+ "step": 23460
428
+ },
429
+ {
430
+ "epoch": 12.020460358056265,
431
+ "grad_norm": 0.31640625,
432
+ "learning_rate": 0.0007595907928388746,
433
+ "loss": 2.554,
434
+ "step": 23500
435
+ },
436
+ {
437
+ "epoch": 12.27621483375959,
438
+ "grad_norm": 0.345703125,
439
+ "learning_rate": 0.0007544757033248082,
440
+ "loss": 2.532,
441
+ "step": 24000
442
+ },
443
+ {
444
+ "epoch": 12.531969309462916,
445
+ "grad_norm": 0.314453125,
446
+ "learning_rate": 0.0007493606138107417,
447
+ "loss": 2.53,
448
+ "step": 24500
449
+ },
450
+ {
451
+ "epoch": 12.787723785166241,
452
+ "grad_norm": 0.32421875,
453
+ "learning_rate": 0.0007442455242966752,
454
+ "loss": 2.5295,
455
+ "step": 25000
456
+ },
457
+ {
458
+ "epoch": 13.0,
459
+ "eval_loss": 2.6141037940979004,
460
+ "eval_runtime": 1.0318,
461
+ "eval_samples_per_second": 484.612,
462
+ "eval_steps_per_second": 0.969,
463
+ "step": 25415
464
+ },
465
+ {
466
+ "epoch": 13.043478260869565,
467
+ "grad_norm": 0.412109375,
468
+ "learning_rate": 0.0007391304347826086,
469
+ "loss": 2.5263,
470
+ "step": 25500
471
+ },
472
+ {
473
+ "epoch": 13.29923273657289,
474
+ "grad_norm": 0.30078125,
475
+ "learning_rate": 0.0007340153452685422,
476
+ "loss": 2.5082,
477
+ "step": 26000
478
+ },
479
+ {
480
+ "epoch": 13.554987212276215,
481
+ "grad_norm": 0.3203125,
482
+ "learning_rate": 0.0007289002557544757,
483
+ "loss": 2.5115,
484
+ "step": 26500
485
+ },
486
+ {
487
+ "epoch": 13.81074168797954,
488
+ "grad_norm": 0.306640625,
489
+ "learning_rate": 0.0007237851662404093,
490
+ "loss": 2.5082,
491
+ "step": 27000
492
+ },
493
+ {
494
+ "epoch": 14.0,
495
+ "eval_loss": 2.5942933559417725,
496
+ "eval_runtime": 1.0321,
497
+ "eval_samples_per_second": 484.463,
498
+ "eval_steps_per_second": 0.969,
499
+ "step": 27370
500
+ },
501
+ {
502
+ "epoch": 14.066496163682864,
503
+ "grad_norm": 0.32421875,
504
+ "learning_rate": 0.0007186700767263428,
505
+ "loss": 2.4949,
506
+ "step": 27500
507
+ },
508
+ {
509
+ "epoch": 14.322250639386189,
510
+ "grad_norm": 0.345703125,
511
+ "learning_rate": 0.0007135549872122762,
512
+ "loss": 2.4873,
513
+ "step": 28000
514
+ },
515
+ {
516
+ "epoch": 14.578005115089514,
517
+ "grad_norm": 0.333984375,
518
+ "learning_rate": 0.0007084398976982098,
519
+ "loss": 2.4895,
520
+ "step": 28500
521
+ },
522
+ {
523
+ "epoch": 14.83375959079284,
524
+ "grad_norm": 0.318359375,
525
+ "learning_rate": 0.0007033248081841433,
526
+ "loss": 2.4875,
527
+ "step": 29000
528
+ },
529
+ {
530
+ "epoch": 15.0,
531
+ "eval_loss": 2.5941433906555176,
532
+ "eval_runtime": 1.0612,
533
+ "eval_samples_per_second": 471.171,
534
+ "eval_steps_per_second": 0.942,
535
+ "step": 29325
536
+ },
537
+ {
538
+ "epoch": 15.089514066496164,
539
+ "grad_norm": 0.33203125,
540
+ "learning_rate": 0.0006982097186700768,
541
+ "loss": 2.4796,
542
+ "step": 29500
543
+ },
544
+ {
545
+ "epoch": 15.345268542199488,
546
+ "grad_norm": 0.330078125,
547
+ "learning_rate": 0.0006930946291560103,
548
+ "loss": 2.4671,
549
+ "step": 30000
550
+ },
551
+ {
552
+ "epoch": 15.601023017902813,
553
+ "grad_norm": 0.3828125,
554
+ "learning_rate": 0.0006879795396419437,
555
+ "loss": 2.4647,
556
+ "step": 30500
557
+ },
558
+ {
559
+ "epoch": 15.856777493606138,
560
+ "grad_norm": 0.33203125,
561
+ "learning_rate": 0.0006828644501278773,
562
+ "loss": 2.4729,
563
+ "step": 31000
564
+ },
565
+ {
566
+ "epoch": 16.0,
567
+ "eval_loss": 2.5811538696289062,
568
+ "eval_runtime": 1.0329,
569
+ "eval_samples_per_second": 484.078,
570
+ "eval_steps_per_second": 0.968,
571
+ "step": 31280
572
+ },
573
+ {
574
+ "epoch": 16.11253196930946,
575
+ "grad_norm": 0.341796875,
576
+ "learning_rate": 0.0006777493606138108,
577
+ "loss": 2.4574,
578
+ "step": 31500
579
+ },
580
+ {
581
+ "epoch": 16.36828644501279,
582
+ "grad_norm": 0.30859375,
583
+ "learning_rate": 0.0006726342710997443,
584
+ "loss": 2.4535,
585
+ "step": 32000
586
+ },
587
+ {
588
+ "epoch": 16.624040920716112,
589
+ "grad_norm": 0.33984375,
590
+ "learning_rate": 0.0006675191815856778,
591
+ "loss": 2.452,
592
+ "step": 32500
593
+ },
594
+ {
595
+ "epoch": 16.87979539641944,
596
+ "grad_norm": 0.30859375,
597
+ "learning_rate": 0.0006624040920716112,
598
+ "loss": 2.4508,
599
+ "step": 33000
600
+ },
601
+ {
602
+ "epoch": 17.0,
603
+ "eval_loss": 2.5716660022735596,
604
+ "eval_runtime": 1.0296,
605
+ "eval_samples_per_second": 485.606,
606
+ "eval_steps_per_second": 0.971,
607
+ "step": 33235
608
+ },
609
+ {
610
+ "epoch": 17.135549872122763,
611
+ "grad_norm": 0.318359375,
612
+ "learning_rate": 0.0006572890025575448,
613
+ "loss": 2.4415,
614
+ "step": 33500
615
+ },
616
+ {
617
+ "epoch": 17.391304347826086,
618
+ "grad_norm": 0.302734375,
619
+ "learning_rate": 0.0006521739130434783,
620
+ "loss": 2.4275,
621
+ "step": 34000
622
+ },
623
+ {
624
+ "epoch": 17.647058823529413,
625
+ "grad_norm": 0.302734375,
626
+ "learning_rate": 0.0006470588235294118,
627
+ "loss": 2.4398,
628
+ "step": 34500
629
+ },
630
+ {
631
+ "epoch": 17.902813299232736,
632
+ "grad_norm": 0.302734375,
633
+ "learning_rate": 0.0006419437340153452,
634
+ "loss": 2.4446,
635
+ "step": 35000
636
+ },
637
+ {
638
+ "epoch": 18.0,
639
+ "eval_loss": 2.5545594692230225,
640
+ "eval_runtime": 1.0484,
641
+ "eval_samples_per_second": 476.904,
642
+ "eval_steps_per_second": 0.954,
643
+ "step": 35190
644
+ },
645
+ {
646
+ "epoch": 18.15856777493606,
647
+ "grad_norm": 0.3515625,
648
+ "learning_rate": 0.0006368286445012787,
649
+ "loss": 2.4267,
650
+ "step": 35500
651
+ },
652
+ {
653
+ "epoch": 18.414322250639387,
654
+ "grad_norm": 0.369140625,
655
+ "learning_rate": 0.0006317135549872123,
656
+ "loss": 2.4183,
657
+ "step": 36000
658
+ },
659
+ {
660
+ "epoch": 18.67007672634271,
661
+ "grad_norm": 0.333984375,
662
+ "learning_rate": 0.0006265984654731458,
663
+ "loss": 2.4242,
664
+ "step": 36500
665
+ },
666
+ {
667
+ "epoch": 18.925831202046037,
668
+ "grad_norm": 0.34375,
669
+ "learning_rate": 0.0006214833759590793,
670
+ "loss": 2.4312,
671
+ "step": 37000
672
+ },
673
+ {
674
+ "epoch": 19.0,
675
+ "eval_loss": 2.5581214427948,
676
+ "eval_runtime": 1.082,
677
+ "eval_samples_per_second": 462.11,
678
+ "eval_steps_per_second": 0.924,
679
+ "step": 37145
680
+ },
681
+ {
682
+ "epoch": 19.18158567774936,
683
+ "grad_norm": 0.33984375,
684
+ "learning_rate": 0.0006163682864450127,
685
+ "loss": 2.4121,
686
+ "step": 37500
687
+ },
688
+ {
689
+ "epoch": 19.437340153452684,
690
+ "grad_norm": 0.3125,
691
+ "learning_rate": 0.0006112531969309462,
692
+ "loss": 2.4054,
693
+ "step": 38000
694
+ },
695
+ {
696
+ "epoch": 19.69309462915601,
697
+ "grad_norm": 0.326171875,
698
+ "learning_rate": 0.0006061381074168799,
699
+ "loss": 2.4158,
700
+ "step": 38500
701
+ },
702
+ {
703
+ "epoch": 19.948849104859335,
704
+ "grad_norm": 0.326171875,
705
+ "learning_rate": 0.0006010230179028134,
706
+ "loss": 2.4166,
707
+ "step": 39000
708
+ },
709
+ {
710
+ "epoch": 20.0,
711
+ "eval_loss": 2.545928478240967,
712
+ "eval_runtime": 1.0301,
713
+ "eval_samples_per_second": 485.371,
714
+ "eval_steps_per_second": 0.971,
715
+ "step": 39100
716
+ },
717
+ {
718
+ "epoch": 20.20460358056266,
719
+ "grad_norm": 0.33203125,
720
+ "learning_rate": 0.0005959079283887469,
721
+ "loss": 2.4008,
722
+ "step": 39500
723
+ },
724
+ {
725
+ "epoch": 20.460358056265985,
726
+ "grad_norm": 0.345703125,
727
+ "learning_rate": 0.0005907928388746803,
728
+ "loss": 2.3997,
729
+ "step": 40000
730
+ },
731
+ {
732
+ "epoch": 20.71611253196931,
733
+ "grad_norm": 0.337890625,
734
+ "learning_rate": 0.0005856777493606138,
735
+ "loss": 2.4061,
736
+ "step": 40500
737
+ },
738
+ {
739
+ "epoch": 20.971867007672635,
740
+ "grad_norm": 0.296875,
741
+ "learning_rate": 0.0005805626598465474,
742
+ "loss": 2.4055,
743
+ "step": 41000
744
+ },
745
+ {
746
+ "epoch": 21.0,
747
+ "eval_loss": 2.5422630310058594,
748
+ "eval_runtime": 1.0775,
749
+ "eval_samples_per_second": 464.02,
750
+ "eval_steps_per_second": 0.928,
751
+ "step": 41055
752
+ },
753
+ {
754
+ "epoch": 21.22762148337596,
755
+ "grad_norm": 0.291015625,
756
+ "learning_rate": 0.0005754475703324809,
757
+ "loss": 2.3843,
758
+ "step": 41500
759
+ },
760
+ {
761
+ "epoch": 21.483375959079282,
762
+ "grad_norm": 0.314453125,
763
+ "learning_rate": 0.0005703324808184144,
764
+ "loss": 2.3914,
765
+ "step": 42000
766
+ },
767
+ {
768
+ "epoch": 21.73913043478261,
769
+ "grad_norm": 0.322265625,
770
+ "learning_rate": 0.0005652173913043478,
771
+ "loss": 2.3954,
772
+ "step": 42500
773
+ },
774
+ {
775
+ "epoch": 21.994884910485933,
776
+ "grad_norm": 0.30859375,
777
+ "learning_rate": 0.0005601023017902813,
778
+ "loss": 2.3992,
779
+ "step": 43000
780
+ },
781
+ {
782
+ "epoch": 22.0,
783
+ "eval_loss": 2.538604736328125,
784
+ "eval_runtime": 1.0718,
785
+ "eval_samples_per_second": 466.508,
786
+ "eval_steps_per_second": 0.933,
787
+ "step": 43010
788
+ },
789
+ {
790
+ "epoch": 22.25063938618926,
791
+ "grad_norm": 0.3359375,
792
+ "learning_rate": 0.0005549872122762149,
793
+ "loss": 2.3769,
794
+ "step": 43500
795
+ },
796
+ {
797
+ "epoch": 22.506393861892583,
798
+ "grad_norm": 0.357421875,
799
+ "learning_rate": 0.0005498721227621484,
800
+ "loss": 2.3806,
801
+ "step": 44000
802
+ },
803
+ {
804
+ "epoch": 22.762148337595907,
805
+ "grad_norm": 0.328125,
806
+ "learning_rate": 0.0005447570332480819,
807
+ "loss": 2.3905,
808
+ "step": 44500
809
+ },
810
+ {
811
+ "epoch": 23.0,
812
+ "eval_loss": 2.5324785709381104,
813
+ "eval_runtime": 1.0484,
814
+ "eval_samples_per_second": 476.898,
815
+ "eval_steps_per_second": 0.954,
816
+ "step": 44965
817
+ },
818
+ {
819
+ "epoch": 23.017902813299234,
820
+ "grad_norm": 0.376953125,
821
+ "learning_rate": 0.0005396419437340153,
822
+ "loss": 2.3874,
823
+ "step": 45000
824
+ },
825
+ {
826
+ "epoch": 23.273657289002557,
827
+ "grad_norm": 0.35546875,
828
+ "learning_rate": 0.0005345268542199488,
829
+ "loss": 2.3744,
830
+ "step": 45500
831
+ },
832
+ {
833
+ "epoch": 23.529411764705884,
834
+ "grad_norm": 0.328125,
835
+ "learning_rate": 0.0005294117647058824,
836
+ "loss": 2.3785,
837
+ "step": 46000
838
+ },
839
+ {
840
+ "epoch": 23.785166240409207,
841
+ "grad_norm": 0.3359375,
842
+ "learning_rate": 0.0005242966751918159,
843
+ "loss": 2.3759,
844
+ "step": 46500
845
+ },
846
+ {
847
+ "epoch": 24.0,
848
+ "eval_loss": 2.5388312339782715,
849
+ "eval_runtime": 1.0338,
850
+ "eval_samples_per_second": 483.635,
851
+ "eval_steps_per_second": 0.967,
852
+ "step": 46920
853
+ },
854
+ {
855
+ "epoch": 24.04092071611253,
856
+ "grad_norm": 0.330078125,
857
+ "learning_rate": 0.0005191815856777494,
858
+ "loss": 2.3774,
859
+ "step": 47000
860
+ },
861
+ {
862
+ "epoch": 24.296675191815858,
863
+ "grad_norm": 0.37109375,
864
+ "learning_rate": 0.0005140664961636828,
865
+ "loss": 2.367,
866
+ "step": 47500
867
+ },
868
+ {
869
+ "epoch": 24.55242966751918,
870
+ "grad_norm": 0.3359375,
871
+ "learning_rate": 0.0005089514066496163,
872
+ "loss": 2.3701,
873
+ "step": 48000
874
+ },
875
+ {
876
+ "epoch": 24.808184143222505,
877
+ "grad_norm": 0.328125,
878
+ "learning_rate": 0.0005038363171355499,
879
+ "loss": 2.3722,
880
+ "step": 48500
881
+ },
882
+ {
883
+ "epoch": 25.0,
884
+ "eval_loss": 2.528256893157959,
885
+ "eval_runtime": 1.03,
886
+ "eval_samples_per_second": 485.431,
887
+ "eval_steps_per_second": 0.971,
888
+ "step": 48875
889
+ },
890
+ {
891
+ "epoch": 25.06393861892583,
892
+ "grad_norm": 0.302734375,
893
+ "learning_rate": 0.0004987212276214833,
894
+ "loss": 2.3668,
895
+ "step": 49000
896
+ },
897
+ {
898
+ "epoch": 25.319693094629155,
899
+ "grad_norm": 0.306640625,
900
+ "learning_rate": 0.0004936061381074169,
901
+ "loss": 2.3593,
902
+ "step": 49500
903
+ },
904
+ {
905
+ "epoch": 25.575447570332482,
906
+ "grad_norm": 0.37890625,
907
+ "learning_rate": 0.0004884910485933504,
908
+ "loss": 2.3647,
909
+ "step": 50000
910
+ },
911
+ {
912
+ "epoch": 25.831202046035806,
913
+ "grad_norm": 0.34765625,
914
+ "learning_rate": 0.0004833759590792839,
915
+ "loss": 2.3704,
916
+ "step": 50500
917
+ },
918
+ {
919
+ "epoch": 26.0,
920
+ "eval_loss": 2.5277769565582275,
921
+ "eval_runtime": 1.6093,
922
+ "eval_samples_per_second": 310.697,
923
+ "eval_steps_per_second": 0.621,
924
+ "step": 50830
925
+ },
926
+ {
927
+ "epoch": 26.08695652173913,
928
+ "grad_norm": 0.32421875,
929
+ "learning_rate": 0.0004782608695652174,
930
+ "loss": 2.3599,
931
+ "step": 51000
932
+ },
933
+ {
934
+ "epoch": 26.342710997442456,
935
+ "grad_norm": 0.33984375,
936
+ "learning_rate": 0.0004731457800511509,
937
+ "loss": 2.3516,
938
+ "step": 51500
939
+ },
940
+ {
941
+ "epoch": 26.59846547314578,
942
+ "grad_norm": 0.31640625,
943
+ "learning_rate": 0.0004680306905370844,
944
+ "loss": 2.3585,
945
+ "step": 52000
946
+ },
947
+ {
948
+ "epoch": 26.854219948849106,
949
+ "grad_norm": 0.33984375,
950
+ "learning_rate": 0.00046291560102301786,
951
+ "loss": 2.3591,
952
+ "step": 52500
953
+ },
954
+ {
955
+ "epoch": 27.0,
956
+ "eval_loss": 2.5286903381347656,
957
+ "eval_runtime": 1.0654,
958
+ "eval_samples_per_second": 469.299,
959
+ "eval_steps_per_second": 0.939,
960
+ "step": 52785
961
+ },
962
+ {
963
+ "epoch": 27.10997442455243,
964
+ "grad_norm": 0.357421875,
965
+ "learning_rate": 0.0004578005115089514,
966
+ "loss": 2.3548,
967
+ "step": 53000
968
+ },
969
+ {
970
+ "epoch": 27.365728900255753,
971
+ "grad_norm": 0.326171875,
972
+ "learning_rate": 0.0004526854219948849,
973
+ "loss": 2.3496,
974
+ "step": 53500
975
+ },
976
+ {
977
+ "epoch": 27.62148337595908,
978
+ "grad_norm": 0.31640625,
979
+ "learning_rate": 0.00044757033248081843,
980
+ "loss": 2.3506,
981
+ "step": 54000
982
+ },
983
+ {
984
+ "epoch": 27.877237851662404,
985
+ "grad_norm": 0.33984375,
986
+ "learning_rate": 0.00044245524296675193,
987
+ "loss": 2.3552,
988
+ "step": 54500
989
+ },
990
+ {
991
+ "epoch": 28.0,
992
+ "eval_loss": 2.523589849472046,
993
+ "eval_runtime": 1.0502,
994
+ "eval_samples_per_second": 476.079,
995
+ "eval_steps_per_second": 0.952,
996
+ "step": 54740
997
+ },
998
+ {
999
+ "epoch": 28.132992327365727,
1000
+ "grad_norm": 0.337890625,
1001
+ "learning_rate": 0.0004373401534526854,
1002
+ "loss": 2.3445,
1003
+ "step": 55000
1004
+ },
1005
+ {
1006
+ "epoch": 28.388746803069054,
1007
+ "grad_norm": 0.34765625,
1008
+ "learning_rate": 0.00043222506393861894,
1009
+ "loss": 2.3447,
1010
+ "step": 55500
1011
+ },
1012
+ {
1013
+ "epoch": 28.644501278772378,
1014
+ "grad_norm": 0.318359375,
1015
+ "learning_rate": 0.00042710997442455245,
1016
+ "loss": 2.3485,
1017
+ "step": 56000
1018
+ },
1019
+ {
1020
+ "epoch": 28.900255754475705,
1021
+ "grad_norm": 0.330078125,
1022
+ "learning_rate": 0.00042199488491048595,
1023
+ "loss": 2.3509,
1024
+ "step": 56500
1025
+ },
1026
+ {
1027
+ "epoch": 29.0,
1028
+ "eval_loss": 2.5270164012908936,
1029
+ "eval_runtime": 1.0578,
1030
+ "eval_samples_per_second": 472.684,
1031
+ "eval_steps_per_second": 0.945,
1032
+ "step": 56695
1033
+ },
1034
+ {
1035
+ "epoch": 29.156010230179028,
1036
+ "grad_norm": 0.392578125,
1037
+ "learning_rate": 0.00041687979539641946,
1038
+ "loss": 2.3418,
1039
+ "step": 57000
1040
+ },
1041
+ {
1042
+ "epoch": 29.41176470588235,
1043
+ "grad_norm": 0.3046875,
1044
+ "learning_rate": 0.0004117647058823529,
1045
+ "loss": 2.3364,
1046
+ "step": 57500
1047
+ },
1048
+ {
1049
+ "epoch": 29.66751918158568,
1050
+ "grad_norm": 0.33203125,
1051
+ "learning_rate": 0.00040664961636828646,
1052
+ "loss": 2.3473,
1053
+ "step": 58000
1054
+ },
1055
+ {
1056
+ "epoch": 29.923273657289002,
1057
+ "grad_norm": 0.41015625,
1058
+ "learning_rate": 0.00040153452685421997,
1059
+ "loss": 2.3423,
1060
+ "step": 58500
1061
+ },
1062
+ {
1063
+ "epoch": 30.0,
1064
+ "eval_loss": 2.5237414836883545,
1065
+ "eval_runtime": 1.0886,
1066
+ "eval_samples_per_second": 459.31,
1067
+ "eval_steps_per_second": 0.919,
1068
+ "step": 58650
1069
+ },
1070
+ {
1071
+ "epoch": 30.17902813299233,
1072
+ "grad_norm": 0.31640625,
1073
+ "learning_rate": 0.00039641943734015347,
1074
+ "loss": 2.3403,
1075
+ "step": 59000
1076
+ },
1077
+ {
1078
+ "epoch": 30.434782608695652,
1079
+ "grad_norm": 0.34375,
1080
+ "learning_rate": 0.000391304347826087,
1081
+ "loss": 2.3339,
1082
+ "step": 59500
1083
+ },
1084
+ {
1085
+ "epoch": 30.690537084398976,
1086
+ "grad_norm": 0.30078125,
1087
+ "learning_rate": 0.0003861892583120204,
1088
+ "loss": 2.3387,
1089
+ "step": 60000
1090
+ },
1091
+ {
1092
+ "epoch": 30.946291560102303,
1093
+ "grad_norm": 0.357421875,
1094
+ "learning_rate": 0.000381074168797954,
1095
+ "loss": 2.3386,
1096
+ "step": 60500
1097
+ },
1098
+ {
1099
+ "epoch": 31.0,
1100
+ "eval_loss": 2.51826810836792,
1101
+ "eval_runtime": 1.0097,
1102
+ "eval_samples_per_second": 495.204,
1103
+ "eval_steps_per_second": 0.99,
1104
+ "step": 60605
1105
+ }
1106
+ ],
1107
+ "logging_steps": 500,
1108
+ "max_steps": 97750,
1109
+ "num_input_tokens_seen": 0,
1110
+ "num_train_epochs": 50,
1111
+ "save_steps": 500,
1112
+ "stateful_callbacks": {
1113
+ "EarlyStoppingCallback": {
1114
+ "args": {
1115
+ "early_stopping_patience": 3,
1116
+ "early_stopping_threshold": 0.0
1117
+ },
1118
+ "attributes": {
1119
+ "early_stopping_patience_counter": 0
1120
+ }
1121
+ },
1122
+ "TrainerControl": {
1123
+ "args": {
1124
+ "should_epoch_stop": false,
1125
+ "should_evaluate": false,
1126
+ "should_log": false,
1127
+ "should_save": true,
1128
+ "should_training_stop": false
1129
+ },
1130
+ "attributes": {}
1131
+ }
1132
+ },
1133
+ "total_flos": 2.461299764647219e+18,
1134
+ "train_batch_size": 512,
1135
+ "trial_name": null,
1136
+ "trial_params": null
1137
+ }
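
trainer_state.json logs a training entry every 500 steps and an evaluation entry at each epoch boundary; this checkpoint is at epoch 31 of a planned 50, with the best eval_loss of 2.518 at step 60605 and the early-stopping patience counter (patience 3) still at 0. A short sketch for pulling the eval curve back out of the file, assuming a local copy of it:

import json

with open("trainer_state.json") as f:  # local copy of the file above
    state = json.load(f)

# Keep only the per-epoch evaluation entries from log_history.
evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
best_epoch, best_loss = min(evals, key=lambda t: t[1])

print(f"best eval_loss {best_loss:.4f} at epoch {best_epoch:g}")  # 2.5183 at epoch 31
print(state["best_model_checkpoint"])  # ./results/models/checkpoint-60605
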
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:771e7eeddb561c5ba5034da3dd73c3c247ab16ddfd64951f3e7cefd51e87e865
+ size 5048
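
training_args.bin is another LFS pointer; the underlying file is the pickled TrainingArguments object the Trainer was launched with (trainer_state.json above already shows train_batch_size 512 and 50 planned epochs). A hedged sketch for inspecting it, assuming a local copy, a compatible transformers install to unpickle against, and a torch version that accepts weights_only:

import torch

# Not a tensor checkpoint: it unpickles into transformers.TrainingArguments,
# so transformers must be importable and weights_only must be disabled.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
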