Mistral-Peptide-v1-422M / trainer_state.json
{
"best_metric": 5.560152530670166,
"best_model_checkpoint": "./results/models/mistral-peptide/checkpoint-215664",
"epoch": 8.0,
"eval_steps": 500,
"global_step": 215664,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01854736998293642,
"grad_norm": 0.546875,
"learning_rate": 0.003998516210401365,
"loss": 5.9005,
"step": 500
},
{
"epoch": 0.03709473996587284,
"grad_norm": 1.0859375,
"learning_rate": 0.0039970324208027305,
"loss": 5.775,
"step": 1000
},
{
"epoch": 0.055642109948809255,
"grad_norm": 0.259765625,
"learning_rate": 0.003995548631204095,
"loss": 5.7532,
"step": 1500
},
{
"epoch": 0.07418947993174568,
"grad_norm": 0.498046875,
"learning_rate": 0.00399406484160546,
"loss": 5.7288,
"step": 2000
},
{
"epoch": 0.0927368499146821,
"grad_norm": 0.248046875,
"learning_rate": 0.003992581052006825,
"loss": 5.7154,
"step": 2500
},
{
"epoch": 0.11128421989761851,
"grad_norm": 4.1875,
"learning_rate": 0.0039910972624081905,
"loss": 5.7002,
"step": 3000
},
{
"epoch": 0.12983158988055493,
"grad_norm": 0.28125,
"learning_rate": 0.003989613472809556,
"loss": 5.6792,
"step": 3500
},
{
"epoch": 0.14837895986349137,
"grad_norm": 0.248046875,
"learning_rate": 0.003988129683210921,
"loss": 5.6751,
"step": 4000
},
{
"epoch": 0.16692632984642777,
"grad_norm": 0.84765625,
"learning_rate": 0.003986645893612286,
"loss": 5.7071,
"step": 4500
},
{
"epoch": 0.1854736998293642,
"grad_norm": 1.046875,
"learning_rate": 0.0039851621040136505,
"loss": 5.7286,
"step": 5000
},
{
"epoch": 0.20402106981230061,
"grad_norm": 0.2060546875,
"learning_rate": 0.003983678314415016,
"loss": 5.7413,
"step": 5500
},
{
"epoch": 0.22256843979523702,
"grad_norm": 0.83984375,
"learning_rate": 0.003982194524816381,
"loss": 5.7345,
"step": 6000
},
{
"epoch": 0.24111580977817346,
"grad_norm": 4.625,
"learning_rate": 0.003980710735217746,
"loss": 5.7295,
"step": 6500
},
{
"epoch": 0.25966317976110986,
"grad_norm": 6.625,
"learning_rate": 0.003979226945619111,
"loss": 5.7137,
"step": 7000
},
{
"epoch": 0.2782105497440463,
"grad_norm": 0.43359375,
"learning_rate": 0.003977743156020477,
"loss": 5.7136,
"step": 7500
},
{
"epoch": 0.29675791972698273,
"grad_norm": 0.59375,
"learning_rate": 0.003976259366421842,
"loss": 5.7133,
"step": 8000
},
{
"epoch": 0.3153052897099191,
"grad_norm": 0.44921875,
"learning_rate": 0.003974775576823206,
"loss": 5.7126,
"step": 8500
},
{
"epoch": 0.33385265969285555,
"grad_norm": 0.75,
"learning_rate": 0.0039732917872245714,
"loss": 5.7146,
"step": 9000
},
{
"epoch": 0.352400029675792,
"grad_norm": 0.236328125,
"learning_rate": 0.003971807997625937,
"loss": 5.7079,
"step": 9500
},
{
"epoch": 0.3709473996587284,
"grad_norm": 0.275390625,
"learning_rate": 0.003970324208027302,
"loss": 5.7086,
"step": 10000
},
{
"epoch": 0.3894947696416648,
"grad_norm": 0.43359375,
"learning_rate": 0.003968840418428667,
"loss": 5.7079,
"step": 10500
},
{
"epoch": 0.40804213962460123,
"grad_norm": 0.322265625,
"learning_rate": 0.0039673566288300315,
"loss": 5.6996,
"step": 11000
},
{
"epoch": 0.42658950960753766,
"grad_norm": 0.26171875,
"learning_rate": 0.003965872839231397,
"loss": 5.6944,
"step": 11500
},
{
"epoch": 0.44513687959047404,
"grad_norm": 0.27734375,
"learning_rate": 0.003964389049632762,
"loss": 5.6964,
"step": 12000
},
{
"epoch": 0.4636842495734105,
"grad_norm": 0.283203125,
"learning_rate": 0.003962905260034127,
"loss": 5.6924,
"step": 12500
},
{
"epoch": 0.4822316195563469,
"grad_norm": 0.96484375,
"learning_rate": 0.003961421470435492,
"loss": 5.7065,
"step": 13000
},
{
"epoch": 0.5007789895392833,
"grad_norm": 0.287109375,
"learning_rate": 0.0039599376808368576,
"loss": 5.6997,
"step": 13500
},
{
"epoch": 0.5193263595222197,
"grad_norm": 0.41796875,
"learning_rate": 0.003958453891238223,
"loss": 5.6976,
"step": 14000
},
{
"epoch": 0.5378737295051562,
"grad_norm": 1.1171875,
"learning_rate": 0.003956970101639588,
"loss": 5.7148,
"step": 14500
},
{
"epoch": 0.5564210994880926,
"grad_norm": 0.69921875,
"learning_rate": 0.003955486312040952,
"loss": 5.7088,
"step": 15000
},
{
"epoch": 0.574968469471029,
"grad_norm": 0.330078125,
"learning_rate": 0.003954002522442318,
"loss": 5.7007,
"step": 15500
},
{
"epoch": 0.5935158394539655,
"grad_norm": 0.318359375,
"learning_rate": 0.003952518732843683,
"loss": 5.6958,
"step": 16000
},
{
"epoch": 0.6120632094369018,
"grad_norm": 0.298828125,
"learning_rate": 0.003951034943245048,
"loss": 5.6973,
"step": 16500
},
{
"epoch": 0.6306105794198382,
"grad_norm": 19.125,
"learning_rate": 0.003949551153646413,
"loss": 5.6959,
"step": 17000
},
{
"epoch": 0.6491579494027747,
"grad_norm": 0.41796875,
"learning_rate": 0.0039480673640477785,
"loss": 5.6943,
"step": 17500
},
{
"epoch": 0.6677053193857111,
"grad_norm": 1.828125,
"learning_rate": 0.003946583574449144,
"loss": 5.6879,
"step": 18000
},
{
"epoch": 0.6862526893686475,
"grad_norm": 0.58203125,
"learning_rate": 0.003945099784850508,
"loss": 5.6871,
"step": 18500
},
{
"epoch": 0.704800059351584,
"grad_norm": 0.248046875,
"learning_rate": 0.003943615995251873,
"loss": 5.6831,
"step": 19000
},
{
"epoch": 0.7233474293345203,
"grad_norm": 0.41796875,
"learning_rate": 0.0039421322056532385,
"loss": 5.6856,
"step": 19500
},
{
"epoch": 0.7418947993174568,
"grad_norm": 1.4921875,
"learning_rate": 0.003940648416054604,
"loss": 5.6844,
"step": 20000
},
{
"epoch": 0.7604421693003932,
"grad_norm": 1.703125,
"learning_rate": 0.003939164626455969,
"loss": 5.6741,
"step": 20500
},
{
"epoch": 0.7789895392833296,
"grad_norm": 1.75,
"learning_rate": 0.003937680836857333,
"loss": 5.6793,
"step": 21000
},
{
"epoch": 0.7975369092662661,
"grad_norm": 1.0625,
"learning_rate": 0.0039361970472586985,
"loss": 5.6735,
"step": 21500
},
{
"epoch": 0.8160842792492025,
"grad_norm": 2.859375,
"learning_rate": 0.003934713257660064,
"loss": 5.6737,
"step": 22000
},
{
"epoch": 0.8346316492321388,
"grad_norm": 0.333984375,
"learning_rate": 0.003933229468061429,
"loss": 5.6704,
"step": 22500
},
{
"epoch": 0.8531790192150753,
"grad_norm": 0.48828125,
"learning_rate": 0.003931745678462794,
"loss": 5.6643,
"step": 23000
},
{
"epoch": 0.8717263891980117,
"grad_norm": 0.34765625,
"learning_rate": 0.003930261888864159,
"loss": 5.6727,
"step": 23500
},
{
"epoch": 0.8902737591809481,
"grad_norm": 0.333984375,
"learning_rate": 0.003928778099265525,
"loss": 5.6689,
"step": 24000
},
{
"epoch": 0.9088211291638846,
"grad_norm": 0.2216796875,
"learning_rate": 0.00392729430966689,
"loss": 5.6722,
"step": 24500
},
{
"epoch": 0.927368499146821,
"grad_norm": 0.3359375,
"learning_rate": 0.003925810520068254,
"loss": 5.6649,
"step": 25000
},
{
"epoch": 0.9459158691297574,
"grad_norm": 0.359375,
"learning_rate": 0.003924326730469619,
"loss": 5.6693,
"step": 25500
},
{
"epoch": 0.9644632391126938,
"grad_norm": 0.56640625,
"learning_rate": 0.003922842940870985,
"loss": 5.6648,
"step": 26000
},
{
"epoch": 0.9830106090956302,
"grad_norm": 0.373046875,
"learning_rate": 0.00392135915127235,
"loss": 5.6681,
"step": 26500
},
{
"epoch": 1.0,
"eval_loss": 5.664683818817139,
"eval_runtime": 0.7133,
"eval_samples_per_second": 1211.22,
"eval_steps_per_second": 37.851,
"step": 26958
},
{
"epoch": 1.0015579790785667,
"grad_norm": 0.228515625,
"learning_rate": 0.003919875361673715,
"loss": 5.6673,
"step": 27000
},
{
"epoch": 1.020105349061503,
"grad_norm": 0.330078125,
"learning_rate": 0.00391839157207508,
"loss": 5.6679,
"step": 27500
},
{
"epoch": 1.0386527190444395,
"grad_norm": 0.369140625,
"learning_rate": 0.003916907782476445,
"loss": 5.6611,
"step": 28000
},
{
"epoch": 1.0572000890273758,
"grad_norm": 0.330078125,
"learning_rate": 0.00391542399287781,
"loss": 5.6592,
"step": 28500
},
{
"epoch": 1.0757474590103124,
"grad_norm": 0.28125,
"learning_rate": 0.003913940203279175,
"loss": 5.6571,
"step": 29000
},
{
"epoch": 1.0942948289932488,
"grad_norm": 0.59375,
"learning_rate": 0.00391245641368054,
"loss": 5.6557,
"step": 29500
},
{
"epoch": 1.1128421989761852,
"grad_norm": 0.33203125,
"learning_rate": 0.0039109726240819055,
"loss": 5.6538,
"step": 30000
},
{
"epoch": 1.1313895689591216,
"grad_norm": 2.609375,
"learning_rate": 0.00390948883448327,
"loss": 5.6498,
"step": 30500
},
{
"epoch": 1.149936938942058,
"grad_norm": 1.4296875,
"learning_rate": 0.003908005044884635,
"loss": 5.6497,
"step": 31000
},
{
"epoch": 1.1684843089249943,
"grad_norm": 0.365234375,
"learning_rate": 0.003906521255286,
"loss": 5.6488,
"step": 31500
},
{
"epoch": 1.187031678907931,
"grad_norm": 0.94921875,
"learning_rate": 0.003905037465687366,
"loss": 5.6523,
"step": 32000
},
{
"epoch": 1.2055790488908673,
"grad_norm": 0.380859375,
"learning_rate": 0.0039035536760887307,
"loss": 5.651,
"step": 32500
},
{
"epoch": 1.2241264188738037,
"grad_norm": 0.578125,
"learning_rate": 0.0039020698864900955,
"loss": 5.651,
"step": 33000
},
{
"epoch": 1.24267378885674,
"grad_norm": 4.40625,
"learning_rate": 0.0039005860968914607,
"loss": 5.6528,
"step": 33500
},
{
"epoch": 1.2612211588396764,
"grad_norm": 0.25,
"learning_rate": 0.003899102307292826,
"loss": 5.6541,
"step": 34000
},
{
"epoch": 1.279768528822613,
"grad_norm": 0.53125,
"learning_rate": 0.003897618517694191,
"loss": 5.6547,
"step": 34500
},
{
"epoch": 1.2983158988055494,
"grad_norm": 0.419921875,
"learning_rate": 0.003896134728095556,
"loss": 5.651,
"step": 35000
},
{
"epoch": 1.3168632687884858,
"grad_norm": 0.5,
"learning_rate": 0.003894650938496921,
"loss": 5.6542,
"step": 35500
},
{
"epoch": 1.3354106387714222,
"grad_norm": 0.2138671875,
"learning_rate": 0.0038931671488982864,
"loss": 5.6534,
"step": 36000
},
{
"epoch": 1.3539580087543586,
"grad_norm": 4.875,
"learning_rate": 0.0038916833592996516,
"loss": 5.6519,
"step": 36500
},
{
"epoch": 1.3725053787372952,
"grad_norm": 1.625,
"learning_rate": 0.003890199569701017,
"loss": 5.65,
"step": 37000
},
{
"epoch": 1.3910527487202315,
"grad_norm": 0.341796875,
"learning_rate": 0.003888715780102381,
"loss": 5.6486,
"step": 37500
},
{
"epoch": 1.409600118703168,
"grad_norm": 0.36328125,
"learning_rate": 0.0038872319905037464,
"loss": 5.6503,
"step": 38000
},
{
"epoch": 1.4281474886861043,
"grad_norm": 0.265625,
"learning_rate": 0.0038857482009051116,
"loss": 5.6419,
"step": 38500
},
{
"epoch": 1.4466948586690407,
"grad_norm": 0.48046875,
"learning_rate": 0.003884264411306477,
"loss": 5.6449,
"step": 39000
},
{
"epoch": 1.4652422286519773,
"grad_norm": 0.296875,
"learning_rate": 0.003882780621707842,
"loss": 5.6478,
"step": 39500
},
{
"epoch": 1.4837895986349134,
"grad_norm": 0.51171875,
"learning_rate": 0.003881296832109207,
"loss": 5.6423,
"step": 40000
},
{
"epoch": 1.50233696861785,
"grad_norm": 0.54296875,
"learning_rate": 0.003879813042510572,
"loss": 5.642,
"step": 40500
},
{
"epoch": 1.5208843386007864,
"grad_norm": 14.625,
"learning_rate": 0.0038783292529119373,
"loss": 5.6415,
"step": 41000
},
{
"epoch": 1.5394317085837228,
"grad_norm": 0.265625,
"learning_rate": 0.0038768454633133025,
"loss": 5.644,
"step": 41500
},
{
"epoch": 1.5579790785666594,
"grad_norm": 0.322265625,
"learning_rate": 0.0038753616737146677,
"loss": 5.6495,
"step": 42000
},
{
"epoch": 1.5765264485495956,
"grad_norm": 1.0234375,
"learning_rate": 0.003873877884116032,
"loss": 5.6514,
"step": 42500
},
{
"epoch": 1.5950738185325322,
"grad_norm": 1.71875,
"learning_rate": 0.0038723940945173973,
"loss": 5.6572,
"step": 43000
},
{
"epoch": 1.6136211885154685,
"grad_norm": 1.6640625,
"learning_rate": 0.0038709103049187625,
"loss": 5.6482,
"step": 43500
},
{
"epoch": 1.632168558498405,
"grad_norm": 0.50390625,
"learning_rate": 0.0038694265153201277,
"loss": 5.6448,
"step": 44000
},
{
"epoch": 1.6507159284813413,
"grad_norm": 2.25,
"learning_rate": 0.003867942725721493,
"loss": 5.6464,
"step": 44500
},
{
"epoch": 1.6692632984642777,
"grad_norm": 0.478515625,
"learning_rate": 0.0038664589361228578,
"loss": 5.6412,
"step": 45000
},
{
"epoch": 1.6878106684472143,
"grad_norm": 1.0625,
"learning_rate": 0.003864975146524223,
"loss": 5.6426,
"step": 45500
},
{
"epoch": 1.7063580384301507,
"grad_norm": 0.2890625,
"learning_rate": 0.003863491356925588,
"loss": 5.6435,
"step": 46000
},
{
"epoch": 1.724905408413087,
"grad_norm": 0.283203125,
"learning_rate": 0.0038620075673269534,
"loss": 5.6416,
"step": 46500
},
{
"epoch": 1.7434527783960234,
"grad_norm": 0.265625,
"learning_rate": 0.0038605237777283186,
"loss": 5.6395,
"step": 47000
},
{
"epoch": 1.7620001483789598,
"grad_norm": 0.212890625,
"learning_rate": 0.003859039988129683,
"loss": 5.6425,
"step": 47500
},
{
"epoch": 1.7805475183618964,
"grad_norm": 0.291015625,
"learning_rate": 0.003857556198531048,
"loss": 5.6401,
"step": 48000
},
{
"epoch": 1.7990948883448326,
"grad_norm": 0.2265625,
"learning_rate": 0.0038560724089324134,
"loss": 5.6375,
"step": 48500
},
{
"epoch": 1.8176422583277692,
"grad_norm": 0.4921875,
"learning_rate": 0.0038545886193337786,
"loss": 5.6349,
"step": 49000
},
{
"epoch": 1.8361896283107055,
"grad_norm": 0.2412109375,
"learning_rate": 0.0038531048297351434,
"loss": 5.6363,
"step": 49500
},
{
"epoch": 1.854736998293642,
"grad_norm": 1.6796875,
"learning_rate": 0.0038516210401365087,
"loss": 5.6365,
"step": 50000
},
{
"epoch": 1.8732843682765785,
"grad_norm": 0.4765625,
"learning_rate": 0.003850137250537874,
"loss": 5.6362,
"step": 50500
},
{
"epoch": 1.8918317382595147,
"grad_norm": 0.44140625,
"learning_rate": 0.003848653460939239,
"loss": 5.6348,
"step": 51000
},
{
"epoch": 1.9103791082424513,
"grad_norm": 0.314453125,
"learning_rate": 0.0038471696713406043,
"loss": 5.6407,
"step": 51500
},
{
"epoch": 1.9289264782253877,
"grad_norm": 11.25,
"learning_rate": 0.0038456858817419687,
"loss": 5.6373,
"step": 52000
},
{
"epoch": 1.947473848208324,
"grad_norm": 0.24609375,
"learning_rate": 0.003844202092143334,
"loss": 5.6393,
"step": 52500
},
{
"epoch": 1.9660212181912606,
"grad_norm": 1.6015625,
"learning_rate": 0.003842718302544699,
"loss": 5.6272,
"step": 53000
},
{
"epoch": 1.9845685881741968,
"grad_norm": 0.45703125,
"learning_rate": 0.0038412345129460643,
"loss": 5.6328,
"step": 53500
},
{
"epoch": 2.0,
"eval_loss": 5.620687007904053,
"eval_runtime": 0.6722,
"eval_samples_per_second": 1285.247,
"eval_steps_per_second": 40.164,
"step": 53916
},
{
"epoch": 2.0031159581571334,
"grad_norm": 1.03125,
"learning_rate": 0.0038397507233474295,
"loss": 5.6283,
"step": 54000
},
{
"epoch": 2.0216633281400695,
"grad_norm": 0.326171875,
"learning_rate": 0.0038382669337487943,
"loss": 5.6321,
"step": 54500
},
{
"epoch": 2.040210698123006,
"grad_norm": 0.70703125,
"learning_rate": 0.0038367831441501596,
"loss": 5.6296,
"step": 55000
},
{
"epoch": 2.0587580681059428,
"grad_norm": 0.30859375,
"learning_rate": 0.0038352993545515248,
"loss": 5.6252,
"step": 55500
},
{
"epoch": 2.077305438088879,
"grad_norm": 0.3515625,
"learning_rate": 0.00383381556495289,
"loss": 5.6294,
"step": 56000
},
{
"epoch": 2.0958528080718155,
"grad_norm": 0.27734375,
"learning_rate": 0.003832331775354255,
"loss": 5.6235,
"step": 56500
},
{
"epoch": 2.1144001780547517,
"grad_norm": 0.37890625,
"learning_rate": 0.0038308479857556196,
"loss": 5.6233,
"step": 57000
},
{
"epoch": 2.1329475480376883,
"grad_norm": 1.2578125,
"learning_rate": 0.003829364196156985,
"loss": 5.6275,
"step": 57500
},
{
"epoch": 2.151494918020625,
"grad_norm": 0.484375,
"learning_rate": 0.00382788040655835,
"loss": 5.626,
"step": 58000
},
{
"epoch": 2.170042288003561,
"grad_norm": 2.015625,
"learning_rate": 0.0038263966169597152,
"loss": 5.6238,
"step": 58500
},
{
"epoch": 2.1885896579864976,
"grad_norm": 0.232421875,
"learning_rate": 0.0038249128273610804,
"loss": 5.625,
"step": 59000
},
{
"epoch": 2.207137027969434,
"grad_norm": 0.4140625,
"learning_rate": 0.0038234290377624452,
"loss": 5.6198,
"step": 59500
},
{
"epoch": 2.2256843979523704,
"grad_norm": 0.27734375,
"learning_rate": 0.0038219452481638105,
"loss": 5.623,
"step": 60000
},
{
"epoch": 2.2442317679353065,
"grad_norm": 0.26171875,
"learning_rate": 0.0038204614585651757,
"loss": 5.632,
"step": 60500
},
{
"epoch": 2.262779137918243,
"grad_norm": 0.73046875,
"learning_rate": 0.003818977668966541,
"loss": 5.6247,
"step": 61000
},
{
"epoch": 2.2813265079011797,
"grad_norm": 1.09375,
"learning_rate": 0.003817493879367906,
"loss": 5.6222,
"step": 61500
},
{
"epoch": 2.299873877884116,
"grad_norm": 0.423828125,
"learning_rate": 0.0038160100897692705,
"loss": 5.6231,
"step": 62000
},
{
"epoch": 2.3184212478670525,
"grad_norm": 7.71875,
"learning_rate": 0.0038145263001706357,
"loss": 5.6204,
"step": 62500
},
{
"epoch": 2.3369686178499887,
"grad_norm": 0.38671875,
"learning_rate": 0.003813042510572001,
"loss": 5.6259,
"step": 63000
},
{
"epoch": 2.3555159878329253,
"grad_norm": 0.94140625,
"learning_rate": 0.003811558720973366,
"loss": 5.618,
"step": 63500
},
{
"epoch": 2.374063357815862,
"grad_norm": 0.5625,
"learning_rate": 0.003810074931374731,
"loss": 5.624,
"step": 64000
},
{
"epoch": 2.392610727798798,
"grad_norm": 3.15625,
"learning_rate": 0.003808591141776096,
"loss": 5.6262,
"step": 64500
},
{
"epoch": 2.4111580977817346,
"grad_norm": 0.279296875,
"learning_rate": 0.0038071073521774614,
"loss": 5.6213,
"step": 65000
},
{
"epoch": 2.429705467764671,
"grad_norm": 0.328125,
"learning_rate": 0.0038056235625788266,
"loss": 5.6239,
"step": 65500
},
{
"epoch": 2.4482528377476074,
"grad_norm": 0.2451171875,
"learning_rate": 0.003804139772980192,
"loss": 5.6291,
"step": 66000
},
{
"epoch": 2.466800207730544,
"grad_norm": 0.51953125,
"learning_rate": 0.0038026559833815566,
"loss": 5.6262,
"step": 66500
},
{
"epoch": 2.48534757771348,
"grad_norm": 0.62109375,
"learning_rate": 0.0038011721937829214,
"loss": 5.6221,
"step": 67000
},
{
"epoch": 2.5038949476964167,
"grad_norm": 0.42578125,
"learning_rate": 0.0037996884041842866,
"loss": 5.621,
"step": 67500
},
{
"epoch": 2.522442317679353,
"grad_norm": 0.287109375,
"learning_rate": 0.003798204614585652,
"loss": 5.6214,
"step": 68000
},
{
"epoch": 2.5409896876622895,
"grad_norm": 2.734375,
"learning_rate": 0.003796720824987017,
"loss": 5.6202,
"step": 68500
},
{
"epoch": 2.559537057645226,
"grad_norm": 5.65625,
"learning_rate": 0.003795237035388382,
"loss": 5.6223,
"step": 69000
},
{
"epoch": 2.5780844276281623,
"grad_norm": 0.412109375,
"learning_rate": 0.003793753245789747,
"loss": 5.6248,
"step": 69500
},
{
"epoch": 2.596631797611099,
"grad_norm": 0.458984375,
"learning_rate": 0.0037922694561911123,
"loss": 5.6186,
"step": 70000
},
{
"epoch": 2.615179167594035,
"grad_norm": 1.9765625,
"learning_rate": 0.0037907856665924775,
"loss": 5.6197,
"step": 70500
},
{
"epoch": 2.6337265375769716,
"grad_norm": 0.65234375,
"learning_rate": 0.0037893018769938427,
"loss": 5.6179,
"step": 71000
},
{
"epoch": 2.652273907559908,
"grad_norm": 0.51171875,
"learning_rate": 0.0037878180873952075,
"loss": 5.6212,
"step": 71500
},
{
"epoch": 2.6708212775428444,
"grad_norm": 1.6640625,
"learning_rate": 0.0037863342977965723,
"loss": 5.6187,
"step": 72000
},
{
"epoch": 2.689368647525781,
"grad_norm": 0.4140625,
"learning_rate": 0.0037848505081979375,
"loss": 5.6142,
"step": 72500
},
{
"epoch": 2.707916017508717,
"grad_norm": 9.25,
"learning_rate": 0.0037833667185993027,
"loss": 5.6186,
"step": 73000
},
{
"epoch": 2.7264633874916537,
"grad_norm": 0.421875,
"learning_rate": 0.003781882929000668,
"loss": 5.619,
"step": 73500
},
{
"epoch": 2.7450107574745903,
"grad_norm": 0.314453125,
"learning_rate": 0.0037803991394020327,
"loss": 5.6144,
"step": 74000
},
{
"epoch": 2.7635581274575265,
"grad_norm": 0.427734375,
"learning_rate": 0.003778915349803398,
"loss": 5.6135,
"step": 74500
},
{
"epoch": 2.782105497440463,
"grad_norm": 0.70703125,
"learning_rate": 0.003777431560204763,
"loss": 5.6098,
"step": 75000
},
{
"epoch": 2.8006528674233993,
"grad_norm": 1.0078125,
"learning_rate": 0.0037759477706061284,
"loss": 5.6077,
"step": 75500
},
{
"epoch": 2.819200237406336,
"grad_norm": 0.30859375,
"learning_rate": 0.003774463981007493,
"loss": 5.6143,
"step": 76000
},
{
"epoch": 2.8377476073892725,
"grad_norm": 0.60546875,
"learning_rate": 0.0037729801914088584,
"loss": 5.6068,
"step": 76500
},
{
"epoch": 2.8562949773722086,
"grad_norm": 0.41796875,
"learning_rate": 0.003771496401810223,
"loss": 5.6022,
"step": 77000
},
{
"epoch": 2.8748423473551448,
"grad_norm": 5.15625,
"learning_rate": 0.0037700126122115884,
"loss": 5.6061,
"step": 77500
},
{
"epoch": 2.8933897173380814,
"grad_norm": 3.109375,
"learning_rate": 0.0037685288226129536,
"loss": 5.6119,
"step": 78000
},
{
"epoch": 2.911937087321018,
"grad_norm": 0.294921875,
"learning_rate": 0.0037670450330143184,
"loss": 5.6074,
"step": 78500
},
{
"epoch": 2.9304844573039546,
"grad_norm": 0.359375,
"learning_rate": 0.0037655612434156836,
"loss": 5.6091,
"step": 79000
},
{
"epoch": 2.9490318272868907,
"grad_norm": 0.470703125,
"learning_rate": 0.003764077453817049,
"loss": 5.6098,
"step": 79500
},
{
"epoch": 2.967579197269827,
"grad_norm": 1.796875,
"learning_rate": 0.003762593664218414,
"loss": 5.6066,
"step": 80000
},
{
"epoch": 2.9861265672527635,
"grad_norm": 0.318359375,
"learning_rate": 0.0037611098746197793,
"loss": 5.6141,
"step": 80500
},
{
"epoch": 3.0,
"eval_loss": 5.597402095794678,
"eval_runtime": 0.6753,
"eval_samples_per_second": 1279.346,
"eval_steps_per_second": 39.98,
"step": 80874
},
{
"epoch": 3.0046739372357,
"grad_norm": 0.462890625,
"learning_rate": 0.003759626085021144,
"loss": 5.6015,
"step": 81000
},
{
"epoch": 3.0232213072186362,
"grad_norm": 0.447265625,
"learning_rate": 0.0037581422954225093,
"loss": 5.6081,
"step": 81500
},
{
"epoch": 3.041768677201573,
"grad_norm": 0.419921875,
"learning_rate": 0.003756658505823874,
"loss": 5.6036,
"step": 82000
},
{
"epoch": 3.0603160471845094,
"grad_norm": 9.8125,
"learning_rate": 0.0037551747162252393,
"loss": 5.6046,
"step": 82500
},
{
"epoch": 3.0788634171674456,
"grad_norm": 0.35546875,
"learning_rate": 0.0037536909266266045,
"loss": 5.6093,
"step": 83000
},
{
"epoch": 3.097410787150382,
"grad_norm": 0.63671875,
"learning_rate": 0.0037522071370279693,
"loss": 5.6081,
"step": 83500
},
{
"epoch": 3.1159581571333184,
"grad_norm": 8.375,
"learning_rate": 0.0037507233474293345,
"loss": 5.6033,
"step": 84000
},
{
"epoch": 3.134505527116255,
"grad_norm": 0.7890625,
"learning_rate": 0.0037492395578306997,
"loss": 5.6081,
"step": 84500
},
{
"epoch": 3.153052897099191,
"grad_norm": 0.37890625,
"learning_rate": 0.003747755768232065,
"loss": 5.6031,
"step": 85000
},
{
"epoch": 3.1716002670821277,
"grad_norm": 0.546875,
"learning_rate": 0.00374627197863343,
"loss": 5.6004,
"step": 85500
},
{
"epoch": 3.1901476370650643,
"grad_norm": 0.455078125,
"learning_rate": 0.003744788189034795,
"loss": 5.5958,
"step": 86000
},
{
"epoch": 3.2086950070480005,
"grad_norm": 0.6171875,
"learning_rate": 0.00374330439943616,
"loss": 5.6069,
"step": 86500
},
{
"epoch": 3.227242377030937,
"grad_norm": 2.515625,
"learning_rate": 0.003741820609837525,
"loss": 5.6084,
"step": 87000
},
{
"epoch": 3.2457897470138732,
"grad_norm": 1.1953125,
"learning_rate": 0.00374033682023889,
"loss": 5.6124,
"step": 87500
},
{
"epoch": 3.26433711699681,
"grad_norm": 1.09375,
"learning_rate": 0.0037388530306402554,
"loss": 5.6012,
"step": 88000
},
{
"epoch": 3.2828844869797464,
"grad_norm": 0.33984375,
"learning_rate": 0.00373736924104162,
"loss": 5.6012,
"step": 88500
},
{
"epoch": 3.3014318569626826,
"grad_norm": 0.478515625,
"learning_rate": 0.0037358854514429854,
"loss": 5.6019,
"step": 89000
},
{
"epoch": 3.319979226945619,
"grad_norm": 57.25,
"learning_rate": 0.0037344016618443506,
"loss": 5.6005,
"step": 89500
},
{
"epoch": 3.3385265969285554,
"grad_norm": 1.25,
"learning_rate": 0.003732917872245716,
"loss": 5.6,
"step": 90000
},
{
"epoch": 3.357073966911492,
"grad_norm": 0.53515625,
"learning_rate": 0.0037314340826470806,
"loss": 5.5993,
"step": 90500
},
{
"epoch": 3.3756213368944286,
"grad_norm": 2.421875,
"learning_rate": 0.003729950293048446,
"loss": 5.6029,
"step": 91000
},
{
"epoch": 3.3941687068773647,
"grad_norm": 1.0,
"learning_rate": 0.003728466503449811,
"loss": 5.5991,
"step": 91500
},
{
"epoch": 3.4127160768603013,
"grad_norm": 0.7421875,
"learning_rate": 0.003726982713851176,
"loss": 5.5965,
"step": 92000
},
{
"epoch": 3.4312634468432375,
"grad_norm": 0.41015625,
"learning_rate": 0.003725498924252541,
"loss": 5.6004,
"step": 92500
},
{
"epoch": 3.449810816826174,
"grad_norm": 1.0859375,
"learning_rate": 0.003724015134653906,
"loss": 5.5991,
"step": 93000
},
{
"epoch": 3.4683581868091107,
"grad_norm": 0.80859375,
"learning_rate": 0.003722531345055271,
"loss": 5.6098,
"step": 93500
},
{
"epoch": 3.486905556792047,
"grad_norm": 0.48828125,
"learning_rate": 0.0037210475554566363,
"loss": 5.6072,
"step": 94000
},
{
"epoch": 3.5054529267749834,
"grad_norm": 1.7734375,
"learning_rate": 0.0037195637658580015,
"loss": 5.6078,
"step": 94500
},
{
"epoch": 3.5240002967579196,
"grad_norm": 10.25,
"learning_rate": 0.0037180799762593668,
"loss": 5.6109,
"step": 95000
},
{
"epoch": 3.542547666740856,
"grad_norm": 3.859375,
"learning_rate": 0.0037165961866607315,
"loss": 5.6097,
"step": 95500
},
{
"epoch": 3.561095036723793,
"grad_norm": 0.67578125,
"learning_rate": 0.0037151123970620968,
"loss": 5.6076,
"step": 96000
},
{
"epoch": 3.579642406706729,
"grad_norm": 1.2890625,
"learning_rate": 0.0037136286074634615,
"loss": 5.6068,
"step": 96500
},
{
"epoch": 3.5981897766896656,
"grad_norm": 1.1953125,
"learning_rate": 0.0037121448178648268,
"loss": 5.6083,
"step": 97000
},
{
"epoch": 3.6167371466726017,
"grad_norm": 4.125,
"learning_rate": 0.003710661028266192,
"loss": 5.6109,
"step": 97500
},
{
"epoch": 3.6352845166555383,
"grad_norm": 0.283203125,
"learning_rate": 0.0037091772386675568,
"loss": 5.6104,
"step": 98000
},
{
"epoch": 3.653831886638475,
"grad_norm": 0.875,
"learning_rate": 0.003707693449068922,
"loss": 5.6158,
"step": 98500
},
{
"epoch": 3.672379256621411,
"grad_norm": 7.0625,
"learning_rate": 0.003706209659470287,
"loss": 5.6205,
"step": 99000
},
{
"epoch": 3.6909266266043472,
"grad_norm": 6.625,
"learning_rate": 0.0037047258698716524,
"loss": 5.6214,
"step": 99500
},
{
"epoch": 3.709473996587284,
"grad_norm": 44.25,
"learning_rate": 0.0037032420802730177,
"loss": 5.6258,
"step": 100000
},
{
"epoch": 3.7280213665702204,
"grad_norm": 0.9609375,
"learning_rate": 0.0037017582906743824,
"loss": 5.6237,
"step": 100500
},
{
"epoch": 3.746568736553157,
"grad_norm": 1.6640625,
"learning_rate": 0.0037002745010757477,
"loss": 5.6164,
"step": 101000
},
{
"epoch": 3.765116106536093,
"grad_norm": 1.1953125,
"learning_rate": 0.0036987907114771124,
"loss": 5.6159,
"step": 101500
},
{
"epoch": 3.7836634765190293,
"grad_norm": 0.345703125,
"learning_rate": 0.0036973069218784777,
"loss": 5.6122,
"step": 102000
},
{
"epoch": 3.802210846501966,
"grad_norm": 5.53125,
"learning_rate": 0.003695823132279843,
"loss": 5.609,
"step": 102500
},
{
"epoch": 3.8207582164849025,
"grad_norm": 0.578125,
"learning_rate": 0.0036943393426812077,
"loss": 5.609,
"step": 103000
},
{
"epoch": 3.8393055864678387,
"grad_norm": 6.6875,
"learning_rate": 0.003692855553082573,
"loss": 5.6056,
"step": 103500
},
{
"epoch": 3.8578529564507753,
"grad_norm": 0.4296875,
"learning_rate": 0.003691371763483938,
"loss": 5.6102,
"step": 104000
},
{
"epoch": 3.8764003264337115,
"grad_norm": 2.859375,
"learning_rate": 0.0036898879738853033,
"loss": 5.6058,
"step": 104500
},
{
"epoch": 3.894947696416648,
"grad_norm": 1.9296875,
"learning_rate": 0.003688404184286668,
"loss": 5.6081,
"step": 105000
},
{
"epoch": 3.9134950663995847,
"grad_norm": 8.3125,
"learning_rate": 0.0036869203946880333,
"loss": 5.6019,
"step": 105500
},
{
"epoch": 3.932042436382521,
"grad_norm": 2.65625,
"learning_rate": 0.0036854366050893986,
"loss": 5.5993,
"step": 106000
},
{
"epoch": 3.9505898063654574,
"grad_norm": 0.70703125,
"learning_rate": 0.0036839528154907633,
"loss": 5.6001,
"step": 106500
},
{
"epoch": 3.9691371763483936,
"grad_norm": 0.7734375,
"learning_rate": 0.0036824690258921286,
"loss": 5.5992,
"step": 107000
},
{
"epoch": 3.98768454633133,
"grad_norm": 0.88671875,
"learning_rate": 0.0036809852362934934,
"loss": 5.6038,
"step": 107500
},
{
"epoch": 4.0,
"eval_loss": 5.593242645263672,
"eval_runtime": 0.6663,
"eval_samples_per_second": 1296.741,
"eval_steps_per_second": 40.523,
"step": 107832
},
{
"epoch": 4.006231916314267,
"grad_norm": 3.09375,
"learning_rate": 0.0036795014466948586,
"loss": 5.6019,
"step": 108000
},
{
"epoch": 4.024779286297203,
"grad_norm": 0.546875,
"learning_rate": 0.003678017657096224,
"loss": 5.6019,
"step": 108500
},
{
"epoch": 4.043326656280139,
"grad_norm": 2.96875,
"learning_rate": 0.003676533867497589,
"loss": 5.6004,
"step": 109000
},
{
"epoch": 4.061874026263076,
"grad_norm": 3.796875,
"learning_rate": 0.0036750500778989542,
"loss": 5.5929,
"step": 109500
},
{
"epoch": 4.080421396246012,
"grad_norm": 0.478515625,
"learning_rate": 0.003673566288300319,
"loss": 5.5989,
"step": 110000
},
{
"epoch": 4.098968766228949,
"grad_norm": 0.453125,
"learning_rate": 0.0036720824987016842,
"loss": 5.5982,
"step": 110500
},
{
"epoch": 4.1175161362118855,
"grad_norm": 3.78125,
"learning_rate": 0.0036705987091030495,
"loss": 5.5986,
"step": 111000
},
{
"epoch": 4.136063506194821,
"grad_norm": 0.9375,
"learning_rate": 0.0036691149195044142,
"loss": 5.5984,
"step": 111500
},
{
"epoch": 4.154610876177758,
"grad_norm": 3.09375,
"learning_rate": 0.0036676311299057795,
"loss": 5.5978,
"step": 112000
},
{
"epoch": 4.173158246160694,
"grad_norm": 1.09375,
"learning_rate": 0.0036661473403071443,
"loss": 5.5957,
"step": 112500
},
{
"epoch": 4.191705616143631,
"grad_norm": 0.408203125,
"learning_rate": 0.0036646635507085095,
"loss": 5.5914,
"step": 113000
},
{
"epoch": 4.210252986126568,
"grad_norm": 0.41015625,
"learning_rate": 0.0036631797611098747,
"loss": 5.5971,
"step": 113500
},
{
"epoch": 4.228800356109503,
"grad_norm": 0.81640625,
"learning_rate": 0.00366169597151124,
"loss": 5.5971,
"step": 114000
},
{
"epoch": 4.24734772609244,
"grad_norm": 0.36328125,
"learning_rate": 0.003660212181912605,
"loss": 5.5985,
"step": 114500
},
{
"epoch": 4.2658950960753765,
"grad_norm": 0.6015625,
"learning_rate": 0.00365872839231397,
"loss": 5.6023,
"step": 115000
},
{
"epoch": 4.284442466058313,
"grad_norm": 1.8046875,
"learning_rate": 0.003657244602715335,
"loss": 5.6029,
"step": 115500
},
{
"epoch": 4.30298983604125,
"grad_norm": 1.265625,
"learning_rate": 0.0036557608131167004,
"loss": 5.6077,
"step": 116000
},
{
"epoch": 4.3215372060241855,
"grad_norm": 2.796875,
"learning_rate": 0.003654277023518065,
"loss": 5.604,
"step": 116500
},
{
"epoch": 4.340084576007122,
"grad_norm": 5.375,
"learning_rate": 0.00365279323391943,
"loss": 5.6038,
"step": 117000
},
{
"epoch": 4.358631945990059,
"grad_norm": 2.78125,
"learning_rate": 0.003651309444320795,
"loss": 5.6091,
"step": 117500
},
{
"epoch": 4.377179315972995,
"grad_norm": 0.52734375,
"learning_rate": 0.0036498256547221604,
"loss": 5.606,
"step": 118000
},
{
"epoch": 4.395726685955932,
"grad_norm": 0.65625,
"learning_rate": 0.0036483418651235256,
"loss": 5.5985,
"step": 118500
},
{
"epoch": 4.414274055938868,
"grad_norm": 17.25,
"learning_rate": 0.003646858075524891,
"loss": 5.5961,
"step": 119000
},
{
"epoch": 4.432821425921804,
"grad_norm": 1.0546875,
"learning_rate": 0.0036453742859262556,
"loss": 5.6007,
"step": 119500
},
{
"epoch": 4.451368795904741,
"grad_norm": 6.40625,
"learning_rate": 0.003643890496327621,
"loss": 5.6029,
"step": 120000
},
{
"epoch": 4.469916165887677,
"grad_norm": 0.400390625,
"learning_rate": 0.003642406706728986,
"loss": 5.5998,
"step": 120500
},
{
"epoch": 4.488463535870613,
"grad_norm": 0.61328125,
"learning_rate": 0.0036409229171303513,
"loss": 5.6039,
"step": 121000
},
{
"epoch": 4.50701090585355,
"grad_norm": 0.515625,
"learning_rate": 0.003639439127531716,
"loss": 5.5998,
"step": 121500
},
{
"epoch": 4.525558275836486,
"grad_norm": 0.58203125,
"learning_rate": 0.003637955337933081,
"loss": 5.5987,
"step": 122000
},
{
"epoch": 4.544105645819423,
"grad_norm": 3.296875,
"learning_rate": 0.003636471548334446,
"loss": 5.5986,
"step": 122500
},
{
"epoch": 4.5626530158023595,
"grad_norm": 0.58203125,
"learning_rate": 0.0036349877587358113,
"loss": 5.601,
"step": 123000
},
{
"epoch": 4.581200385785296,
"grad_norm": 0.3203125,
"learning_rate": 0.0036335039691371765,
"loss": 5.6043,
"step": 123500
},
{
"epoch": 4.599747755768232,
"grad_norm": 2.796875,
"learning_rate": 0.0036320201795385417,
"loss": 5.6029,
"step": 124000
},
{
"epoch": 4.618295125751168,
"grad_norm": 25.5,
"learning_rate": 0.0036305363899399065,
"loss": 5.5966,
"step": 124500
},
{
"epoch": 4.636842495734105,
"grad_norm": 0.484375,
"learning_rate": 0.0036290526003412717,
"loss": 5.5938,
"step": 125000
},
{
"epoch": 4.655389865717042,
"grad_norm": 0.89453125,
"learning_rate": 0.003627568810742637,
"loss": 5.5946,
"step": 125500
},
{
"epoch": 4.673937235699977,
"grad_norm": 3.34375,
"learning_rate": 0.003626085021144002,
"loss": 5.5968,
"step": 126000
},
{
"epoch": 4.692484605682914,
"grad_norm": 0.85546875,
"learning_rate": 0.003624601231545367,
"loss": 5.6008,
"step": 126500
},
{
"epoch": 4.7110319756658505,
"grad_norm": 0.435546875,
"learning_rate": 0.0036231174419467317,
"loss": 5.5946,
"step": 127000
},
{
"epoch": 4.729579345648787,
"grad_norm": 0.3671875,
"learning_rate": 0.003621633652348097,
"loss": 5.5876,
"step": 127500
},
{
"epoch": 4.748126715631724,
"grad_norm": 0.474609375,
"learning_rate": 0.003620149862749462,
"loss": 5.5902,
"step": 128000
},
{
"epoch": 4.76667408561466,
"grad_norm": 11.8125,
"learning_rate": 0.0036186660731508274,
"loss": 5.5933,
"step": 128500
},
{
"epoch": 4.785221455597596,
"grad_norm": 1.203125,
"learning_rate": 0.0036171822835521926,
"loss": 5.5911,
"step": 129000
},
{
"epoch": 4.803768825580533,
"grad_norm": 0.390625,
"learning_rate": 0.0036156984939535574,
"loss": 5.5877,
"step": 129500
},
{
"epoch": 4.822316195563469,
"grad_norm": 0.416015625,
"learning_rate": 0.0036142147043549226,
"loss": 5.5947,
"step": 130000
},
{
"epoch": 4.840863565546406,
"grad_norm": 0.419921875,
"learning_rate": 0.003612730914756288,
"loss": 5.5903,
"step": 130500
},
{
"epoch": 4.859410935529342,
"grad_norm": 0.40234375,
"learning_rate": 0.003611247125157653,
"loss": 5.5873,
"step": 131000
},
{
"epoch": 4.877958305512278,
"grad_norm": 0.5,
"learning_rate": 0.0036097633355590174,
"loss": 5.584,
"step": 131500
},
{
"epoch": 4.896505675495215,
"grad_norm": 0.40234375,
"learning_rate": 0.0036082795459603826,
"loss": 5.588,
"step": 132000
},
{
"epoch": 4.915053045478151,
"grad_norm": 1.8125,
"learning_rate": 0.003606795756361748,
"loss": 5.5897,
"step": 132500
},
{
"epoch": 4.933600415461088,
"grad_norm": 0.40234375,
"learning_rate": 0.003605311966763113,
"loss": 5.5915,
"step": 133000
},
{
"epoch": 4.952147785444024,
"grad_norm": 11.125,
"learning_rate": 0.0036038281771644783,
"loss": 5.5923,
"step": 133500
},
{
"epoch": 4.97069515542696,
"grad_norm": 0.8046875,
"learning_rate": 0.003602344387565843,
"loss": 5.587,
"step": 134000
},
{
"epoch": 4.989242525409897,
"grad_norm": 14.8125,
"learning_rate": 0.0036008605979672083,
"loss": 5.5909,
"step": 134500
},
{
"epoch": 5.0,
"eval_loss": 5.58021354675293,
"eval_runtime": 0.6821,
"eval_samples_per_second": 1266.679,
"eval_steps_per_second": 39.584,
"step": 134790
},
{
"epoch": 5.0077898953928335,
"grad_norm": 0.470703125,
"learning_rate": 0.0035993768083685735,
"loss": 5.5842,
"step": 135000
},
{
"epoch": 5.02633726537577,
"grad_norm": 3.125,
"learning_rate": 0.0035978930187699387,
"loss": 5.5836,
"step": 135500
},
{
"epoch": 5.044884635358706,
"grad_norm": 0.88671875,
"learning_rate": 0.003596409229171304,
"loss": 5.5819,
"step": 136000
},
{
"epoch": 5.063432005341642,
"grad_norm": 1.1484375,
"learning_rate": 0.0035949254395726683,
"loss": 5.5819,
"step": 136500
},
{
"epoch": 5.081979375324579,
"grad_norm": 0.455078125,
"learning_rate": 0.0035934416499740335,
"loss": 5.5837,
"step": 137000
},
{
"epoch": 5.100526745307516,
"grad_norm": 0.30859375,
"learning_rate": 0.0035919578603753988,
"loss": 5.5763,
"step": 137500
},
{
"epoch": 5.119074115290452,
"grad_norm": 3.375,
"learning_rate": 0.003590474070776764,
"loss": 5.5774,
"step": 138000
},
{
"epoch": 5.137621485273388,
"grad_norm": 0.71484375,
"learning_rate": 0.003588990281178129,
"loss": 5.5806,
"step": 138500
},
{
"epoch": 5.1561688552563245,
"grad_norm": 0.5234375,
"learning_rate": 0.003587506491579494,
"loss": 5.5815,
"step": 139000
},
{
"epoch": 5.174716225239261,
"grad_norm": 0.37890625,
"learning_rate": 0.003586022701980859,
"loss": 5.5823,
"step": 139500
},
{
"epoch": 5.193263595222198,
"grad_norm": 0.427734375,
"learning_rate": 0.0035845389123822244,
"loss": 5.5808,
"step": 140000
},
{
"epoch": 5.211810965205134,
"grad_norm": 0.390625,
"learning_rate": 0.0035830551227835896,
"loss": 5.5777,
"step": 140500
},
{
"epoch": 5.23035833518807,
"grad_norm": 0.431640625,
"learning_rate": 0.003581571333184955,
"loss": 5.5755,
"step": 141000
},
{
"epoch": 5.248905705171007,
"grad_norm": 2.796875,
"learning_rate": 0.003580087543586319,
"loss": 5.5739,
"step": 141500
},
{
"epoch": 5.267453075153943,
"grad_norm": 0.953125,
"learning_rate": 0.0035786037539876844,
"loss": 5.5777,
"step": 142000
},
{
"epoch": 5.28600044513688,
"grad_norm": 1.0,
"learning_rate": 0.0035771199643890497,
"loss": 5.5796,
"step": 142500
},
{
"epoch": 5.304547815119816,
"grad_norm": 0.3203125,
"learning_rate": 0.003575636174790415,
"loss": 5.5775,
"step": 143000
},
{
"epoch": 5.323095185102752,
"grad_norm": 0.3359375,
"learning_rate": 0.00357415238519178,
"loss": 5.5707,
"step": 143500
},
{
"epoch": 5.341642555085689,
"grad_norm": 2.234375,
"learning_rate": 0.003572668595593145,
"loss": 5.5792,
"step": 144000
},
{
"epoch": 5.360189925068625,
"grad_norm": 3.546875,
"learning_rate": 0.00357118480599451,
"loss": 5.5747,
"step": 144500
},
{
"epoch": 5.378737295051562,
"grad_norm": 2.046875,
"learning_rate": 0.0035697010163958753,
"loss": 5.579,
"step": 145000
},
{
"epoch": 5.397284665034498,
"grad_norm": 0.74609375,
"learning_rate": 0.0035682172267972405,
"loss": 5.5746,
"step": 145500
},
{
"epoch": 5.415832035017434,
"grad_norm": 2.921875,
"learning_rate": 0.003566733437198605,
"loss": 5.5761,
"step": 146000
},
{
"epoch": 5.434379405000371,
"grad_norm": 1.765625,
"learning_rate": 0.00356524964759997,
"loss": 5.5795,
"step": 146500
},
{
"epoch": 5.4529267749833075,
"grad_norm": 1.453125,
"learning_rate": 0.0035637658580013353,
"loss": 5.5805,
"step": 147000
},
{
"epoch": 5.471474144966244,
"grad_norm": 0.484375,
"learning_rate": 0.0035622820684027006,
"loss": 5.5802,
"step": 147500
},
{
"epoch": 5.490021514949181,
"grad_norm": 2.328125,
"learning_rate": 0.0035607982788040658,
"loss": 5.5863,
"step": 148000
},
{
"epoch": 5.508568884932116,
"grad_norm": 0.54296875,
"learning_rate": 0.0035593144892054306,
"loss": 5.5939,
"step": 148500
},
{
"epoch": 5.527116254915053,
"grad_norm": 0.44140625,
"learning_rate": 0.0035578306996067958,
"loss": 5.5998,
"step": 149000
},
{
"epoch": 5.54566362489799,
"grad_norm": 0.349609375,
"learning_rate": 0.003556346910008161,
"loss": 5.6138,
"step": 149500
},
{
"epoch": 5.564210994880926,
"grad_norm": 1.2109375,
"learning_rate": 0.0035548631204095262,
"loss": 5.6063,
"step": 150000
},
{
"epoch": 5.582758364863862,
"grad_norm": 36.25,
"learning_rate": 0.0035533793308108914,
"loss": 5.6083,
"step": 150500
},
{
"epoch": 5.6013057348467985,
"grad_norm": 0.7734375,
"learning_rate": 0.003551895541212256,
"loss": 5.603,
"step": 151000
},
{
"epoch": 5.619853104829735,
"grad_norm": 1.3515625,
"learning_rate": 0.003550411751613621,
"loss": 5.6061,
"step": 151500
},
{
"epoch": 5.638400474812672,
"grad_norm": 3.1875,
"learning_rate": 0.0035489279620149862,
"loss": 5.6004,
"step": 152000
},
{
"epoch": 5.656947844795608,
"grad_norm": 0.4921875,
"learning_rate": 0.0035474441724163515,
"loss": 5.5939,
"step": 152500
},
{
"epoch": 5.675495214778544,
"grad_norm": 0.609375,
"learning_rate": 0.0035459603828177167,
"loss": 5.6008,
"step": 153000
},
{
"epoch": 5.694042584761481,
"grad_norm": 4.0625,
"learning_rate": 0.0035444765932190815,
"loss": 5.608,
"step": 153500
},
{
"epoch": 5.712589954744417,
"grad_norm": 0.5234375,
"learning_rate": 0.0035429928036204467,
"loss": 5.598,
"step": 154000
},
{
"epoch": 5.731137324727354,
"grad_norm": 2.09375,
"learning_rate": 0.003541509014021812,
"loss": 5.5946,
"step": 154500
},
{
"epoch": 5.74968469471029,
"grad_norm": 4.34375,
"learning_rate": 0.003540025224423177,
"loss": 5.5909,
"step": 155000
},
{
"epoch": 5.768232064693226,
"grad_norm": 0.55859375,
"learning_rate": 0.0035385414348245423,
"loss": 5.5915,
"step": 155500
},
{
"epoch": 5.786779434676163,
"grad_norm": 25.625,
"learning_rate": 0.0035370576452259067,
"loss": 5.6017,
"step": 156000
},
{
"epoch": 5.805326804659099,
"grad_norm": 0.9453125,
"learning_rate": 0.003535573855627272,
"loss": 5.5936,
"step": 156500
},
{
"epoch": 5.823874174642036,
"grad_norm": 0.5390625,
"learning_rate": 0.003534090066028637,
"loss": 5.6077,
"step": 157000
},
{
"epoch": 5.8424215446249725,
"grad_norm": 1.5625,
"learning_rate": 0.0035326062764300024,
"loss": 5.601,
"step": 157500
},
{
"epoch": 5.860968914607908,
"grad_norm": 28.0,
"learning_rate": 0.003531122486831367,
"loss": 5.5944,
"step": 158000
},
{
"epoch": 5.879516284590845,
"grad_norm": 0.51953125,
"learning_rate": 0.0035296386972327324,
"loss": 5.5957,
"step": 158500
},
{
"epoch": 5.8980636545737815,
"grad_norm": 1.140625,
"learning_rate": 0.0035281549076340976,
"loss": 5.5942,
"step": 159000
},
{
"epoch": 5.916611024556718,
"grad_norm": 0.88671875,
"learning_rate": 0.003526671118035463,
"loss": 5.5923,
"step": 159500
},
{
"epoch": 5.935158394539655,
"grad_norm": 0.61328125,
"learning_rate": 0.003525187328436828,
"loss": 5.5858,
"step": 160000
},
{
"epoch": 5.95370576452259,
"grad_norm": 0.796875,
"learning_rate": 0.003523703538838193,
"loss": 5.5872,
"step": 160500
},
{
"epoch": 5.972253134505527,
"grad_norm": 1.5625,
"learning_rate": 0.0035222197492395576,
"loss": 5.5848,
"step": 161000
},
{
"epoch": 5.990800504488464,
"grad_norm": 0.703125,
"learning_rate": 0.003520735959640923,
"loss": 5.5871,
"step": 161500
},
{
"epoch": 6.0,
"eval_loss": 5.576883792877197,
"eval_runtime": 0.6871,
"eval_samples_per_second": 1257.389,
"eval_steps_per_second": 39.293,
"step": 161748
},
{
"epoch": 6.0093478744714,
"grad_norm": 6.0625,
"learning_rate": 0.003519252170042288,
"loss": 5.5846,
"step": 162000
},
{
"epoch": 6.027895244454337,
"grad_norm": 2.953125,
"learning_rate": 0.0035177683804436533,
"loss": 5.5851,
"step": 162500
},
{
"epoch": 6.0464426144372725,
"grad_norm": 0.65234375,
"learning_rate": 0.003516284590845018,
"loss": 5.5869,
"step": 163000
},
{
"epoch": 6.064989984420209,
"grad_norm": 1.296875,
"learning_rate": 0.0035148008012463833,
"loss": 5.5884,
"step": 163500
},
{
"epoch": 6.083537354403146,
"grad_norm": 0.59375,
"learning_rate": 0.0035133170116477485,
"loss": 5.583,
"step": 164000
},
{
"epoch": 6.102084724386082,
"grad_norm": 0.796875,
"learning_rate": 0.0035118332220491137,
"loss": 5.5868,
"step": 164500
},
{
"epoch": 6.120632094369019,
"grad_norm": 1.8125,
"learning_rate": 0.003510349432450479,
"loss": 5.5819,
"step": 165000
},
{
"epoch": 6.139179464351955,
"grad_norm": 0.65625,
"learning_rate": 0.0035088656428518437,
"loss": 5.5795,
"step": 165500
},
{
"epoch": 6.157726834334891,
"grad_norm": 0.796875,
"learning_rate": 0.0035073818532532085,
"loss": 5.5837,
"step": 166000
},
{
"epoch": 6.176274204317828,
"grad_norm": 2.203125,
"learning_rate": 0.0035058980636545737,
"loss": 5.5959,
"step": 166500
},
{
"epoch": 6.194821574300764,
"grad_norm": 2.390625,
"learning_rate": 0.003504414274055939,
"loss": 5.586,
"step": 167000
},
{
"epoch": 6.213368944283701,
"grad_norm": 0.99609375,
"learning_rate": 0.003502930484457304,
"loss": 5.5969,
"step": 167500
},
{
"epoch": 6.231916314266637,
"grad_norm": 9.875,
"learning_rate": 0.003501446694858669,
"loss": 5.5835,
"step": 168000
},
{
"epoch": 6.250463684249573,
"grad_norm": 0.734375,
"learning_rate": 0.003499962905260034,
"loss": 5.5868,
"step": 168500
},
{
"epoch": 6.26901105423251,
"grad_norm": 12.125,
"learning_rate": 0.0034984791156613994,
"loss": 5.5823,
"step": 169000
},
{
"epoch": 6.2875584242154465,
"grad_norm": 1.671875,
"learning_rate": 0.0034969953260627646,
"loss": 5.5801,
"step": 169500
},
{
"epoch": 6.306105794198382,
"grad_norm": 0.734375,
"learning_rate": 0.00349551153646413,
"loss": 5.5808,
"step": 170000
},
{
"epoch": 6.324653164181319,
"grad_norm": 12.0,
"learning_rate": 0.0034940277468654946,
"loss": 5.5859,
"step": 170500
},
{
"epoch": 6.3432005341642554,
"grad_norm": 0.828125,
"learning_rate": 0.0034925439572668594,
"loss": 5.5807,
"step": 171000
},
{
"epoch": 6.361747904147192,
"grad_norm": 2.53125,
"learning_rate": 0.0034910601676682246,
"loss": 5.5804,
"step": 171500
},
{
"epoch": 6.380295274130129,
"grad_norm": 0.52734375,
"learning_rate": 0.00348957637806959,
"loss": 5.5731,
"step": 172000
},
{
"epoch": 6.398842644113064,
"grad_norm": 0.79296875,
"learning_rate": 0.0034880925884709546,
"loss": 5.5813,
"step": 172500
},
{
"epoch": 6.417390014096001,
"grad_norm": 0.578125,
"learning_rate": 0.00348660879887232,
"loss": 5.5763,
"step": 173000
},
{
"epoch": 6.435937384078938,
"grad_norm": 0.70703125,
"learning_rate": 0.003485125009273685,
"loss": 5.5724,
"step": 173500
},
{
"epoch": 6.454484754061874,
"grad_norm": 1.5234375,
"learning_rate": 0.0034836412196750503,
"loss": 5.5756,
"step": 174000
},
{
"epoch": 6.473032124044811,
"grad_norm": 4.40625,
"learning_rate": 0.0034821574300764155,
"loss": 5.581,
"step": 174500
},
{
"epoch": 6.4915794940277465,
"grad_norm": 0.5546875,
"learning_rate": 0.0034806736404777803,
"loss": 5.5756,
"step": 175000
},
{
"epoch": 6.510126864010683,
"grad_norm": 0.51953125,
"learning_rate": 0.0034791898508791455,
"loss": 5.5735,
"step": 175500
},
{
"epoch": 6.52867423399362,
"grad_norm": 0.55859375,
"learning_rate": 0.0034777060612805103,
"loss": 5.573,
"step": 176000
},
{
"epoch": 6.547221603976556,
"grad_norm": 1.7578125,
"learning_rate": 0.0034762222716818755,
"loss": 5.5762,
"step": 176500
},
{
"epoch": 6.565768973959493,
"grad_norm": 0.5625,
"learning_rate": 0.0034747384820832407,
"loss": 5.5689,
"step": 177000
},
{
"epoch": 6.584316343942429,
"grad_norm": 0.54296875,
"learning_rate": 0.0034732546924846055,
"loss": 5.5706,
"step": 177500
},
{
"epoch": 6.602863713925365,
"grad_norm": 0.63671875,
"learning_rate": 0.0034717709028859707,
"loss": 5.5762,
"step": 178000
},
{
"epoch": 6.621411083908302,
"grad_norm": 0.76953125,
"learning_rate": 0.003470287113287336,
"loss": 5.5689,
"step": 178500
},
{
"epoch": 6.639958453891238,
"grad_norm": 0.7421875,
"learning_rate": 0.003468803323688701,
"loss": 5.5731,
"step": 179000
},
{
"epoch": 6.658505823874175,
"grad_norm": 0.73046875,
"learning_rate": 0.0034673195340900664,
"loss": 5.5723,
"step": 179500
},
{
"epoch": 6.677053193857111,
"grad_norm": 0.3984375,
"learning_rate": 0.003465835744491431,
"loss": 5.5661,
"step": 180000
},
{
"epoch": 6.695600563840047,
"grad_norm": 0.447265625,
"learning_rate": 0.0034643519548927964,
"loss": 5.5736,
"step": 180500
},
{
"epoch": 6.714147933822984,
"grad_norm": 1.171875,
"learning_rate": 0.003462868165294161,
"loss": 5.5735,
"step": 181000
},
{
"epoch": 6.7326953038059205,
"grad_norm": 1.09375,
"learning_rate": 0.0034613843756955264,
"loss": 5.5725,
"step": 181500
},
{
"epoch": 6.751242673788857,
"grad_norm": 3.6875,
"learning_rate": 0.0034599005860968916,
"loss": 5.5738,
"step": 182000
},
{
"epoch": 6.769790043771793,
"grad_norm": 1.078125,
"learning_rate": 0.0034584167964982564,
"loss": 5.5712,
"step": 182500
},
{
"epoch": 6.788337413754729,
"grad_norm": 0.75390625,
"learning_rate": 0.0034569330068996216,
"loss": 5.5752,
"step": 183000
},
{
"epoch": 6.806884783737666,
"grad_norm": 0.69140625,
"learning_rate": 0.003455449217300987,
"loss": 5.5772,
"step": 183500
},
{
"epoch": 6.825432153720603,
"grad_norm": 3.296875,
"learning_rate": 0.003453965427702352,
"loss": 5.5729,
"step": 184000
},
{
"epoch": 6.843979523703538,
"grad_norm": 0.578125,
"learning_rate": 0.0034524816381037173,
"loss": 5.5763,
"step": 184500
},
{
"epoch": 6.862526893686475,
"grad_norm": 0.4921875,
"learning_rate": 0.003450997848505082,
"loss": 5.5831,
"step": 185000
},
{
"epoch": 6.8810742636694116,
"grad_norm": 4.71875,
"learning_rate": 0.0034495140589064473,
"loss": 5.5789,
"step": 185500
},
{
"epoch": 6.899621633652348,
"grad_norm": 1.65625,
"learning_rate": 0.003448030269307812,
"loss": 5.5779,
"step": 186000
},
{
"epoch": 6.918169003635285,
"grad_norm": 2.90625,
"learning_rate": 0.0034465464797091773,
"loss": 5.5779,
"step": 186500
},
{
"epoch": 6.936716373618221,
"grad_norm": 0.47265625,
"learning_rate": 0.003445062690110542,
"loss": 5.5792,
"step": 187000
},
{
"epoch": 6.955263743601157,
"grad_norm": 1.015625,
"learning_rate": 0.0034435789005119073,
"loss": 5.5777,
"step": 187500
},
{
"epoch": 6.973811113584094,
"grad_norm": 7.5625,
"learning_rate": 0.0034420951109132725,
"loss": 5.5787,
"step": 188000
},
{
"epoch": 6.99235848356703,
"grad_norm": 0.55859375,
"learning_rate": 0.0034406113213146378,
"loss": 5.5825,
"step": 188500
},
{
"epoch": 7.0,
"eval_loss": 5.567850112915039,
"eval_runtime": 0.6773,
"eval_samples_per_second": 1275.659,
"eval_steps_per_second": 39.864,
"step": 188706
},
{
"epoch": 7.010905853549967,
"grad_norm": 0.6484375,
"learning_rate": 0.003439127531716003,
"loss": 5.5696,
"step": 189000
},
{
"epoch": 7.0294532235329035,
"grad_norm": 3.9375,
"learning_rate": 0.0034376437421173678,
"loss": 5.5744,
"step": 189500
},
{
"epoch": 7.048000593515839,
"grad_norm": 3.890625,
"learning_rate": 0.003436159952518733,
"loss": 5.5745,
"step": 190000
},
{
"epoch": 7.066547963498776,
"grad_norm": 0.6875,
"learning_rate": 0.003434676162920098,
"loss": 5.5733,
"step": 190500
},
{
"epoch": 7.085095333481712,
"grad_norm": 1.6796875,
"learning_rate": 0.003433192373321463,
"loss": 5.5771,
"step": 191000
},
{
"epoch": 7.103642703464649,
"grad_norm": 2.484375,
"learning_rate": 0.003431708583722828,
"loss": 5.5764,
"step": 191500
},
{
"epoch": 7.122190073447585,
"grad_norm": 7.46875,
"learning_rate": 0.003430224794124193,
"loss": 5.5742,
"step": 192000
},
{
"epoch": 7.140737443430521,
"grad_norm": 2.25,
"learning_rate": 0.0034287410045255582,
"loss": 5.5703,
"step": 192500
},
{
"epoch": 7.159284813413458,
"grad_norm": 1.6640625,
"learning_rate": 0.0034272572149269234,
"loss": 5.5721,
"step": 193000
},
{
"epoch": 7.1778321833963945,
"grad_norm": 2.296875,
"learning_rate": 0.0034257734253282887,
"loss": 5.5754,
"step": 193500
},
{
"epoch": 7.196379553379331,
"grad_norm": 5.3125,
"learning_rate": 0.003424289635729654,
"loss": 5.5735,
"step": 194000
},
{
"epoch": 7.214926923362267,
"grad_norm": 117.0,
"learning_rate": 0.0034228058461310187,
"loss": 5.5738,
"step": 194500
},
{
"epoch": 7.233474293345203,
"grad_norm": 0.80078125,
"learning_rate": 0.003421322056532384,
"loss": 5.5748,
"step": 195000
},
{
"epoch": 7.25202166332814,
"grad_norm": 3.125,
"learning_rate": 0.0034198382669337487,
"loss": 5.5712,
"step": 195500
},
{
"epoch": 7.270569033311077,
"grad_norm": 0.57421875,
"learning_rate": 0.003418354477335114,
"loss": 5.5693,
"step": 196000
},
{
"epoch": 7.289116403294013,
"grad_norm": 3.578125,
"learning_rate": 0.003416870687736479,
"loss": 5.5708,
"step": 196500
},
{
"epoch": 7.307663773276949,
"grad_norm": 6.03125,
"learning_rate": 0.003415386898137844,
"loss": 5.5688,
"step": 197000
},
{
"epoch": 7.3262111432598855,
"grad_norm": 3.25,
"learning_rate": 0.003413903108539209,
"loss": 5.5747,
"step": 197500
},
{
"epoch": 7.344758513242822,
"grad_norm": 1.078125,
"learning_rate": 0.0034124193189405743,
"loss": 5.5745,
"step": 198000
},
{
"epoch": 7.363305883225759,
"grad_norm": 7.03125,
"learning_rate": 0.0034109355293419396,
"loss": 5.5659,
"step": 198500
},
{
"epoch": 7.381853253208695,
"grad_norm": 11.4375,
"learning_rate": 0.0034094517397433048,
"loss": 5.5678,
"step": 199000
},
{
"epoch": 7.400400623191631,
"grad_norm": 9.3125,
"learning_rate": 0.0034079679501446696,
"loss": 5.5704,
"step": 199500
},
{
"epoch": 7.418947993174568,
"grad_norm": 1.296875,
"learning_rate": 0.003406484160546035,
"loss": 5.5665,
"step": 200000
},
{
"epoch": 7.437495363157504,
"grad_norm": 2.25,
"learning_rate": 0.0034050003709473996,
"loss": 5.5701,
"step": 200500
},
{
"epoch": 7.456042733140441,
"grad_norm": 1.2109375,
"learning_rate": 0.003403516581348765,
"loss": 5.5667,
"step": 201000
},
{
"epoch": 7.4745901031233775,
"grad_norm": 13.875,
"learning_rate": 0.0034020327917501296,
"loss": 5.5646,
"step": 201500
},
{
"epoch": 7.493137473106313,
"grad_norm": 1.3359375,
"learning_rate": 0.003400549002151495,
"loss": 5.5665,
"step": 202000
},
{
"epoch": 7.51168484308925,
"grad_norm": 16.875,
"learning_rate": 0.00339906521255286,
"loss": 5.5627,
"step": 202500
},
{
"epoch": 7.530232213072186,
"grad_norm": 2.578125,
"learning_rate": 0.0033975814229542252,
"loss": 5.57,
"step": 203000
},
{
"epoch": 7.548779583055123,
"grad_norm": 32.5,
"learning_rate": 0.0033960976333555905,
"loss": 5.5716,
"step": 203500
},
{
"epoch": 7.56732695303806,
"grad_norm": 0.7265625,
"learning_rate": 0.0033946138437569552,
"loss": 5.5745,
"step": 204000
},
{
"epoch": 7.585874323020995,
"grad_norm": 0.46875,
"learning_rate": 0.0033931300541583205,
"loss": 5.5691,
"step": 204500
},
{
"epoch": 7.604421693003932,
"grad_norm": 3.015625,
"learning_rate": 0.0033916462645596857,
"loss": 5.5736,
"step": 205000
},
{
"epoch": 7.6229690629868685,
"grad_norm": 75.5,
"learning_rate": 0.0033901624749610505,
"loss": 5.5712,
"step": 205500
},
{
"epoch": 7.641516432969805,
"grad_norm": 1.6953125,
"learning_rate": 0.0033886786853624157,
"loss": 5.5746,
"step": 206000
},
{
"epoch": 7.660063802952742,
"grad_norm": 0.73046875,
"learning_rate": 0.0033871948957637805,
"loss": 5.5719,
"step": 206500
},
{
"epoch": 7.678611172935677,
"grad_norm": 0.77734375,
"learning_rate": 0.0033857111061651457,
"loss": 5.5718,
"step": 207000
},
{
"epoch": 7.697158542918614,
"grad_norm": 1.1328125,
"learning_rate": 0.003384227316566511,
"loss": 5.5724,
"step": 207500
},
{
"epoch": 7.715705912901551,
"grad_norm": 0.51953125,
"learning_rate": 0.003382743526967876,
"loss": 5.5642,
"step": 208000
},
{
"epoch": 7.734253282884487,
"grad_norm": 6.65625,
"learning_rate": 0.0033812597373692414,
"loss": 5.5714,
"step": 208500
},
{
"epoch": 7.752800652867423,
"grad_norm": 3.375,
"learning_rate": 0.003379775947770606,
"loss": 5.5713,
"step": 209000
},
{
"epoch": 7.7713480228503595,
"grad_norm": 0.9453125,
"learning_rate": 0.0033782921581719714,
"loss": 5.5678,
"step": 209500
},
{
"epoch": 7.789895392833296,
"grad_norm": 6.5625,
"learning_rate": 0.0033768083685733366,
"loss": 5.5661,
"step": 210000
},
{
"epoch": 7.808442762816233,
"grad_norm": 0.90234375,
"learning_rate": 0.0033753245789747014,
"loss": 5.5663,
"step": 210500
},
{
"epoch": 7.826990132799169,
"grad_norm": 0.5390625,
"learning_rate": 0.0033738407893760666,
"loss": 5.5659,
"step": 211000
},
{
"epoch": 7.845537502782106,
"grad_norm": 3.0,
"learning_rate": 0.0033723569997774314,
"loss": 5.5639,
"step": 211500
},
{
"epoch": 7.864084872765042,
"grad_norm": 0.48046875,
"learning_rate": 0.0033708732101787966,
"loss": 5.5662,
"step": 212000
},
{
"epoch": 7.882632242747978,
"grad_norm": 0.5703125,
"learning_rate": 0.003369389420580162,
"loss": 5.5677,
"step": 212500
},
{
"epoch": 7.901179612730915,
"grad_norm": 3.03125,
"learning_rate": 0.003367905630981527,
"loss": 5.5709,
"step": 213000
},
{
"epoch": 7.9197269827138514,
"grad_norm": 0.99609375,
"learning_rate": 0.003366421841382892,
"loss": 5.5671,
"step": 213500
},
{
"epoch": 7.938274352696787,
"grad_norm": 1.0859375,
"learning_rate": 0.003364938051784257,
"loss": 5.5698,
"step": 214000
},
{
"epoch": 7.956821722679724,
"grad_norm": 0.99609375,
"learning_rate": 0.0033634542621856223,
"loss": 5.567,
"step": 214500
},
{
"epoch": 7.97536909266266,
"grad_norm": 1.0,
"learning_rate": 0.0033619704725869875,
"loss": 5.5695,
"step": 215000
},
{
"epoch": 7.993916462645597,
"grad_norm": 0.53125,
"learning_rate": 0.0033604866829883523,
"loss": 5.5664,
"step": 215500
},
{
"epoch": 8.0,
"eval_loss": 5.560152530670166,
"eval_runtime": 0.6991,
"eval_samples_per_second": 1235.91,
"eval_steps_per_second": 38.622,
"step": 215664
}
],
"logging_steps": 500,
"max_steps": 1347900,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.806432977547817e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
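
A minimal sketch (an addition, not part of the uploaded file) showing one way to inspect this trainer state with the Python standard library; the local filename "trainer_state.json" is an assumption:

    import json

    # Assumes the JSON above was saved locally as "trainer_state.json".
    with open("trainer_state.json") as f:
        state = json.load(f)

    # log_history mixes per-500-step training entries ("loss") with
    # end-of-epoch evaluation entries ("eval_loss"); split them apart.
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"logged training entries: {len(train_log)}")
    print(f"best checkpoint: {state['best_model_checkpoint']}")
    print(f"best eval loss:  {state['best_metric']:.4f}")
    for e in eval_log:
        print(f"epoch {e['epoch']:.0f}  step {e['step']}  eval_loss {e['eval_loss']:.4f}")

Run against this file, the loop prints one line per completed epoch (steps 26958, 53916, ... 215664), with the eval loss decreasing from 5.6647 at epoch 1 to 5.5602 at epoch 8, matching "best_metric" above.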