{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 100,
  "global_step": 1250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.016, "grad_norm": 6.320438861846924, "learning_rate": 2.99952628392495e-06, "loss": 1.3054, "step": 10 },
    { "epoch": 0.032, "grad_norm": 5.475564956665039, "learning_rate": 2.9981054349090266e-06, "loss": 0.6821, "step": 20 },
    { "epoch": 0.048, "grad_norm": 5.251683235168457, "learning_rate": 2.995738350390921e-06, "loss": 0.6935, "step": 30 },
    { "epoch": 0.064, "grad_norm": 5.057413578033447, "learning_rate": 2.9924265254719506e-06, "loss": 0.6776, "step": 40 },
    { "epoch": 0.08, "grad_norm": 4.680449962615967, "learning_rate": 2.988172051971717e-06, "loss": 0.6582, "step": 50 },
    { "epoch": 0.096, "grad_norm": 5.521228313446045, "learning_rate": 2.982977617106871e-06, "loss": 0.6972, "step": 60 },
    { "epoch": 0.112, "grad_norm": 4.839247226715088, "learning_rate": 2.9768465017938084e-06, "loss": 0.6749, "step": 70 },
    { "epoch": 0.128, "grad_norm": 5.12637996673584, "learning_rate": 2.9697825785763704e-06, "loss": 0.6863, "step": 80 },
    { "epoch": 0.144, "grad_norm": 4.7851152420043945, "learning_rate": 2.961790309179866e-06, "loss": 0.6785, "step": 90 },
    { "epoch": 0.16, "grad_norm": 4.761219501495361, "learning_rate": 2.9528747416929465e-06, "loss": 0.6727, "step": 100 },
    { "epoch": 0.16, "eval_loss": 0.6868906021118164, "eval_runtime": 8.6852, "eval_samples_per_second": 230.276, "eval_steps_per_second": 14.392, "step": 100 },
    { "epoch": 0.176, "grad_norm": 4.815207004547119, "learning_rate": 2.943041507379129e-06, "loss": 0.6787, "step": 110 },
    { "epoch": 0.192, "grad_norm": 4.663937568664551, "learning_rate": 2.9322968171199645e-06, "loss": 0.6772, "step": 120 },
    { "epoch": 0.208, "grad_norm": 5.208789348602295, "learning_rate": 2.9206474574921165e-06, "loss": 0.656, "step": 130 },
    { "epoch": 0.224, "grad_norm": 4.607890605926514, "learning_rate": 2.9081007864808113e-06, "loss": 0.6792, "step": 140 },
    { "epoch": 0.24, "grad_norm": 5.205310821533203, "learning_rate": 2.894664728832377e-06, "loss": 0.6989, "step": 150 },
    { "epoch": 0.256, "grad_norm": 4.700341701507568, "learning_rate": 2.8803477710488056e-06, "loss": 0.6673, "step": 160 },
    { "epoch": 0.272, "grad_norm": 4.271303653717041, "learning_rate": 2.8651589560274937e-06, "loss": 0.6743, "step": 170 },
    { "epoch": 0.288, "grad_norm": 4.5935468673706055, "learning_rate": 2.8491078773495566e-06, "loss": 0.6634, "step": 180 },
    { "epoch": 0.304, "grad_norm": 4.505198955535889, "learning_rate": 2.832204673220317e-06, "loss": 0.6487, "step": 190 },
    { "epoch": 0.32, "grad_norm": 4.946630477905273, "learning_rate": 2.814460020065795e-06, "loss": 0.6056, "step": 200 },
    { "epoch": 0.32, "eval_loss": 0.6831114292144775, "eval_runtime": 9.3989, "eval_samples_per_second": 212.791, "eval_steps_per_second": 13.299, "step": 200 },
    { "epoch": 0.336, "grad_norm": 4.913090229034424, "learning_rate": 2.795885125789253e-06, "loss": 0.652, "step": 210 },
    { "epoch": 0.352, "grad_norm": 4.906637191772461, "learning_rate": 2.776491722692038e-06, "loss": 0.6599, "step": 220 },
    { "epoch": 0.368, "grad_norm": 4.214770317077637, "learning_rate": 2.756292060063213e-06, "loss": 0.6539, "step": 230 },
    { "epoch": 0.384, "grad_norm": 4.808592796325684, "learning_rate": 2.735298896442641e-06, "loss": 0.6666, "step": 240 },
    { "epoch": 0.4, "grad_norm": 4.6029839515686035, "learning_rate": 2.713525491562421e-06, "loss": 0.6924, "step": 250 },
    { "epoch": 0.416, "grad_norm": 4.828456878662109, "learning_rate": 2.690985597971753e-06, "loss": 0.6937, "step": 260 },
    { "epoch": 0.432, "grad_norm": 4.152997970581055, "learning_rate": 2.6676934523505355e-06, "loss": 0.6664, "step": 270 },
    { "epoch": 0.448, "grad_norm": 4.614770412445068, "learning_rate": 2.643663766517172e-06, "loss": 0.6722, "step": 280 },
    { "epoch": 0.464, "grad_norm": 4.493509292602539, "learning_rate": 2.6189117181362736e-06, "loss": 0.6689, "step": 290 },
    { "epoch": 0.48, "grad_norm": 4.5999274253845215, "learning_rate": 2.5934529411321173e-06, "loss": 0.7033, "step": 300 },
    { "epoch": 0.48, "eval_loss": 0.6796725988388062, "eval_runtime": 9.6974, "eval_samples_per_second": 206.241, "eval_steps_per_second": 12.89, "step": 300 },
    { "epoch": 0.496, "grad_norm": 4.2827935218811035, "learning_rate": 2.5673035158139285e-06, "loss": 0.6596, "step": 310 },
    { "epoch": 0.512, "grad_norm": 4.7542805671691895, "learning_rate": 2.5404799587192076e-06, "loss": 0.6549, "step": 320 },
    { "epoch": 0.528, "grad_norm": 4.7150492668151855, "learning_rate": 2.5129992121815365e-06, "loss": 0.6757, "step": 330 },
    { "epoch": 0.544, "grad_norm": 4.777836322784424, "learning_rate": 2.484878633629435e-06, "loss": 0.6806, "step": 340 },
    { "epoch": 0.56, "grad_norm": 4.323108196258545, "learning_rate": 2.456135984623035e-06, "loss": 0.6629, "step": 350 },
    { "epoch": 0.576, "grad_norm": 4.968432426452637, "learning_rate": 2.4267894196355018e-06, "loss": 0.6858, "step": 360 },
    { "epoch": 0.592, "grad_norm": 4.666640758514404, "learning_rate": 2.3968574745862785e-06, "loss": 0.665, "step": 370 },
    { "epoch": 0.608, "grad_norm": 4.655545234680176, "learning_rate": 2.3663590551334015e-06, "loss": 0.6661, "step": 380 },
    { "epoch": 0.624, "grad_norm": 4.554004669189453, "learning_rate": 2.3353134247322823e-06, "loss": 0.6559, "step": 390 },
    { "epoch": 0.64, "grad_norm": 4.362437725067139, "learning_rate": 2.303740192468495e-06, "loss": 0.6786, "step": 400 },
    { "epoch": 0.64, "eval_loss": 0.6770616173744202, "eval_runtime": 7.3123, "eval_samples_per_second": 273.512, "eval_steps_per_second": 17.095, "step": 400 },
    { "epoch": 0.656, "grad_norm": 4.237730503082275, "learning_rate": 2.2716593006722595e-06, "loss": 0.6456, "step": 410 },
    { "epoch": 0.672, "grad_norm": 4.062387943267822, "learning_rate": 2.2390910123224374e-06, "loss": 0.6767, "step": 420 },
    { "epoch": 0.688, "grad_norm": 4.175485134124756, "learning_rate": 2.2060558982479992e-06, "loss": 0.6389, "step": 430 },
    { "epoch": 0.704, "grad_norm": 4.427236080169678, "learning_rate": 2.1725748241350487e-06, "loss": 0.6403, "step": 440 },
    { "epoch": 0.72, "grad_norm": 4.3691511154174805, "learning_rate": 2.138668937347609e-06, "loss": 0.6559, "step": 450 },
    { "epoch": 0.736, "grad_norm": 4.3593430519104, "learning_rate": 2.1043596535704943e-06, "loss": 0.6787, "step": 460 },
    { "epoch": 0.752, "grad_norm": 4.350930213928223, "learning_rate": 2.069668643282702e-06, "loss": 0.6975, "step": 470 },
    { "epoch": 0.768, "grad_norm": 4.215636730194092, "learning_rate": 2.034617818069876e-06, "loss": 0.684, "step": 480 },
    { "epoch": 0.784, "grad_norm": 3.999462127685547, "learning_rate": 1.99922931678448e-06, "loss": 0.6607, "step": 490 },
    { "epoch": 0.8, "grad_norm": 4.186202526092529, "learning_rate": 1.963525491562421e-06, "loss": 0.6476, "step": 500 },
    { "epoch": 0.8, "eval_loss": 0.6736403107643127, "eval_runtime": 7.3017, "eval_samples_per_second": 273.908, "eval_steps_per_second": 17.119, "step": 500 },
    { "epoch": 0.816, "grad_norm": 4.060866355895996, "learning_rate": 1.927528893704964e-06, "loss": 0.6591, "step": 510 },
    { "epoch": 0.832, "grad_norm": 4.125776767730713, "learning_rate": 1.8912622594348455e-06, "loss": 0.6922, "step": 520 },
    { "epoch": 0.848, "grad_norm": 3.8623437881469727, "learning_rate": 1.8547484955355872e-06, "loss": 0.6513, "step": 530 },
    { "epoch": 0.864, "grad_norm": 4.550040245056152, "learning_rate": 1.8180106648830824e-06, "loss": 0.663, "step": 540 },
    { "epoch": 0.88, "grad_norm": 4.267393589019775, "learning_rate": 1.7810719718785873e-06, "loss": 0.6685, "step": 550 },
    { "epoch": 0.896, "grad_norm": 4.428436756134033, "learning_rate": 1.7439557477923257e-06, "loss": 0.7051, "step": 560 },
    { "epoch": 0.912, "grad_norm": 4.695955276489258, "learning_rate": 1.706685436026957e-06, "loss": 0.6251, "step": 570 },
    { "epoch": 0.928, "grad_norm": 4.578221797943115, "learning_rate": 1.6692845773102223e-06, "loss": 0.6569, "step": 580 },
    { "epoch": 0.944, "grad_norm": 4.15753173828125, "learning_rate": 1.6317767948261151e-06, "loss": 0.6451, "step": 590 },
    { "epoch": 0.96, "grad_norm": 3.946852922439575, "learning_rate": 1.5941857792939703e-06, "loss": 0.6562, "step": 600 },
    { "epoch": 0.96, "eval_loss": 0.6707749962806702, "eval_runtime": 7.4417, "eval_samples_per_second": 268.756, "eval_steps_per_second": 16.797, "step": 600 },
    { "epoch": 0.976, "grad_norm": 4.214157581329346, "learning_rate": 1.556535274004902e-06, "loss": 0.6556, "step": 610 },
    { "epoch": 0.992, "grad_norm": 4.3779616355896, "learning_rate": 1.518849059825029e-06, "loss": 0.6764, "step": 620 },
    { "epoch": 1.008, "grad_norm": 3.7831478118896484, "learning_rate": 1.481150940174971e-06, "loss": 0.5361, "step": 630 },
    { "epoch": 1.024, "grad_norm": 4.746358394622803, "learning_rate": 1.4434647259950982e-06, "loss": 0.4773, "step": 640 },
    { "epoch": 1.04, "grad_norm": 4.30097770690918, "learning_rate": 1.40581422070603e-06, "loss": 0.4829, "step": 650 },
    { "epoch": 1.056, "grad_norm": 4.781858444213867, "learning_rate": 1.3682232051738854e-06, "loss": 0.4635, "step": 660 },
    { "epoch": 1.072, "grad_norm": 4.712700366973877, "learning_rate": 1.3307154226897775e-06, "loss": 0.4775, "step": 670 },
    { "epoch": 1.088, "grad_norm": 4.640942573547363, "learning_rate": 1.293314563973043e-06, "loss": 0.4421, "step": 680 },
    { "epoch": 1.104, "grad_norm": 5.104272365570068, "learning_rate": 1.2560442522076746e-06, "loss": 0.446, "step": 690 },
    { "epoch": 1.12, "grad_norm": 5.069291591644287, "learning_rate": 1.2189280281214128e-06, "loss": 0.461, "step": 700 },
    { "epoch": 1.12, "eval_loss": 0.7041329145431519, "eval_runtime": 7.9231, "eval_samples_per_second": 252.428, "eval_steps_per_second": 15.777, "step": 700 },
    { "epoch": 1.1360000000000001, "grad_norm": 5.07060432434082, "learning_rate": 1.1819893351169183e-06, "loss": 0.4614, "step": 710 },
    { "epoch": 1.152, "grad_norm": 6.156511306762695, "learning_rate": 1.1452515044644133e-06, "loss": 0.4785, "step": 720 },
    { "epoch": 1.168, "grad_norm": 5.145558834075928, "learning_rate": 1.108737740565155e-06, "loss": 0.4463, "step": 730 },
    { "epoch": 1.184, "grad_norm": 4.276706218719482, "learning_rate": 1.0724711062950359e-06, "loss": 0.4595, "step": 740 },
    { "epoch": 1.2, "grad_norm": 5.321038722991943, "learning_rate": 1.036474508437579e-06, "loss": 0.4687, "step": 750 },
    { "epoch": 1.216, "grad_norm": 4.610326766967773, "learning_rate": 1.0007706832155202e-06, "loss": 0.4293, "step": 760 },
    { "epoch": 1.232, "grad_norm": 5.0618462562561035, "learning_rate": 9.65382181930124e-07, "loss": 0.4636, "step": 770 },
    { "epoch": 1.248, "grad_norm": 4.6133036613464355, "learning_rate": 9.303313567172986e-07, "loss": 0.4578, "step": 780 },
    { "epoch": 1.264, "grad_norm": 4.833690166473389, "learning_rate": 8.956403464295061e-07, "loss": 0.4511, "step": 790 },
    { "epoch": 1.28, "grad_norm": 4.997283935546875, "learning_rate": 8.613310626523911e-07, "loss": 0.4578, "step": 800 },
    { "epoch": 1.28, "eval_loss": 0.7093445062637329, "eval_runtime": 9.5227, "eval_samples_per_second": 210.025, "eval_steps_per_second": 13.127, "step": 800 },
    { "epoch": 1.296, "grad_norm": 5.087992191314697, "learning_rate": 8.274251758649519e-07, "loss": 0.4858, "step": 810 },
    { "epoch": 1.312, "grad_norm": 5.305168628692627, "learning_rate": 7.939441017520012e-07, "loss": 0.4756, "step": 820 },
    { "epoch": 1.328, "grad_norm": 4.691494464874268, "learning_rate": 7.609089876775628e-07, "loss": 0.4593, "step": 830 },
    { "epoch": 1.3439999999999999, "grad_norm": 6.369288921356201, "learning_rate": 7.283406993277403e-07, "loss": 0.4461, "step": 840 },
    { "epoch": 1.3599999999999999, "grad_norm": 4.588751792907715, "learning_rate": 6.962598075315047e-07, "loss": 0.4569, "step": 850 },
    { "epoch": 1.376, "grad_norm": 4.815459728240967, "learning_rate": 6.646865752677186e-07, "loss": 0.4209, "step": 860 },
    { "epoch": 1.392, "grad_norm": 4.491628170013428, "learning_rate": 6.336409448665989e-07, "loss": 0.4768, "step": 870 },
    { "epoch": 1.408, "grad_norm": 5.085056304931641, "learning_rate": 6.031425254137223e-07, "loss": 0.4539, "step": 880 },
    { "epoch": 1.424, "grad_norm": 4.508426189422607, "learning_rate": 5.732105803644987e-07, "loss": 0.4588, "step": 890 },
    { "epoch": 1.44, "grad_norm": 4.900942325592041, "learning_rate": 5.438640153769653e-07, "loss": 0.4817, "step": 900 },
    { "epoch": 1.44, "eval_loss": 0.7055138945579529, "eval_runtime": 9.5816, "eval_samples_per_second": 208.733, "eval_steps_per_second": 13.046, "step": 900 },
    { "epoch": 1.456, "grad_norm": 5.169991970062256, "learning_rate": 5.151213663705655e-07, "loss": 0.4608, "step": 910 },
    { "epoch": 1.472, "grad_norm": 5.345458030700684, "learning_rate": 4.870007878184633e-07, "loss": 0.4687, "step": 920 },
    { "epoch": 1.488, "grad_norm": 5.129513263702393, "learning_rate": 4.5952004128079276e-07, "loss": 0.4677, "step": 930 },
    { "epoch": 1.504, "grad_norm": 4.392848491668701, "learning_rate": 4.3269648418607197e-07, "loss": 0.4612, "step": 940 },
    { "epoch": 1.52, "grad_norm": 4.760442733764648, "learning_rate": 4.06547058867883e-07, "loss": 0.4864, "step": 950 },
    { "epoch": 1.536, "grad_norm": 5.239096164703369, "learning_rate": 3.8108828186372685e-07, "loss": 0.4576, "step": 960 },
    { "epoch": 1.552, "grad_norm": 4.897254467010498, "learning_rate": 3.56336233482828e-07, "loss": 0.4518, "step": 970 },
    { "epoch": 1.568, "grad_norm": 5.0241804122924805, "learning_rate": 3.32306547649465e-07, "loss": 0.454, "step": 980 },
    { "epoch": 1.584, "grad_norm": 5.055388450622559, "learning_rate": 3.0901440202824693e-07, "loss": 0.4808, "step": 990 },
    { "epoch": 1.6, "grad_norm": 4.961503982543945, "learning_rate": 2.86474508437579e-07, "loss": 0.4324, "step": 1000 },
    { "epoch": 1.6, "eval_loss": 0.7080456614494324, "eval_runtime": 7.9696, "eval_samples_per_second": 250.955, "eval_steps_per_second": 15.685, "step": 1000 },
    { "epoch": 1.616, "grad_norm": 4.625386714935303, "learning_rate": 2.647011035573588e-07, "loss": 0.4707, "step": 1010 },
    { "epoch": 1.6320000000000001, "grad_norm": 5.237055778503418, "learning_rate": 2.437079399367875e-07, "loss": 0.4575, "step": 1020 },
    { "epoch": 1.6480000000000001, "grad_norm": 5.412177562713623, "learning_rate": 2.235082773079624e-07, "loss": 0.4786, "step": 1030 },
    { "epoch": 1.6640000000000001, "grad_norm": 5.122972011566162, "learning_rate": 2.0411487421074708e-07, "loss": 0.4961, "step": 1040 },
    { "epoch": 1.6800000000000002, "grad_norm": 4.920769214630127, "learning_rate": 1.8553997993420495e-07, "loss": 0.4494, "step": 1050 },
    { "epoch": 1.696, "grad_norm": 4.990310192108154, "learning_rate": 1.6779532677968329e-07, "loss": 0.4713, "step": 1060 },
    { "epoch": 1.712, "grad_norm": 4.39508581161499, "learning_rate": 1.508921226504434e-07, "loss": 0.4633, "step": 1070 },
    { "epoch": 1.728, "grad_norm": 5.249621391296387, "learning_rate": 1.348410439725065e-07, "loss": 0.4764, "step": 1080 },
    { "epoch": 1.744, "grad_norm": 5.508373260498047, "learning_rate": 1.1965222895119444e-07, "loss": 0.4793, "step": 1090 },
    { "epoch": 1.76, "grad_norm": 5.088216781616211, "learning_rate": 1.0533527116762298e-07, "loss": 0.4693, "step": 1100 },
    { "epoch": 1.76, "eval_loss": 0.7081010937690735, "eval_runtime": 7.6853, "eval_samples_per_second": 260.237, "eval_steps_per_second": 16.265, "step": 1100 },
    { "epoch": 1.776, "grad_norm": 5.710309028625488, "learning_rate": 9.18992135191889e-08, "loss": 0.4405, "step": 1110 },
    { "epoch": 1.792, "grad_norm": 4.6689982414245605, "learning_rate": 7.935254250788366e-08, "loss": 0.4484, "step": 1120 },
    { "epoch": 1.808, "grad_norm": 4.682315349578857, "learning_rate": 6.770318288003558e-08, "loss": 0.4458, "step": 1130 },
    { "epoch": 1.8239999999999998, "grad_norm": 5.43618106842041, "learning_rate": 5.6958492620871105e-08, "loss": 0.4914, "step": 1140 },
    { "epoch": 1.8399999999999999, "grad_norm": 4.98464822769165, "learning_rate": 4.712525830705339e-08, "loss": 0.4567, "step": 1150 },
    { "epoch": 1.8559999999999999, "grad_norm": 4.657787799835205, "learning_rate": 3.820969082013415e-08, "loss": 0.452, "step": 1160 },
    { "epoch": 1.8719999999999999, "grad_norm": 4.8955559730529785, "learning_rate": 3.021742142362971e-08, "loss": 0.463, "step": 1170 },
    { "epoch": 1.888, "grad_norm": 5.363377094268799, "learning_rate": 2.3153498206192002e-08, "loss": 0.4489, "step": 1180 },
    { "epoch": 1.904, "grad_norm": 4.830941677093506, "learning_rate": 1.7022382893129074e-08, "loss": 0.4594, "step": 1190 },
    { "epoch": 1.92, "grad_norm": 5.2024827003479, "learning_rate": 1.1827948028283353e-08, "loss": 0.4475, "step": 1200 },
    { "epoch": 1.92, "eval_loss": 0.707037091255188, "eval_runtime": 7.654, "eval_samples_per_second": 261.3, "eval_steps_per_second": 16.331, "step": 1200 },
    { "epoch": 1.936, "grad_norm": 4.656322956085205, "learning_rate": 7.57347452804974e-09, "loss": 0.4583, "step": 1210 },
    { "epoch": 1.952, "grad_norm": 5.396835803985596, "learning_rate": 4.261649609079099e-09, "loss": 0.4136, "step": 1220 },
    { "epoch": 1.968, "grad_norm": 4.812420845031738, "learning_rate": 1.8945650909737986e-09, "loss": 0.4706, "step": 1230 },
    { "epoch": 1.984, "grad_norm": 4.871044635772705, "learning_rate": 4.737160750500902e-10, "loss": 0.4637, "step": 1240 },
    { "epoch": 2.0, "grad_norm": 4.772403240203857, "learning_rate": 0.0, "loss": 0.4707, "step": 1250 },
    { "epoch": 2.0, "step": 1250, "total_flos": 4.566411071245517e+16, "train_loss": 0.5696872653961181, "train_runtime": 1519.873, "train_samples_per_second": 13.159, "train_steps_per_second": 0.822 }
  ],
  "logging_steps": 10,
  "max_steps": 1250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.566411071245517e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}