ll_classifier_v3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 4956,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.041969330104923326,
"grad_norm": 13.489130973815918,
"learning_rate": 4.930051116491795e-05,
"loss": 3.1245,
"step": 104
},
{
"epoch": 0.08393866020984665,
"grad_norm": 9.602147102355957,
"learning_rate": 4.8601022329835895e-05,
"loss": 2.919,
"step": 208
},
{
"epoch": 0.12590799031476999,
"grad_norm": 8.955652236938477,
"learning_rate": 4.7901533494753834e-05,
"loss": 2.865,
"step": 312
},
{
"epoch": 0.1678773204196933,
"grad_norm": 9.70827579498291,
"learning_rate": 4.720204465967178e-05,
"loss": 2.8891,
"step": 416
},
{
"epoch": 0.20984665052461662,
"grad_norm": 9.555304527282715,
"learning_rate": 4.6502555824589726e-05,
"loss": 2.8066,
"step": 520
},
{
"epoch": 0.25181598062953997,
"grad_norm": 7.922400951385498,
"learning_rate": 4.580306698950767e-05,
"loss": 2.775,
"step": 624
},
{
"epoch": 0.2937853107344633,
"grad_norm": 12.1842041015625,
"learning_rate": 4.510357815442561e-05,
"loss": 2.7193,
"step": 728
},
{
"epoch": 0.3357546408393866,
"grad_norm": 63.8215446472168,
"learning_rate": 4.4404089319343565e-05,
"loss": 2.9175,
"step": 832
},
{
"epoch": 0.37772397094430993,
"grad_norm": 8.268672943115234,
"learning_rate": 4.3704600484261504e-05,
"loss": 2.8031,
"step": 936
},
{
"epoch": 0.41969330104923325,
"grad_norm": 10.684550285339355,
"learning_rate": 4.300511164917945e-05,
"loss": 2.8335,
"step": 1040
},
{
"epoch": 0.46166263115415657,
"grad_norm": 15.027052879333496,
"learning_rate": 4.230562281409739e-05,
"loss": 2.7266,
"step": 1144
},
{
"epoch": 0.5036319612590799,
"grad_norm": 11.817206382751465,
"learning_rate": 4.1606133979015336e-05,
"loss": 2.6513,
"step": 1248
},
{
"epoch": 0.5456012913640033,
"grad_norm": 13.562594413757324,
"learning_rate": 4.090664514393328e-05,
"loss": 2.6641,
"step": 1352
},
{
"epoch": 0.5875706214689266,
"grad_norm": 18.43484878540039,
"learning_rate": 4.020715630885122e-05,
"loss": 2.7054,
"step": 1456
},
{
"epoch": 0.6295399515738499,
"grad_norm": 8.890129089355469,
"learning_rate": 3.9507667473769174e-05,
"loss": 2.676,
"step": 1560
},
{
"epoch": 0.6715092816787732,
"grad_norm": 7.976245403289795,
"learning_rate": 3.8808178638687113e-05,
"loss": 2.6769,
"step": 1664
},
{
"epoch": 0.7134786117836965,
"grad_norm": 16.992326736450195,
"learning_rate": 3.810868980360506e-05,
"loss": 2.7139,
"step": 1768
},
{
"epoch": 0.7554479418886199,
"grad_norm": 30.8548526763916,
"learning_rate": 3.7409200968523006e-05,
"loss": 2.5842,
"step": 1872
},
{
"epoch": 0.7974172719935432,
"grad_norm": 19.750226974487305,
"learning_rate": 3.670971213344095e-05,
"loss": 2.5971,
"step": 1976
},
{
"epoch": 0.8393866020984665,
"grad_norm": 8.788566589355469,
"learning_rate": 3.601022329835889e-05,
"loss": 2.583,
"step": 2080
},
{
"epoch": 0.8813559322033898,
"grad_norm": 11.469756126403809,
"learning_rate": 3.531073446327684e-05,
"loss": 2.567,
"step": 2184
},
{
"epoch": 0.9233252623083131,
"grad_norm": 11.528902053833008,
"learning_rate": 3.4611245628194784e-05,
"loss": 2.6214,
"step": 2288
},
{
"epoch": 0.9652945924132365,
"grad_norm": 13.25121021270752,
"learning_rate": 3.391175679311272e-05,
"loss": 2.6326,
"step": 2392
},
{
"epoch": 1.0,
"eval_accuracy": 0.22509578544061304,
"eval_loss": 2.676828145980835,
"eval_runtime": 18.7388,
"eval_samples_per_second": 55.713,
"eval_steps_per_second": 6.991,
"step": 2478
},
{
"epoch": 1.0072639225181599,
"grad_norm": 7.456902980804443,
"learning_rate": 3.3212267958030676e-05,
"loss": 2.5498,
"step": 2496
},
{
"epoch": 1.0492332526230832,
"grad_norm": 14.628376960754395,
"learning_rate": 3.2512779122948615e-05,
"loss": 2.5027,
"step": 2600
},
{
"epoch": 1.0912025827280065,
"grad_norm": 17.166057586669922,
"learning_rate": 3.181329028786656e-05,
"loss": 2.4437,
"step": 2704
},
{
"epoch": 1.1331719128329298,
"grad_norm": 8.214387893676758,
"learning_rate": 3.111380145278451e-05,
"loss": 2.4622,
"step": 2808
},
{
"epoch": 1.1751412429378532,
"grad_norm": 12.298638343811035,
"learning_rate": 3.041431261770245e-05,
"loss": 2.4839,
"step": 2912
},
{
"epoch": 1.2171105730427765,
"grad_norm": 10.392972946166992,
"learning_rate": 2.9714823782620393e-05,
"loss": 2.4826,
"step": 3016
},
{
"epoch": 1.2590799031476998,
"grad_norm": 10.585667610168457,
"learning_rate": 2.9015334947538336e-05,
"loss": 2.4243,
"step": 3120
},
{
"epoch": 1.3010492332526231,
"grad_norm": 10.92833137512207,
"learning_rate": 2.8315846112456285e-05,
"loss": 2.4804,
"step": 3224
},
{
"epoch": 1.3430185633575464,
"grad_norm": 10.050086975097656,
"learning_rate": 2.7616357277374228e-05,
"loss": 2.4112,
"step": 3328
},
{
"epoch": 1.3849878934624698,
"grad_norm": 8.787351608276367,
"learning_rate": 2.691686844229217e-05,
"loss": 2.3694,
"step": 3432
},
{
"epoch": 1.426957223567393,
"grad_norm": 10.800174713134766,
"learning_rate": 2.621737960721012e-05,
"loss": 2.4382,
"step": 3536
},
{
"epoch": 1.4689265536723164,
"grad_norm": 10.92818832397461,
"learning_rate": 2.5517890772128063e-05,
"loss": 2.432,
"step": 3640
},
{
"epoch": 1.5108958837772397,
"grad_norm": 12.755341529846191,
"learning_rate": 2.4818401937046006e-05,
"loss": 2.4453,
"step": 3744
},
{
"epoch": 1.552865213882163,
"grad_norm": 12.561775207519531,
"learning_rate": 2.411891310196395e-05,
"loss": 2.3501,
"step": 3848
},
{
"epoch": 1.5948345439870864,
"grad_norm": 11.288426399230957,
"learning_rate": 2.3419424266881895e-05,
"loss": 2.4433,
"step": 3952
},
{
"epoch": 1.6368038740920097,
"grad_norm": 13.944038391113281,
"learning_rate": 2.271993543179984e-05,
"loss": 2.3851,
"step": 4056
},
{
"epoch": 1.678773204196933,
"grad_norm": 12.919787406921387,
"learning_rate": 2.2020446596717783e-05,
"loss": 2.4121,
"step": 4160
},
{
"epoch": 1.7207425343018563,
"grad_norm": 19.511533737182617,
"learning_rate": 2.132095776163573e-05,
"loss": 2.3032,
"step": 4264
},
{
"epoch": 1.7627118644067796,
"grad_norm": 13.31141471862793,
"learning_rate": 2.0621468926553672e-05,
"loss": 2.3692,
"step": 4368
},
{
"epoch": 1.804681194511703,
"grad_norm": 14.81092357635498,
"learning_rate": 1.992198009147162e-05,
"loss": 2.341,
"step": 4472
},
{
"epoch": 1.8466505246166263,
"grad_norm": 16.881967544555664,
"learning_rate": 1.9222491256389565e-05,
"loss": 2.3533,
"step": 4576
},
{
"epoch": 1.8886198547215496,
"grad_norm": 13.03023624420166,
"learning_rate": 1.8523002421307507e-05,
"loss": 2.3374,
"step": 4680
},
{
"epoch": 1.9305891848264731,
"grad_norm": 9.3682861328125,
"learning_rate": 1.782351358622545e-05,
"loss": 2.3585,
"step": 4784
},
{
"epoch": 1.9725585149313964,
"grad_norm": 13.498007774353027,
"learning_rate": 1.7124024751143396e-05,
"loss": 2.3383,
"step": 4888
},
{
"epoch": 2.0,
"eval_accuracy": 0.2835249042145594,
"eval_loss": 2.457916259765625,
"eval_runtime": 18.7722,
"eval_samples_per_second": 55.614,
"eval_steps_per_second": 6.978,
"step": 4956
}
],
"logging_steps": 104,
"max_steps": 7434,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0431630640472064e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
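
The file above is the standard trainer_state.json written by the Hugging Face Trainer: training-loss entries are logged every 104 steps and the per-epoch entries carry the eval metrics. As a minimal sketch (assuming the file sits in the working directory as trainer_state.json and that matplotlib is installed), the snippet below loads the state, splits training entries from eval entries by the field names used in the JSON above, and plots loss against global step.

import json

import matplotlib.pyplot as plt

# Load the trainer state dumped by transformers.Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

log = state["log_history"]

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_steps = [e["step"] for e in log if "loss" in e]
train_loss = [e["loss"] for e in log if "loss" in e]
eval_steps = [e["step"] for e in log if "eval_loss" in e]
eval_loss = [e["eval_loss"] for e in log if "eval_loss" in e]

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, "o-", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()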