Training in progress, step 100, checkpoint (commit f292191)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5649717514124294,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005649717514124294,
"grad_norm": 0.6486179828643799,
"learning_rate": 1e-05,
"loss": 1.8002,
"step": 1
},
{
"epoch": 0.005649717514124294,
"eval_loss": 1.6410346031188965,
"eval_runtime": 17.2358,
"eval_samples_per_second": 8.703,
"eval_steps_per_second": 1.102,
"step": 1
},
{
"epoch": 0.011299435028248588,
"grad_norm": 0.5344129800796509,
"learning_rate": 2e-05,
"loss": 1.6629,
"step": 2
},
{
"epoch": 0.01694915254237288,
"grad_norm": 0.5211488008499146,
"learning_rate": 3e-05,
"loss": 1.5839,
"step": 3
},
{
"epoch": 0.022598870056497175,
"grad_norm": 0.5224741101264954,
"learning_rate": 4e-05,
"loss": 1.596,
"step": 4
},
{
"epoch": 0.02824858757062147,
"grad_norm": 0.5804663300514221,
"learning_rate": 5e-05,
"loss": 1.609,
"step": 5
},
{
"epoch": 0.03389830508474576,
"grad_norm": 0.5538197755813599,
"learning_rate": 6e-05,
"loss": 1.4777,
"step": 6
},
{
"epoch": 0.03954802259887006,
"grad_norm": 0.5609269142150879,
"learning_rate": 7e-05,
"loss": 1.5004,
"step": 7
},
{
"epoch": 0.04519774011299435,
"grad_norm": 0.61570143699646,
"learning_rate": 8e-05,
"loss": 1.5759,
"step": 8
},
{
"epoch": 0.05084745762711865,
"grad_norm": 0.6159215569496155,
"learning_rate": 9e-05,
"loss": 1.422,
"step": 9
},
{
"epoch": 0.05084745762711865,
"eval_loss": 1.5287469625473022,
"eval_runtime": 16.5372,
"eval_samples_per_second": 9.07,
"eval_steps_per_second": 1.149,
"step": 9
},
{
"epoch": 0.05649717514124294,
"grad_norm": 0.5658262372016907,
"learning_rate": 0.0001,
"loss": 1.4229,
"step": 10
},
{
"epoch": 0.062146892655367235,
"grad_norm": 0.5656306743621826,
"learning_rate": 9.99695413509548e-05,
"loss": 1.5591,
"step": 11
},
{
"epoch": 0.06779661016949153,
"grad_norm": 0.6409211754798889,
"learning_rate": 9.987820251299122e-05,
"loss": 1.4758,
"step": 12
},
{
"epoch": 0.07344632768361582,
"grad_norm": 0.6326680183410645,
"learning_rate": 9.972609476841367e-05,
"loss": 1.5493,
"step": 13
},
{
"epoch": 0.07909604519774012,
"grad_norm": 0.6271660327911377,
"learning_rate": 9.951340343707852e-05,
"loss": 1.491,
"step": 14
},
{
"epoch": 0.0847457627118644,
"grad_norm": 0.6144469380378723,
"learning_rate": 9.924038765061042e-05,
"loss": 1.5226,
"step": 15
},
{
"epoch": 0.0903954802259887,
"grad_norm": 0.6342131495475769,
"learning_rate": 9.890738003669029e-05,
"loss": 1.3165,
"step": 16
},
{
"epoch": 0.096045197740113,
"grad_norm": 0.6053822636604309,
"learning_rate": 9.851478631379982e-05,
"loss": 1.4884,
"step": 17
},
{
"epoch": 0.1016949152542373,
"grad_norm": 0.549248218536377,
"learning_rate": 9.806308479691595e-05,
"loss": 1.419,
"step": 18
},
{
"epoch": 0.1016949152542373,
"eval_loss": 1.4287166595458984,
"eval_runtime": 16.5222,
"eval_samples_per_second": 9.079,
"eval_steps_per_second": 1.15,
"step": 18
},
{
"epoch": 0.10734463276836158,
"grad_norm": 0.5126931071281433,
"learning_rate": 9.755282581475769e-05,
"loss": 1.4541,
"step": 19
},
{
"epoch": 0.11299435028248588,
"grad_norm": 0.498399555683136,
"learning_rate": 9.698463103929542e-05,
"loss": 1.3967,
"step": 20
},
{
"epoch": 0.11864406779661017,
"grad_norm": 0.5689512491226196,
"learning_rate": 9.635919272833938e-05,
"loss": 1.4162,
"step": 21
},
{
"epoch": 0.12429378531073447,
"grad_norm": 0.5674974918365479,
"learning_rate": 9.567727288213005e-05,
"loss": 1.4221,
"step": 22
},
{
"epoch": 0.12994350282485875,
"grad_norm": 0.475652813911438,
"learning_rate": 9.493970231495835e-05,
"loss": 1.3541,
"step": 23
},
{
"epoch": 0.13559322033898305,
"grad_norm": 0.48423147201538086,
"learning_rate": 9.414737964294636e-05,
"loss": 1.3972,
"step": 24
},
{
"epoch": 0.14124293785310735,
"grad_norm": 0.5211918950080872,
"learning_rate": 9.330127018922194e-05,
"loss": 1.4415,
"step": 25
},
{
"epoch": 0.14689265536723164,
"grad_norm": 0.49290183186531067,
"learning_rate": 9.24024048078213e-05,
"loss": 1.3392,
"step": 26
},
{
"epoch": 0.15254237288135594,
"grad_norm": 0.4921974837779999,
"learning_rate": 9.145187862775209e-05,
"loss": 1.358,
"step": 27
},
{
"epoch": 0.15254237288135594,
"eval_loss": 1.3978703022003174,
"eval_runtime": 16.5286,
"eval_samples_per_second": 9.075,
"eval_steps_per_second": 1.15,
"step": 27
},
{
"epoch": 0.15819209039548024,
"grad_norm": 0.4419269561767578,
"learning_rate": 9.045084971874738e-05,
"loss": 1.4269,
"step": 28
},
{
"epoch": 0.1638418079096045,
"grad_norm": 0.4582747519016266,
"learning_rate": 8.940053768033609e-05,
"loss": 1.3388,
"step": 29
},
{
"epoch": 0.1694915254237288,
"grad_norm": 0.4794791042804718,
"learning_rate": 8.83022221559489e-05,
"loss": 1.4946,
"step": 30
},
{
"epoch": 0.1751412429378531,
"grad_norm": 0.46071624755859375,
"learning_rate": 8.715724127386972e-05,
"loss": 1.3205,
"step": 31
},
{
"epoch": 0.1807909604519774,
"grad_norm": 0.5038266777992249,
"learning_rate": 8.596699001693255e-05,
"loss": 1.5066,
"step": 32
},
{
"epoch": 0.1864406779661017,
"grad_norm": 0.44110360741615295,
"learning_rate": 8.473291852294987e-05,
"loss": 1.3141,
"step": 33
},
{
"epoch": 0.192090395480226,
"grad_norm": 0.4921356439590454,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3517,
"step": 34
},
{
"epoch": 0.1977401129943503,
"grad_norm": 0.4924282431602478,
"learning_rate": 8.213938048432697e-05,
"loss": 1.3963,
"step": 35
},
{
"epoch": 0.2033898305084746,
"grad_norm": 0.48636412620544434,
"learning_rate": 8.07830737662829e-05,
"loss": 1.3429,
"step": 36
},
{
"epoch": 0.2033898305084746,
"eval_loss": 1.3836274147033691,
"eval_runtime": 16.519,
"eval_samples_per_second": 9.08,
"eval_steps_per_second": 1.15,
"step": 36
},
{
"epoch": 0.20903954802259886,
"grad_norm": 0.5329720973968506,
"learning_rate": 7.938926261462366e-05,
"loss": 1.3342,
"step": 37
},
{
"epoch": 0.21468926553672316,
"grad_norm": 0.5271207094192505,
"learning_rate": 7.795964517353735e-05,
"loss": 1.3605,
"step": 38
},
{
"epoch": 0.22033898305084745,
"grad_norm": 0.49143949151039124,
"learning_rate": 7.649596321166024e-05,
"loss": 1.3008,
"step": 39
},
{
"epoch": 0.22598870056497175,
"grad_norm": 0.5104478597640991,
"learning_rate": 7.500000000000001e-05,
"loss": 1.4657,
"step": 40
},
{
"epoch": 0.23163841807909605,
"grad_norm": 0.4765436351299286,
"learning_rate": 7.347357813929454e-05,
"loss": 1.3839,
"step": 41
},
{
"epoch": 0.23728813559322035,
"grad_norm": 0.570360004901886,
"learning_rate": 7.191855733945387e-05,
"loss": 1.5159,
"step": 42
},
{
"epoch": 0.24293785310734464,
"grad_norm": 0.4970918893814087,
"learning_rate": 7.033683215379002e-05,
"loss": 1.2854,
"step": 43
},
{
"epoch": 0.24858757062146894,
"grad_norm": 0.45877355337142944,
"learning_rate": 6.873032967079561e-05,
"loss": 1.4661,
"step": 44
},
{
"epoch": 0.2542372881355932,
"grad_norm": 0.4653535485267639,
"learning_rate": 6.710100716628344e-05,
"loss": 1.4256,
"step": 45
},
{
"epoch": 0.2542372881355932,
"eval_loss": 1.3712184429168701,
"eval_runtime": 16.5203,
"eval_samples_per_second": 9.08,
"eval_steps_per_second": 1.15,
"step": 45
},
{
"epoch": 0.2598870056497175,
"grad_norm": 0.4850931167602539,
"learning_rate": 6.545084971874738e-05,
"loss": 1.4359,
"step": 46
},
{
"epoch": 0.2655367231638418,
"grad_norm": 0.4870792031288147,
"learning_rate": 6.378186779084995e-05,
"loss": 1.324,
"step": 47
},
{
"epoch": 0.2711864406779661,
"grad_norm": 0.509199321269989,
"learning_rate": 6.209609477998338e-05,
"loss": 1.4819,
"step": 48
},
{
"epoch": 0.2768361581920904,
"grad_norm": 0.48916783928871155,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.3841,
"step": 49
},
{
"epoch": 0.2824858757062147,
"grad_norm": 0.5241059064865112,
"learning_rate": 5.868240888334653e-05,
"loss": 1.3468,
"step": 50
},
{
"epoch": 0.288135593220339,
"grad_norm": 0.4669332504272461,
"learning_rate": 5.695865504800327e-05,
"loss": 1.3887,
"step": 51
},
{
"epoch": 0.2937853107344633,
"grad_norm": 0.5064263939857483,
"learning_rate": 5.522642316338268e-05,
"loss": 1.4251,
"step": 52
},
{
"epoch": 0.2994350282485876,
"grad_norm": 0.46047088503837585,
"learning_rate": 5.348782368720626e-05,
"loss": 1.4501,
"step": 53
},
{
"epoch": 0.3050847457627119,
"grad_norm": 0.487646222114563,
"learning_rate": 5.174497483512506e-05,
"loss": 1.4195,
"step": 54
},
{
"epoch": 0.3050847457627119,
"eval_loss": 1.3669663667678833,
"eval_runtime": 16.5222,
"eval_samples_per_second": 9.079,
"eval_steps_per_second": 1.15,
"step": 54
},
{
"epoch": 0.3107344632768362,
"grad_norm": 0.4937644600868225,
"learning_rate": 5e-05,
"loss": 1.3128,
"step": 55
},
{
"epoch": 0.3163841807909605,
"grad_norm": 0.5353518128395081,
"learning_rate": 4.825502516487497e-05,
"loss": 1.3723,
"step": 56
},
{
"epoch": 0.3220338983050847,
"grad_norm": 0.5348647236824036,
"learning_rate": 4.6512176312793736e-05,
"loss": 1.29,
"step": 57
},
{
"epoch": 0.327683615819209,
"grad_norm": 0.4655182361602783,
"learning_rate": 4.477357683661734e-05,
"loss": 1.3349,
"step": 58
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.42419737577438354,
"learning_rate": 4.3041344951996746e-05,
"loss": 1.2884,
"step": 59
},
{
"epoch": 0.3389830508474576,
"grad_norm": 0.49362912774086,
"learning_rate": 4.131759111665349e-05,
"loss": 1.3669,
"step": 60
},
{
"epoch": 0.3446327683615819,
"grad_norm": 0.5063827633857727,
"learning_rate": 3.960441545911204e-05,
"loss": 1.3261,
"step": 61
},
{
"epoch": 0.3502824858757062,
"grad_norm": 0.498791366815567,
"learning_rate": 3.790390522001662e-05,
"loss": 1.3106,
"step": 62
},
{
"epoch": 0.3559322033898305,
"grad_norm": 0.48362022638320923,
"learning_rate": 3.6218132209150045e-05,
"loss": 1.3544,
"step": 63
},
{
"epoch": 0.3559322033898305,
"eval_loss": 1.3615933656692505,
"eval_runtime": 16.5254,
"eval_samples_per_second": 9.077,
"eval_steps_per_second": 1.15,
"step": 63
},
{
"epoch": 0.3615819209039548,
"grad_norm": 0.5257571935653687,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4127,
"step": 64
},
{
"epoch": 0.3672316384180791,
"grad_norm": 0.45339685678482056,
"learning_rate": 3.289899283371657e-05,
"loss": 1.3731,
"step": 65
},
{
"epoch": 0.3728813559322034,
"grad_norm": 0.46512871980667114,
"learning_rate": 3.12696703292044e-05,
"loss": 1.3005,
"step": 66
},
{
"epoch": 0.3785310734463277,
"grad_norm": 0.4432275593280792,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.3082,
"step": 67
},
{
"epoch": 0.384180790960452,
"grad_norm": 0.5095959305763245,
"learning_rate": 2.8081442660546125e-05,
"loss": 1.2312,
"step": 68
},
{
"epoch": 0.3898305084745763,
"grad_norm": 0.4842095673084259,
"learning_rate": 2.6526421860705473e-05,
"loss": 1.3686,
"step": 69
},
{
"epoch": 0.3954802259887006,
"grad_norm": 0.4719487726688385,
"learning_rate": 2.500000000000001e-05,
"loss": 1.2836,
"step": 70
},
{
"epoch": 0.4011299435028249,
"grad_norm": 0.4679429829120636,
"learning_rate": 2.350403678833976e-05,
"loss": 1.3177,
"step": 71
},
{
"epoch": 0.4067796610169492,
"grad_norm": 0.4831387996673584,
"learning_rate": 2.2040354826462668e-05,
"loss": 1.3919,
"step": 72
},
{
"epoch": 0.4067796610169492,
"eval_loss": 1.3577146530151367,
"eval_runtime": 16.5505,
"eval_samples_per_second": 9.063,
"eval_steps_per_second": 1.148,
"step": 72
},
{
"epoch": 0.4124293785310734,
"grad_norm": 0.4939872622489929,
"learning_rate": 2.061073738537635e-05,
"loss": 1.3774,
"step": 73
},
{
"epoch": 0.4180790960451977,
"grad_norm": 0.4667295217514038,
"learning_rate": 1.9216926233717085e-05,
"loss": 1.4589,
"step": 74
},
{
"epoch": 0.423728813559322,
"grad_norm": 0.47072529792785645,
"learning_rate": 1.7860619515673033e-05,
"loss": 1.3581,
"step": 75
},
{
"epoch": 0.4293785310734463,
"grad_norm": 0.41899287700653076,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.301,
"step": 76
},
{
"epoch": 0.4350282485875706,
"grad_norm": 0.486950159072876,
"learning_rate": 1.526708147705013e-05,
"loss": 1.379,
"step": 77
},
{
"epoch": 0.4406779661016949,
"grad_norm": 0.4415300786495209,
"learning_rate": 1.4033009983067452e-05,
"loss": 1.4033,
"step": 78
},
{
"epoch": 0.4463276836158192,
"grad_norm": 0.4912453889846802,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.3361,
"step": 79
},
{
"epoch": 0.4519774011299435,
"grad_norm": 0.4673634171485901,
"learning_rate": 1.1697777844051105e-05,
"loss": 1.4221,
"step": 80
},
{
"epoch": 0.4576271186440678,
"grad_norm": 0.4738830029964447,
"learning_rate": 1.0599462319663905e-05,
"loss": 1.3336,
"step": 81
},
{
"epoch": 0.4576271186440678,
"eval_loss": 1.3546689748764038,
"eval_runtime": 16.5231,
"eval_samples_per_second": 9.078,
"eval_steps_per_second": 1.15,
"step": 81
},
{
"epoch": 0.4632768361581921,
"grad_norm": 0.4595194458961487,
"learning_rate": 9.549150281252633e-06,
"loss": 1.3226,
"step": 82
},
{
"epoch": 0.4689265536723164,
"grad_norm": 0.46952760219573975,
"learning_rate": 8.548121372247918e-06,
"loss": 1.4328,
"step": 83
},
{
"epoch": 0.4745762711864407,
"grad_norm": 0.4681397080421448,
"learning_rate": 7.597595192178702e-06,
"loss": 1.4563,
"step": 84
},
{
"epoch": 0.480225988700565,
"grad_norm": 0.49031302332878113,
"learning_rate": 6.698729810778065e-06,
"loss": 1.3676,
"step": 85
},
{
"epoch": 0.4858757062146893,
"grad_norm": 0.5184950232505798,
"learning_rate": 5.852620357053651e-06,
"loss": 1.3509,
"step": 86
},
{
"epoch": 0.4915254237288136,
"grad_norm": 0.4513101577758789,
"learning_rate": 5.060297685041659e-06,
"loss": 1.3306,
"step": 87
},
{
"epoch": 0.4971751412429379,
"grad_norm": 0.5032865405082703,
"learning_rate": 4.322727117869951e-06,
"loss": 1.2823,
"step": 88
},
{
"epoch": 0.5028248587570622,
"grad_norm": 0.4926997125148773,
"learning_rate": 3.6408072716606346e-06,
"loss": 1.4277,
"step": 89
},
{
"epoch": 0.5084745762711864,
"grad_norm": 0.4777621924877167,
"learning_rate": 3.0153689607045845e-06,
"loss": 1.34,
"step": 90
},
{
"epoch": 0.5084745762711864,
"eval_loss": 1.3536385297775269,
"eval_runtime": 16.5216,
"eval_samples_per_second": 9.079,
"eval_steps_per_second": 1.15,
"step": 90
},
{
"epoch": 0.5141242937853108,
"grad_norm": 0.4962153434753418,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.3593,
"step": 91
},
{
"epoch": 0.519774011299435,
"grad_norm": 0.49940770864486694,
"learning_rate": 1.9369152030840556e-06,
"loss": 1.3884,
"step": 92
},
{
"epoch": 0.5254237288135594,
"grad_norm": 0.44710472226142883,
"learning_rate": 1.4852136862001764e-06,
"loss": 1.3047,
"step": 93
},
{
"epoch": 0.5310734463276836,
"grad_norm": 0.49211978912353516,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.3709,
"step": 94
},
{
"epoch": 0.536723163841808,
"grad_norm": 0.45334574580192566,
"learning_rate": 7.596123493895991e-07,
"loss": 1.4069,
"step": 95
},
{
"epoch": 0.5423728813559322,
"grad_norm": 0.4448023736476898,
"learning_rate": 4.865965629214819e-07,
"loss": 1.3452,
"step": 96
},
{
"epoch": 0.5480225988700564,
"grad_norm": 0.507058322429657,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.459,
"step": 97
},
{
"epoch": 0.5536723163841808,
"grad_norm": 0.43171530961990356,
"learning_rate": 1.2179748700879012e-07,
"loss": 1.2972,
"step": 98
},
{
"epoch": 0.559322033898305,
"grad_norm": 0.5051499605178833,
"learning_rate": 3.04586490452119e-08,
"loss": 1.4438,
"step": 99
},
{
"epoch": 0.559322033898305,
"eval_loss": 1.3532921075820923,
"eval_runtime": 16.5539,
"eval_samples_per_second": 9.061,
"eval_steps_per_second": 1.148,
"step": 99
},
{
"epoch": 0.5649717514124294,
"grad_norm": 0.49200373888015747,
"learning_rate": 0.0,
"loss": 1.4128,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.41887283560448e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
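
The `trainer_state.json` above logs one training entry per step (`logging_steps: 1`) and one evaluation entry every 9 steps (`eval_steps: 9`). Below is a minimal sketch for inspecting it, assuming the JSON is saved locally as `trainer_state.json`; the filename, the optional matplotlib plotting, and the output path `loss_curves.png` are illustrative choices, not part of the checkpoint itself.

```python
import json

# Load the checkpoint's trainer state (assumed saved as "trainer_state.json").
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes two kinds of entries: training logs carry "loss",
# evaluation logs carry "eval_loss". Split them for separate curves.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"trained for {state['global_step']} of {state['max_steps']} steps "
      f"(epoch {state['epoch']:.3f})")
print(f"final train loss: {train_logs[-1]['loss']:.4f}")
print(f"final eval loss:  {eval_logs[-1]['eval_loss']:.4f}")

# Optionally plot both curves if matplotlib is installed.
try:
    import matplotlib.pyplot as plt

    plt.plot([e["step"] for e in train_logs],
             [e["loss"] for e in train_logs], label="train loss")
    plt.plot([e["step"] for e in eval_logs],
             [e["eval_loss"] for e in eval_logs], label="eval loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curves.png")
except ImportError:
    pass
```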