{
"best_metric": 0.15427330136299133,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.21141649048625794,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0021141649048625794,
"grad_norm": 6.459140777587891,
"learning_rate": 1e-06,
"loss": 0.7812,
"step": 1
},
{
"epoch": 0.0021141649048625794,
"eval_loss": 1.4333610534667969,
"eval_runtime": 16.3362,
"eval_samples_per_second": 12.182,
"eval_steps_per_second": 3.061,
"step": 1
},
{
"epoch": 0.004228329809725159,
"grad_norm": 11.925323486328125,
"learning_rate": 2e-06,
"loss": 1.0607,
"step": 2
},
{
"epoch": 0.006342494714587738,
"grad_norm": 11.720135688781738,
"learning_rate": 3e-06,
"loss": 1.1449,
"step": 3
},
{
"epoch": 0.008456659619450317,
"grad_norm": 13.44752025604248,
"learning_rate": 4e-06,
"loss": 1.3492,
"step": 4
},
{
"epoch": 0.010570824524312896,
"grad_norm": 9.380379676818848,
"learning_rate": 4.9999999999999996e-06,
"loss": 0.9138,
"step": 5
},
{
"epoch": 0.012684989429175475,
"grad_norm": 11.46026611328125,
"learning_rate": 6e-06,
"loss": 1.1924,
"step": 6
},
{
"epoch": 0.014799154334038054,
"grad_norm": 11.76174259185791,
"learning_rate": 7e-06,
"loss": 1.1333,
"step": 7
},
{
"epoch": 0.016913319238900635,
"grad_norm": 9.97977066040039,
"learning_rate": 8e-06,
"loss": 0.901,
"step": 8
},
{
"epoch": 0.019027484143763214,
"grad_norm": 12.512874603271484,
"learning_rate": 9e-06,
"loss": 0.8938,
"step": 9
},
{
"epoch": 0.021141649048625793,
"grad_norm": 9.057429313659668,
"learning_rate": 9.999999999999999e-06,
"loss": 0.6049,
"step": 10
},
{
"epoch": 0.023255813953488372,
"grad_norm": 6.595938682556152,
"learning_rate": 1.1e-05,
"loss": 0.5438,
"step": 11
},
{
"epoch": 0.02536997885835095,
"grad_norm": 3.882584571838379,
"learning_rate": 1.2e-05,
"loss": 0.3101,
"step": 12
},
{
"epoch": 0.02748414376321353,
"grad_norm": 2.7945213317871094,
"learning_rate": 1.3000000000000001e-05,
"loss": 0.2377,
"step": 13
},
{
"epoch": 0.02959830866807611,
"grad_norm": 2.9702303409576416,
"learning_rate": 1.4e-05,
"loss": 0.241,
"step": 14
},
{
"epoch": 0.03171247357293869,
"grad_norm": 1.6650919914245605,
"learning_rate": 1.5e-05,
"loss": 0.1683,
"step": 15
},
{
"epoch": 0.03382663847780127,
"grad_norm": 1.3479413986206055,
"learning_rate": 1.6e-05,
"loss": 0.1654,
"step": 16
},
{
"epoch": 0.035940803382663845,
"grad_norm": 1.9006567001342773,
"learning_rate": 1.7e-05,
"loss": 0.1907,
"step": 17
},
{
"epoch": 0.03805496828752643,
"grad_norm": 0.9984680414199829,
"learning_rate": 1.8e-05,
"loss": 0.1304,
"step": 18
},
{
"epoch": 0.040169133192389,
"grad_norm": 3.5023303031921387,
"learning_rate": 1.9e-05,
"loss": 0.1606,
"step": 19
},
{
"epoch": 0.042283298097251586,
"grad_norm": 3.85945987701416,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.157,
"step": 20
},
{
"epoch": 0.04439746300211417,
"grad_norm": 1.6296824216842651,
"learning_rate": 2.1e-05,
"loss": 0.1808,
"step": 21
},
{
"epoch": 0.046511627906976744,
"grad_norm": 1.0764927864074707,
"learning_rate": 2.2e-05,
"loss": 0.1336,
"step": 22
},
{
"epoch": 0.048625792811839326,
"grad_norm": 1.0031847953796387,
"learning_rate": 2.3000000000000003e-05,
"loss": 0.1067,
"step": 23
},
{
"epoch": 0.0507399577167019,
"grad_norm": 1.4380738735198975,
"learning_rate": 2.4e-05,
"loss": 0.1375,
"step": 24
},
{
"epoch": 0.052854122621564484,
"grad_norm": 4.694511890411377,
"learning_rate": 2.5e-05,
"loss": 0.3519,
"step": 25
},
{
"epoch": 0.05496828752642706,
"grad_norm": 4.3105597496032715,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.3183,
"step": 26
},
{
"epoch": 0.05708245243128964,
"grad_norm": 6.309661388397217,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.2612,
"step": 27
},
{
"epoch": 0.05919661733615222,
"grad_norm": 2.288469076156616,
"learning_rate": 2.8e-05,
"loss": 0.1406,
"step": 28
},
{
"epoch": 0.0613107822410148,
"grad_norm": 1.6597330570220947,
"learning_rate": 2.9e-05,
"loss": 0.1428,
"step": 29
},
{
"epoch": 0.06342494714587738,
"grad_norm": 1.5649864673614502,
"learning_rate": 3e-05,
"loss": 0.1474,
"step": 30
},
{
"epoch": 0.06553911205073996,
"grad_norm": 0.9382965564727783,
"learning_rate": 2.9984895998119723e-05,
"loss": 0.1487,
"step": 31
},
{
"epoch": 0.06765327695560254,
"grad_norm": 2.024787187576294,
"learning_rate": 2.993961440992859e-05,
"loss": 0.1662,
"step": 32
},
{
"epoch": 0.06976744186046512,
"grad_norm": 0.768234372138977,
"learning_rate": 2.9864246426519023e-05,
"loss": 0.1607,
"step": 33
},
{
"epoch": 0.07188160676532769,
"grad_norm": 4.649656772613525,
"learning_rate": 2.9758943828979444e-05,
"loss": 0.2874,
"step": 34
},
{
"epoch": 0.07399577167019028,
"grad_norm": 6.809800148010254,
"learning_rate": 2.9623918682727355e-05,
"loss": 0.1592,
"step": 35
},
{
"epoch": 0.07610993657505286,
"grad_norm": 0.9345769882202148,
"learning_rate": 2.9459442910437798e-05,
"loss": 0.1477,
"step": 36
},
{
"epoch": 0.07822410147991543,
"grad_norm": 0.6217982769012451,
"learning_rate": 2.9265847744427305e-05,
"loss": 0.1162,
"step": 37
},
{
"epoch": 0.080338266384778,
"grad_norm": 0.593116819858551,
"learning_rate": 2.904352305959606e-05,
"loss": 0.1915,
"step": 38
},
{
"epoch": 0.0824524312896406,
"grad_norm": 0.6891128420829773,
"learning_rate": 2.8792916588271762e-05,
"loss": 0.179,
"step": 39
},
{
"epoch": 0.08456659619450317,
"grad_norm": 0.4601763188838959,
"learning_rate": 2.8514533018536286e-05,
"loss": 0.1387,
"step": 40
},
{
"epoch": 0.08668076109936575,
"grad_norm": 0.5080468654632568,
"learning_rate": 2.820893297785107e-05,
"loss": 0.1572,
"step": 41
},
{
"epoch": 0.08879492600422834,
"grad_norm": 6.51386833190918,
"learning_rate": 2.7876731904027994e-05,
"loss": 0.1897,
"step": 42
},
{
"epoch": 0.09090909090909091,
"grad_norm": 0.8196491003036499,
"learning_rate": 2.7518598805819542e-05,
"loss": 0.1766,
"step": 43
},
{
"epoch": 0.09302325581395349,
"grad_norm": 10.984284400939941,
"learning_rate": 2.7135254915624213e-05,
"loss": 0.2437,
"step": 44
},
{
"epoch": 0.09513742071881606,
"grad_norm": 1.1500269174575806,
"learning_rate": 2.672747223702045e-05,
"loss": 0.1754,
"step": 45
},
{
"epoch": 0.09725158562367865,
"grad_norm": 0.9413855075836182,
"learning_rate": 2.6296071990054167e-05,
"loss": 0.2031,
"step": 46
},
{
"epoch": 0.09936575052854123,
"grad_norm": 0.5329380035400391,
"learning_rate": 2.5841922957410875e-05,
"loss": 0.1199,
"step": 47
},
{
"epoch": 0.1014799154334038,
"grad_norm": 2.4237194061279297,
"learning_rate": 2.5365939734802973e-05,
"loss": 0.3149,
"step": 48
},
{
"epoch": 0.10359408033826638,
"grad_norm": 0.9596079587936401,
"learning_rate": 2.4869080889095693e-05,
"loss": 0.2,
"step": 49
},
{
"epoch": 0.10570824524312897,
"grad_norm": 0.9466848969459534,
"learning_rate": 2.4352347027881003e-05,
"loss": 0.2231,
"step": 50
},
{
"epoch": 0.10570824524312897,
"eval_loss": 0.15802142024040222,
"eval_runtime": 16.125,
"eval_samples_per_second": 12.341,
"eval_steps_per_second": 3.101,
"step": 50
},
{
"epoch": 0.10782241014799154,
"grad_norm": 0.9042031168937683,
"learning_rate": 2.3816778784387097e-05,
"loss": 0.1909,
"step": 51
},
{
"epoch": 0.10993657505285412,
"grad_norm": 2.605374574661255,
"learning_rate": 2.3263454721781537e-05,
"loss": 0.1462,
"step": 52
},
{
"epoch": 0.11205073995771671,
"grad_norm": 0.618105411529541,
"learning_rate": 2.2693489161088592e-05,
"loss": 0.2167,
"step": 53
},
{
"epoch": 0.11416490486257928,
"grad_norm": 0.24392980337142944,
"learning_rate": 2.210802993709498e-05,
"loss": 0.1316,
"step": 54
},
{
"epoch": 0.11627906976744186,
"grad_norm": 0.21554243564605713,
"learning_rate": 2.1508256086763372e-05,
"loss": 0.126,
"step": 55
},
{
"epoch": 0.11839323467230443,
"grad_norm": 0.241543248295784,
"learning_rate": 2.0895375474808857e-05,
"loss": 0.1109,
"step": 56
},
{
"epoch": 0.12050739957716702,
"grad_norm": 0.3184583783149719,
"learning_rate": 2.0270622361220143e-05,
"loss": 0.1138,
"step": 57
},
{
"epoch": 0.1226215644820296,
"grad_norm": 0.32614850997924805,
"learning_rate": 1.963525491562421e-05,
"loss": 0.1389,
"step": 58
},
{
"epoch": 0.12473572938689217,
"grad_norm": 0.3486217260360718,
"learning_rate": 1.8990552683500128e-05,
"loss": 0.139,
"step": 59
},
{
"epoch": 0.12684989429175475,
"grad_norm": 0.3227207660675049,
"learning_rate": 1.8337814009344716e-05,
"loss": 0.1433,
"step": 60
},
{
"epoch": 0.12896405919661733,
"grad_norm": 0.4387953579425812,
"learning_rate": 1.767835342197955e-05,
"loss": 0.1673,
"step": 61
},
{
"epoch": 0.13107822410147993,
"grad_norm": 0.5242079496383667,
"learning_rate": 1.7013498987264832e-05,
"loss": 0.1236,
"step": 62
},
{
"epoch": 0.1331923890063425,
"grad_norm": 0.4730517864227295,
"learning_rate": 1.6344589633551502e-05,
"loss": 0.148,
"step": 63
},
{
"epoch": 0.13530655391120508,
"grad_norm": 0.7624608278274536,
"learning_rate": 1.5672972455257726e-05,
"loss": 0.1519,
"step": 64
},
{
"epoch": 0.13742071881606766,
"grad_norm": 0.43941426277160645,
"learning_rate": 1.5e-05,
"loss": 0.1183,
"step": 65
},
{
"epoch": 0.13953488372093023,
"grad_norm": 0.6278932094573975,
"learning_rate": 1.4327027544742281e-05,
"loss": 0.1535,
"step": 66
},
{
"epoch": 0.1416490486257928,
"grad_norm": 0.5535889863967896,
"learning_rate": 1.36554103664485e-05,
"loss": 0.1329,
"step": 67
},
{
"epoch": 0.14376321353065538,
"grad_norm": 0.6688005328178406,
"learning_rate": 1.2986501012735174e-05,
"loss": 0.157,
"step": 68
},
{
"epoch": 0.14587737843551796,
"grad_norm": 0.40927010774612427,
"learning_rate": 1.2321646578020452e-05,
"loss": 0.1283,
"step": 69
},
{
"epoch": 0.14799154334038056,
"grad_norm": 0.5657337307929993,
"learning_rate": 1.1662185990655285e-05,
"loss": 0.1415,
"step": 70
},
{
"epoch": 0.15010570824524314,
"grad_norm": 0.909653902053833,
"learning_rate": 1.1009447316499875e-05,
"loss": 0.2031,
"step": 71
},
{
"epoch": 0.1522198731501057,
"grad_norm": 0.7087647318840027,
"learning_rate": 1.036474508437579e-05,
"loss": 0.1479,
"step": 72
},
{
"epoch": 0.1543340380549683,
"grad_norm": 0.8850060701370239,
"learning_rate": 9.729377638779859e-06,
"loss": 0.166,
"step": 73
},
{
"epoch": 0.15644820295983086,
"grad_norm": 0.7543150782585144,
"learning_rate": 9.104624525191147e-06,
"loss": 0.1362,
"step": 74
},
{
"epoch": 0.15856236786469344,
"grad_norm": 0.83201003074646,
"learning_rate": 8.491743913236629e-06,
"loss": 0.1328,
"step": 75
},
{
"epoch": 0.160676532769556,
"grad_norm": 2.301429510116577,
"learning_rate": 7.89197006290502e-06,
"loss": 0.1032,
"step": 76
},
{
"epoch": 0.16279069767441862,
"grad_norm": 0.9384440779685974,
"learning_rate": 7.30651083891141e-06,
"loss": 0.1566,
"step": 77
},
{
"epoch": 0.1649048625792812,
"grad_norm": 1.3696162700653076,
"learning_rate": 6.736545278218464e-06,
"loss": 0.1467,
"step": 78
},
{
"epoch": 0.16701902748414377,
"grad_norm": 3.9601619243621826,
"learning_rate": 6.1832212156129045e-06,
"loss": 0.3389,
"step": 79
},
{
"epoch": 0.16913319238900634,
"grad_norm": 1.9955356121063232,
"learning_rate": 5.647652972118998e-06,
"loss": 0.1527,
"step": 80
},
{
"epoch": 0.17124735729386892,
"grad_norm": 1.0902405977249146,
"learning_rate": 5.130919110904311e-06,
"loss": 0.1259,
"step": 81
},
{
"epoch": 0.1733615221987315,
"grad_norm": 1.0407441854476929,
"learning_rate": 4.6340602651970304e-06,
"loss": 0.1474,
"step": 82
},
{
"epoch": 0.17547568710359407,
"grad_norm": 2.1278271675109863,
"learning_rate": 4.158077042589129e-06,
"loss": 0.1673,
"step": 83
},
{
"epoch": 0.17758985200845667,
"grad_norm": 2.6037819385528564,
"learning_rate": 3.7039280099458373e-06,
"loss": 0.1636,
"step": 84
},
{
"epoch": 0.17970401691331925,
"grad_norm": 1.4455296993255615,
"learning_rate": 3.272527762979553e-06,
"loss": 0.1374,
"step": 85
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.9174126982688904,
"learning_rate": 2.86474508437579e-06,
"loss": 0.1216,
"step": 86
},
{
"epoch": 0.1839323467230444,
"grad_norm": 1.9632937908172607,
"learning_rate": 2.4814011941804603e-06,
"loss": 0.1475,
"step": 87
},
{
"epoch": 0.18604651162790697,
"grad_norm": 1.6227281093597412,
"learning_rate": 2.1232680959720085e-06,
"loss": 0.141,
"step": 88
},
{
"epoch": 0.18816067653276955,
"grad_norm": 1.872657060623169,
"learning_rate": 1.79106702214893e-06,
"loss": 0.1461,
"step": 89
},
{
"epoch": 0.19027484143763213,
"grad_norm": 1.4686189889907837,
"learning_rate": 1.4854669814637145e-06,
"loss": 0.1545,
"step": 90
},
{
"epoch": 0.19238900634249473,
"grad_norm": 1.4979490041732788,
"learning_rate": 1.2070834117282414e-06,
"loss": 0.1281,
"step": 91
},
{
"epoch": 0.1945031712473573,
"grad_norm": 1.4667952060699463,
"learning_rate": 9.56476940403942e-07,
"loss": 0.1528,
"step": 92
},
{
"epoch": 0.19661733615221988,
"grad_norm": 1.0837724208831787,
"learning_rate": 7.341522555726971e-07,
"loss": 0.144,
"step": 93
},
{
"epoch": 0.19873150105708245,
"grad_norm": 1.2933568954467773,
"learning_rate": 5.405570895622014e-07,
"loss": 0.1902,
"step": 94
},
{
"epoch": 0.20084566596194503,
"grad_norm": 3.0054140090942383,
"learning_rate": 3.760813172726457e-07,
"loss": 0.1994,
"step": 95
},
{
"epoch": 0.2029598308668076,
"grad_norm": 2.414536714553833,
"learning_rate": 2.41056171020555e-07,
"loss": 0.1413,
"step": 96
},
{
"epoch": 0.20507399577167018,
"grad_norm": 2.2516028881073,
"learning_rate": 1.357535734809795e-07,
"loss": 0.1668,
"step": 97
},
{
"epoch": 0.20718816067653276,
"grad_norm": 1.4251919984817505,
"learning_rate": 6.038559007141397e-08,
"loss": 0.1955,
"step": 98
},
{
"epoch": 0.20930232558139536,
"grad_norm": 1.7404764890670776,
"learning_rate": 1.510400188028116e-08,
"loss": 0.1756,
"step": 99
},
{
"epoch": 0.21141649048625794,
"grad_norm": 0.9121055603027344,
"learning_rate": 0.0,
"loss": 0.1873,
"step": 100
},
{
"epoch": 0.21141649048625794,
"eval_loss": 0.15427330136299133,
"eval_runtime": 16.1464,
"eval_samples_per_second": 12.325,
"eval_steps_per_second": 3.097,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.77128394686464e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}