{
"best_metric": 1.3278324604034424,
"best_model_checkpoint": "miner_id_24/checkpoint-400",
"epoch": 0.026940108770689163,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 6.73502719267229e-05,
"grad_norm": 2.030458927154541,
"learning_rate": 1e-05,
"loss": 2.2072,
"step": 1
},
{
"epoch": 6.73502719267229e-05,
"eval_loss": 3.1524617671966553,
"eval_runtime": 1690.2881,
"eval_samples_per_second": 14.795,
"eval_steps_per_second": 3.699,
"step": 1
},
{
"epoch": 0.0001347005438534458,
"grad_norm": 2.239384412765503,
"learning_rate": 2e-05,
"loss": 2.3997,
"step": 2
},
{
"epoch": 0.0002020508157801687,
"grad_norm": 2.486010789871216,
"learning_rate": 3e-05,
"loss": 2.4882,
"step": 3
},
{
"epoch": 0.0002694010877068916,
"grad_norm": 2.396327495574951,
"learning_rate": 4e-05,
"loss": 2.4251,
"step": 4
},
{
"epoch": 0.00033675135963361454,
"grad_norm": 2.505659580230713,
"learning_rate": 5e-05,
"loss": 2.3752,
"step": 5
},
{
"epoch": 0.0004041016315603374,
"grad_norm": 2.2823729515075684,
"learning_rate": 6e-05,
"loss": 2.3087,
"step": 6
},
{
"epoch": 0.00047145190348706035,
"grad_norm": 2.2923173904418945,
"learning_rate": 7e-05,
"loss": 2.0596,
"step": 7
},
{
"epoch": 0.0005388021754137832,
"grad_norm": 2.598090410232544,
"learning_rate": 8e-05,
"loss": 1.9093,
"step": 8
},
{
"epoch": 0.0006061524473405061,
"grad_norm": 2.5075840950012207,
"learning_rate": 9e-05,
"loss": 1.5863,
"step": 9
},
{
"epoch": 0.0006735027192672291,
"grad_norm": 1.9895573854446411,
"learning_rate": 0.0001,
"loss": 1.7257,
"step": 10
},
{
"epoch": 0.000740852991193952,
"grad_norm": 2.011659622192383,
"learning_rate": 9.99983777858264e-05,
"loss": 1.6529,
"step": 11
},
{
"epoch": 0.0008082032631206748,
"grad_norm": 2.3391153812408447,
"learning_rate": 9.999351124856874e-05,
"loss": 1.5113,
"step": 12
},
{
"epoch": 0.0008755535350473977,
"grad_norm": 1.9589104652404785,
"learning_rate": 9.998540070400966e-05,
"loss": 1.5592,
"step": 13
},
{
"epoch": 0.0009429038069741207,
"grad_norm": 1.6105904579162598,
"learning_rate": 9.997404667843075e-05,
"loss": 1.6131,
"step": 14
},
{
"epoch": 0.0010102540789008435,
"grad_norm": 1.6169345378875732,
"learning_rate": 9.995944990857849e-05,
"loss": 1.645,
"step": 15
},
{
"epoch": 0.0010776043508275665,
"grad_norm": 1.6247849464416504,
"learning_rate": 9.994161134161634e-05,
"loss": 1.5378,
"step": 16
},
{
"epoch": 0.0011449546227542894,
"grad_norm": 1.5837732553482056,
"learning_rate": 9.992053213506334e-05,
"loss": 1.6621,
"step": 17
},
{
"epoch": 0.0012123048946810122,
"grad_norm": 1.5088438987731934,
"learning_rate": 9.989621365671902e-05,
"loss": 1.4937,
"step": 18
},
{
"epoch": 0.0012796551666077352,
"grad_norm": 1.3942575454711914,
"learning_rate": 9.986865748457457e-05,
"loss": 1.4834,
"step": 19
},
{
"epoch": 0.0013470054385344582,
"grad_norm": 1.6120097637176514,
"learning_rate": 9.983786540671051e-05,
"loss": 1.5793,
"step": 20
},
{
"epoch": 0.001414355710461181,
"grad_norm": 1.7541120052337646,
"learning_rate": 9.980383942118066e-05,
"loss": 1.4375,
"step": 21
},
{
"epoch": 0.001481705982387904,
"grad_norm": 1.7503563165664673,
"learning_rate": 9.976658173588244e-05,
"loss": 1.6468,
"step": 22
},
{
"epoch": 0.0015490562543146267,
"grad_norm": 1.4936574697494507,
"learning_rate": 9.972609476841367e-05,
"loss": 1.3518,
"step": 23
},
{
"epoch": 0.0016164065262413497,
"grad_norm": 1.5292739868164062,
"learning_rate": 9.968238114591566e-05,
"loss": 1.4441,
"step": 24
},
{
"epoch": 0.0016837567981680727,
"grad_norm": 1.5529022216796875,
"learning_rate": 9.96354437049027e-05,
"loss": 1.4522,
"step": 25
},
{
"epoch": 0.0017511070700947954,
"grad_norm": 1.448577880859375,
"learning_rate": 9.95852854910781e-05,
"loss": 1.3866,
"step": 26
},
{
"epoch": 0.0018184573420215184,
"grad_norm": 1.4345710277557373,
"learning_rate": 9.953190975913647e-05,
"loss": 1.4611,
"step": 27
},
{
"epoch": 0.0018858076139482414,
"grad_norm": 1.4968644380569458,
"learning_rate": 9.947531997255256e-05,
"loss": 1.485,
"step": 28
},
{
"epoch": 0.001953157885874964,
"grad_norm": 1.4573954343795776,
"learning_rate": 9.941551980335652e-05,
"loss": 1.7,
"step": 29
},
{
"epoch": 0.002020508157801687,
"grad_norm": 1.665392279624939,
"learning_rate": 9.935251313189564e-05,
"loss": 1.6596,
"step": 30
},
{
"epoch": 0.00208785842972841,
"grad_norm": 1.3012564182281494,
"learning_rate": 9.928630404658255e-05,
"loss": 1.2388,
"step": 31
},
{
"epoch": 0.002155208701655133,
"grad_norm": 1.714619755744934,
"learning_rate": 9.921689684362989e-05,
"loss": 1.5723,
"step": 32
},
{
"epoch": 0.0022225589735818557,
"grad_norm": 1.408392310142517,
"learning_rate": 9.914429602677162e-05,
"loss": 1.5278,
"step": 33
},
{
"epoch": 0.002289909245508579,
"grad_norm": 1.3923598527908325,
"learning_rate": 9.906850630697068e-05,
"loss": 1.4961,
"step": 34
},
{
"epoch": 0.0023572595174353016,
"grad_norm": 1.5535411834716797,
"learning_rate": 9.898953260211338e-05,
"loss": 1.5434,
"step": 35
},
{
"epoch": 0.0024246097893620244,
"grad_norm": 1.6934750080108643,
"learning_rate": 9.890738003669029e-05,
"loss": 1.5558,
"step": 36
},
{
"epoch": 0.0024919600612887476,
"grad_norm": 1.5046813488006592,
"learning_rate": 9.882205394146361e-05,
"loss": 1.3927,
"step": 37
},
{
"epoch": 0.0025593103332154704,
"grad_norm": 1.781728744506836,
"learning_rate": 9.87335598531214e-05,
"loss": 1.6191,
"step": 38
},
{
"epoch": 0.002626660605142193,
"grad_norm": 1.8981614112854004,
"learning_rate": 9.864190351391822e-05,
"loss": 1.5438,
"step": 39
},
{
"epoch": 0.0026940108770689163,
"grad_norm": 1.7245213985443115,
"learning_rate": 9.85470908713026e-05,
"loss": 1.7798,
"step": 40
},
{
"epoch": 0.002761361148995639,
"grad_norm": 1.7648022174835205,
"learning_rate": 9.844912807753104e-05,
"loss": 1.6861,
"step": 41
},
{
"epoch": 0.002828711420922362,
"grad_norm": 1.7356939315795898,
"learning_rate": 9.834802148926882e-05,
"loss": 1.736,
"step": 42
},
{
"epoch": 0.002896061692849085,
"grad_norm": 1.753619909286499,
"learning_rate": 9.824377766717759e-05,
"loss": 1.6496,
"step": 43
},
{
"epoch": 0.002963411964775808,
"grad_norm": 1.9407163858413696,
"learning_rate": 9.813640337548954e-05,
"loss": 1.7003,
"step": 44
},
{
"epoch": 0.0030307622367025306,
"grad_norm": 1.6801741123199463,
"learning_rate": 9.802590558156862e-05,
"loss": 1.668,
"step": 45
},
{
"epoch": 0.0030981125086292534,
"grad_norm": 1.9571483135223389,
"learning_rate": 9.791229145545831e-05,
"loss": 1.6165,
"step": 46
},
{
"epoch": 0.0031654627805559766,
"grad_norm": 2.2739717960357666,
"learning_rate": 9.779556836941645e-05,
"loss": 1.6873,
"step": 47
},
{
"epoch": 0.0032328130524826994,
"grad_norm": 2.0475990772247314,
"learning_rate": 9.767574389743682e-05,
"loss": 1.5453,
"step": 48
},
{
"epoch": 0.003300163324409422,
"grad_norm": 2.1875641345977783,
"learning_rate": 9.755282581475769e-05,
"loss": 1.5151,
"step": 49
},
{
"epoch": 0.0033675135963361453,
"grad_norm": 3.4982423782348633,
"learning_rate": 9.742682209735727e-05,
"loss": 1.5134,
"step": 50
},
{
"epoch": 0.0033675135963361453,
"eval_loss": 1.4206453561782837,
"eval_runtime": 1693.3046,
"eval_samples_per_second": 14.768,
"eval_steps_per_second": 3.692,
"step": 50
},
{
"epoch": 0.003434863868262868,
"grad_norm": 2.413867950439453,
"learning_rate": 9.729774092143627e-05,
"loss": 1.3517,
"step": 51
},
{
"epoch": 0.003502214140189591,
"grad_norm": 1.2003623247146606,
"learning_rate": 9.716559066288715e-05,
"loss": 1.109,
"step": 52
},
{
"epoch": 0.003569564412116314,
"grad_norm": 1.154285192489624,
"learning_rate": 9.703037989675087e-05,
"loss": 1.1729,
"step": 53
},
{
"epoch": 0.003636914684043037,
"grad_norm": 1.2117410898208618,
"learning_rate": 9.689211739666023e-05,
"loss": 1.2341,
"step": 54
},
{
"epoch": 0.0037042649559697596,
"grad_norm": 1.291092872619629,
"learning_rate": 9.675081213427076e-05,
"loss": 1.213,
"step": 55
},
{
"epoch": 0.003771615227896483,
"grad_norm": 1.0849498510360718,
"learning_rate": 9.66064732786784e-05,
"loss": 1.0593,
"step": 56
},
{
"epoch": 0.0038389654998232056,
"grad_norm": 1.084412693977356,
"learning_rate": 9.645911019582467e-05,
"loss": 1.1513,
"step": 57
},
{
"epoch": 0.003906315771749928,
"grad_norm": 1.2769420146942139,
"learning_rate": 9.630873244788883e-05,
"loss": 1.317,
"step": 58
},
{
"epoch": 0.0039736660436766515,
"grad_norm": 1.097766399383545,
"learning_rate": 9.615534979266745e-05,
"loss": 1.2195,
"step": 59
},
{
"epoch": 0.004041016315603374,
"grad_norm": 1.1258280277252197,
"learning_rate": 9.599897218294122e-05,
"loss": 1.2284,
"step": 60
},
{
"epoch": 0.004108366587530097,
"grad_norm": 1.2113773822784424,
"learning_rate": 9.583960976582913e-05,
"loss": 1.3336,
"step": 61
},
{
"epoch": 0.00417571685945682,
"grad_norm": 1.1779729127883911,
"learning_rate": 9.567727288213005e-05,
"loss": 1.2489,
"step": 62
},
{
"epoch": 0.004243067131383543,
"grad_norm": 1.2701348066329956,
"learning_rate": 9.551197206565173e-05,
"loss": 1.496,
"step": 63
},
{
"epoch": 0.004310417403310266,
"grad_norm": 1.2010540962219238,
"learning_rate": 9.534371804252728e-05,
"loss": 1.2049,
"step": 64
},
{
"epoch": 0.004377767675236989,
"grad_norm": 1.1773868799209595,
"learning_rate": 9.517252173051911e-05,
"loss": 1.2613,
"step": 65
},
{
"epoch": 0.004445117947163711,
"grad_norm": 1.1867501735687256,
"learning_rate": 9.49983942383106e-05,
"loss": 1.5148,
"step": 66
},
{
"epoch": 0.0045124682190904345,
"grad_norm": 1.1402329206466675,
"learning_rate": 9.482134686478519e-05,
"loss": 1.1267,
"step": 67
},
{
"epoch": 0.004579818491017158,
"grad_norm": 1.2517718076705933,
"learning_rate": 9.464139109829321e-05,
"loss": 1.4253,
"step": 68
},
{
"epoch": 0.00464716876294388,
"grad_norm": 1.2350069284439087,
"learning_rate": 9.445853861590647e-05,
"loss": 1.3885,
"step": 69
},
{
"epoch": 0.004714519034870603,
"grad_norm": 1.1498900651931763,
"learning_rate": 9.42728012826605e-05,
"loss": 1.2204,
"step": 70
},
{
"epoch": 0.0047818693067973265,
"grad_norm": 1.2308297157287598,
"learning_rate": 9.408419115078471e-05,
"loss": 1.519,
"step": 71
},
{
"epoch": 0.004849219578724049,
"grad_norm": 1.3052582740783691,
"learning_rate": 9.389272045892024e-05,
"loss": 1.3961,
"step": 72
},
{
"epoch": 0.004916569850650772,
"grad_norm": 1.3931699991226196,
"learning_rate": 9.36984016313259e-05,
"loss": 1.3947,
"step": 73
},
{
"epoch": 0.004983920122577495,
"grad_norm": 1.1981548070907593,
"learning_rate": 9.350124727707197e-05,
"loss": 1.1178,
"step": 74
},
{
"epoch": 0.0050512703945042176,
"grad_norm": 1.209280252456665,
"learning_rate": 9.330127018922194e-05,
"loss": 1.5509,
"step": 75
},
{
"epoch": 0.005118620666430941,
"grad_norm": 1.3320664167404175,
"learning_rate": 9.309848334400246e-05,
"loss": 1.3596,
"step": 76
},
{
"epoch": 0.005185970938357664,
"grad_norm": 1.3996878862380981,
"learning_rate": 9.289289989996133e-05,
"loss": 1.4188,
"step": 77
},
{
"epoch": 0.005253321210284386,
"grad_norm": 1.3204342126846313,
"learning_rate": 9.268453319711363e-05,
"loss": 1.3423,
"step": 78
},
{
"epoch": 0.0053206714822111095,
"grad_norm": 1.2412264347076416,
"learning_rate": 9.247339675607605e-05,
"loss": 1.4367,
"step": 79
},
{
"epoch": 0.005388021754137833,
"grad_norm": 1.2513152360916138,
"learning_rate": 9.225950427718975e-05,
"loss": 1.3262,
"step": 80
},
{
"epoch": 0.005455372026064555,
"grad_norm": 1.1856772899627686,
"learning_rate": 9.204286963963111e-05,
"loss": 1.3741,
"step": 81
},
{
"epoch": 0.005522722297991278,
"grad_norm": 1.1955853700637817,
"learning_rate": 9.182350690051133e-05,
"loss": 1.4515,
"step": 82
},
{
"epoch": 0.005590072569918001,
"grad_norm": 1.2296124696731567,
"learning_rate": 9.160143029396422e-05,
"loss": 1.3032,
"step": 83
},
{
"epoch": 0.005657422841844724,
"grad_norm": 1.264333724975586,
"learning_rate": 9.13766542302225e-05,
"loss": 1.2194,
"step": 84
},
{
"epoch": 0.005724773113771447,
"grad_norm": 1.3912543058395386,
"learning_rate": 9.114919329468282e-05,
"loss": 1.4172,
"step": 85
},
{
"epoch": 0.00579212338569817,
"grad_norm": 1.4577277898788452,
"learning_rate": 9.091906224695935e-05,
"loss": 1.4781,
"step": 86
},
{
"epoch": 0.0058594736576248925,
"grad_norm": 1.327652096748352,
"learning_rate": 9.068627601992598e-05,
"loss": 1.3372,
"step": 87
},
{
"epoch": 0.005926823929551616,
"grad_norm": 1.5228052139282227,
"learning_rate": 9.045084971874738e-05,
"loss": 1.5345,
"step": 88
},
{
"epoch": 0.005994174201478338,
"grad_norm": 1.5462514162063599,
"learning_rate": 9.021279861989885e-05,
"loss": 1.8167,
"step": 89
},
{
"epoch": 0.006061524473405061,
"grad_norm": 1.3531618118286133,
"learning_rate": 8.997213817017507e-05,
"loss": 1.3477,
"step": 90
},
{
"epoch": 0.0061288747453317844,
"grad_norm": 1.3700722455978394,
"learning_rate": 8.972888398568772e-05,
"loss": 1.3551,
"step": 91
},
{
"epoch": 0.006196225017258507,
"grad_norm": 1.5112624168395996,
"learning_rate": 8.948305185085225e-05,
"loss": 1.6612,
"step": 92
},
{
"epoch": 0.00626357528918523,
"grad_norm": 1.519962191581726,
"learning_rate": 8.92346577173636e-05,
"loss": 1.5554,
"step": 93
},
{
"epoch": 0.006330925561111953,
"grad_norm": 1.3995540142059326,
"learning_rate": 8.898371770316111e-05,
"loss": 1.2633,
"step": 94
},
{
"epoch": 0.0063982758330386755,
"grad_norm": 1.8468939065933228,
"learning_rate": 8.873024809138272e-05,
"loss": 1.7552,
"step": 95
},
{
"epoch": 0.006465626104965399,
"grad_norm": 1.6963553428649902,
"learning_rate": 8.847426532930831e-05,
"loss": 1.5469,
"step": 96
},
{
"epoch": 0.006532976376892122,
"grad_norm": 1.7640057802200317,
"learning_rate": 8.821578602729242e-05,
"loss": 1.5224,
"step": 97
},
{
"epoch": 0.006600326648818844,
"grad_norm": 2.2663934230804443,
"learning_rate": 8.795482695768658e-05,
"loss": 1.6114,
"step": 98
},
{
"epoch": 0.0066676769207455674,
"grad_norm": 1.793444275856018,
"learning_rate": 8.769140505375085e-05,
"loss": 1.603,
"step": 99
},
{
"epoch": 0.006735027192672291,
"grad_norm": 2.0338592529296875,
"learning_rate": 8.742553740855506e-05,
"loss": 1.4991,
"step": 100
},
{
"epoch": 0.006735027192672291,
"eval_loss": 1.3707906007766724,
"eval_runtime": 738.2821,
"eval_samples_per_second": 33.872,
"eval_steps_per_second": 8.468,
"step": 100
},
{
"epoch": 0.006802377464599013,
"grad_norm": 1.0350993871688843,
"learning_rate": 8.715724127386972e-05,
"loss": 1.0369,
"step": 101
},
{
"epoch": 0.006869727736525736,
"grad_norm": 1.2037113904953003,
"learning_rate": 8.688653405904652e-05,
"loss": 1.0181,
"step": 102
},
{
"epoch": 0.006937078008452459,
"grad_norm": 1.2135459184646606,
"learning_rate": 8.661343332988869e-05,
"loss": 1.2639,
"step": 103
},
{
"epoch": 0.007004428280379182,
"grad_norm": 1.301133155822754,
"learning_rate": 8.633795680751116e-05,
"loss": 0.9774,
"step": 104
},
{
"epoch": 0.007071778552305905,
"grad_norm": 1.127380132675171,
"learning_rate": 8.606012236719073e-05,
"loss": 1.1987,
"step": 105
},
{
"epoch": 0.007139128824232628,
"grad_norm": 1.1390761137008667,
"learning_rate": 8.577994803720606e-05,
"loss": 1.2403,
"step": 106
},
{
"epoch": 0.0072064790961593505,
"grad_norm": 1.0206562280654907,
"learning_rate": 8.549745199766792e-05,
"loss": 1.2218,
"step": 107
},
{
"epoch": 0.007273829368086074,
"grad_norm": 1.1791778802871704,
"learning_rate": 8.521265257933948e-05,
"loss": 1.195,
"step": 108
},
{
"epoch": 0.007341179640012797,
"grad_norm": 1.1491755247116089,
"learning_rate": 8.492556826244687e-05,
"loss": 1.2749,
"step": 109
},
{
"epoch": 0.007408529911939519,
"grad_norm": 1.105694055557251,
"learning_rate": 8.463621767547998e-05,
"loss": 1.3577,
"step": 110
},
{
"epoch": 0.007475880183866242,
"grad_norm": 1.261724591255188,
"learning_rate": 8.434461959398376e-05,
"loss": 1.3621,
"step": 111
},
{
"epoch": 0.007543230455792966,
"grad_norm": 1.0862749814987183,
"learning_rate": 8.405079293933986e-05,
"loss": 1.143,
"step": 112
},
{
"epoch": 0.007610580727719688,
"grad_norm": 0.9798551201820374,
"learning_rate": 8.375475677753881e-05,
"loss": 1.0633,
"step": 113
},
{
"epoch": 0.007677930999646411,
"grad_norm": 1.2961410284042358,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3191,
"step": 114
},
{
"epoch": 0.007745281271573134,
"grad_norm": 1.138549566268921,
"learning_rate": 8.315613291203976e-05,
"loss": 1.3754,
"step": 115
},
{
"epoch": 0.007812631543499857,
"grad_norm": 1.0177663564682007,
"learning_rate": 8.285358405218655e-05,
"loss": 1.2438,
"step": 116
},
{
"epoch": 0.00787998181542658,
"grad_norm": 1.2038869857788086,
"learning_rate": 8.25489033703452e-05,
"loss": 1.195,
"step": 117
},
{
"epoch": 0.007947332087353303,
"grad_norm": 1.0598257780075073,
"learning_rate": 8.224211063680853e-05,
"loss": 1.2098,
"step": 118
},
{
"epoch": 0.008014682359280026,
"grad_norm": 1.2124276161193848,
"learning_rate": 8.19332257589174e-05,
"loss": 1.3533,
"step": 119
},
{
"epoch": 0.008082032631206748,
"grad_norm": 1.3013999462127686,
"learning_rate": 8.162226877976887e-05,
"loss": 1.3665,
"step": 120
},
{
"epoch": 0.008149382903133471,
"grad_norm": 2.011751651763916,
"learning_rate": 8.130925987691569e-05,
"loss": 1.3127,
"step": 121
},
{
"epoch": 0.008216733175060194,
"grad_norm": 1.0220164060592651,
"learning_rate": 8.099421936105702e-05,
"loss": 1.1425,
"step": 122
},
{
"epoch": 0.008284083446986917,
"grad_norm": 1.2821173667907715,
"learning_rate": 8.067716767472045e-05,
"loss": 1.4458,
"step": 123
},
{
"epoch": 0.00835143371891364,
"grad_norm": 1.3284802436828613,
"learning_rate": 8.035812539093557e-05,
"loss": 1.5041,
"step": 124
},
{
"epoch": 0.008418783990840364,
"grad_norm": 1.151969313621521,
"learning_rate": 8.003711321189895e-05,
"loss": 1.338,
"step": 125
},
{
"epoch": 0.008486134262767085,
"grad_norm": 1.336993932723999,
"learning_rate": 7.971415196763088e-05,
"loss": 1.2922,
"step": 126
},
{
"epoch": 0.008553484534693808,
"grad_norm": 1.0862728357315063,
"learning_rate": 7.938926261462366e-05,
"loss": 1.3108,
"step": 127
},
{
"epoch": 0.008620834806620532,
"grad_norm": 1.2815523147583008,
"learning_rate": 7.906246623448183e-05,
"loss": 1.3628,
"step": 128
},
{
"epoch": 0.008688185078547255,
"grad_norm": 1.260210394859314,
"learning_rate": 7.873378403255419e-05,
"loss": 1.3585,
"step": 129
},
{
"epoch": 0.008755535350473978,
"grad_norm": 1.4499257802963257,
"learning_rate": 7.840323733655778e-05,
"loss": 1.3755,
"step": 130
},
{
"epoch": 0.008822885622400701,
"grad_norm": 1.2586874961853027,
"learning_rate": 7.807084759519405e-05,
"loss": 1.3574,
"step": 131
},
{
"epoch": 0.008890235894327423,
"grad_norm": 1.2724554538726807,
"learning_rate": 7.773663637675694e-05,
"loss": 1.4911,
"step": 132
},
{
"epoch": 0.008957586166254146,
"grad_norm": 1.361464500427246,
"learning_rate": 7.740062536773352e-05,
"loss": 1.5767,
"step": 133
},
{
"epoch": 0.009024936438180869,
"grad_norm": 1.5993369817733765,
"learning_rate": 7.706283637139658e-05,
"loss": 1.4852,
"step": 134
},
{
"epoch": 0.009092286710107592,
"grad_norm": 1.4950506687164307,
"learning_rate": 7.672329130639005e-05,
"loss": 1.4919,
"step": 135
},
{
"epoch": 0.009159636982034315,
"grad_norm": 1.5922907590866089,
"learning_rate": 7.638201220530665e-05,
"loss": 1.6347,
"step": 136
},
{
"epoch": 0.009226987253961039,
"grad_norm": 1.2615551948547363,
"learning_rate": 7.603902121325813e-05,
"loss": 1.1959,
"step": 137
},
{
"epoch": 0.00929433752588776,
"grad_norm": 1.3236908912658691,
"learning_rate": 7.569434058643844e-05,
"loss": 1.4719,
"step": 138
},
{
"epoch": 0.009361687797814483,
"grad_norm": 1.2798570394515991,
"learning_rate": 7.534799269067953e-05,
"loss": 1.309,
"step": 139
},
{
"epoch": 0.009429038069741207,
"grad_norm": 1.588212013244629,
"learning_rate": 7.500000000000001e-05,
"loss": 1.5616,
"step": 140
},
{
"epoch": 0.00949638834166793,
"grad_norm": 1.4299564361572266,
"learning_rate": 7.465038509514688e-05,
"loss": 1.5298,
"step": 141
},
{
"epoch": 0.009563738613594653,
"grad_norm": 1.6370961666107178,
"learning_rate": 7.42991706621303e-05,
"loss": 1.6629,
"step": 142
},
{
"epoch": 0.009631088885521374,
"grad_norm": 1.483093023300171,
"learning_rate": 7.394637949075154e-05,
"loss": 1.4603,
"step": 143
},
{
"epoch": 0.009698439157448098,
"grad_norm": 1.846943736076355,
"learning_rate": 7.35920344731241e-05,
"loss": 1.6674,
"step": 144
},
{
"epoch": 0.00976578942937482,
"grad_norm": 1.6040513515472412,
"learning_rate": 7.323615860218843e-05,
"loss": 1.6899,
"step": 145
},
{
"epoch": 0.009833139701301544,
"grad_norm": 1.330278992652893,
"learning_rate": 7.287877497021978e-05,
"loss": 1.3632,
"step": 146
},
{
"epoch": 0.009900489973228267,
"grad_norm": 1.5452008247375488,
"learning_rate": 7.251990676732984e-05,
"loss": 1.6167,
"step": 147
},
{
"epoch": 0.00996784024515499,
"grad_norm": 1.6564722061157227,
"learning_rate": 7.215957727996207e-05,
"loss": 1.6265,
"step": 148
},
{
"epoch": 0.010035190517081712,
"grad_norm": 1.940214991569519,
"learning_rate": 7.179780988938051e-05,
"loss": 1.599,
"step": 149
},
{
"epoch": 0.010102540789008435,
"grad_norm": 2.222428321838379,
"learning_rate": 7.143462807015271e-05,
"loss": 1.6995,
"step": 150
},
{
"epoch": 0.010102540789008435,
"eval_loss": 1.3560807704925537,
"eval_runtime": 738.5582,
"eval_samples_per_second": 33.859,
"eval_steps_per_second": 8.465,
"step": 150
},
{
"epoch": 0.010169891060935158,
"grad_norm": 0.9370902180671692,
"learning_rate": 7.107005538862646e-05,
"loss": 1.1064,
"step": 151
},
{
"epoch": 0.010237241332861882,
"grad_norm": 0.9733120799064636,
"learning_rate": 7.07041155014006e-05,
"loss": 1.1346,
"step": 152
},
{
"epoch": 0.010304591604788605,
"grad_norm": 0.9286036491394043,
"learning_rate": 7.033683215379002e-05,
"loss": 1.0471,
"step": 153
},
{
"epoch": 0.010371941876715328,
"grad_norm": 1.061566948890686,
"learning_rate": 6.996822917828477e-05,
"loss": 1.1593,
"step": 154
},
{
"epoch": 0.01043929214864205,
"grad_norm": 0.988382875919342,
"learning_rate": 6.959833049300377e-05,
"loss": 1.2365,
"step": 155
},
{
"epoch": 0.010506642420568773,
"grad_norm": 0.9433530569076538,
"learning_rate": 6.922716010014255e-05,
"loss": 1.1843,
"step": 156
},
{
"epoch": 0.010573992692495496,
"grad_norm": 0.9518124461174011,
"learning_rate": 6.885474208441603e-05,
"loss": 1.1346,
"step": 157
},
{
"epoch": 0.010641342964422219,
"grad_norm": 1.0984117984771729,
"learning_rate": 6.848110061149556e-05,
"loss": 1.3145,
"step": 158
},
{
"epoch": 0.010708693236348942,
"grad_norm": 1.0021629333496094,
"learning_rate": 6.810625992644085e-05,
"loss": 1.1335,
"step": 159
},
{
"epoch": 0.010776043508275665,
"grad_norm": 1.0588817596435547,
"learning_rate": 6.773024435212678e-05,
"loss": 1.2448,
"step": 160
},
{
"epoch": 0.010843393780202387,
"grad_norm": 1.0959153175354004,
"learning_rate": 6.735307828766515e-05,
"loss": 1.148,
"step": 161
},
{
"epoch": 0.01091074405212911,
"grad_norm": 0.9979251623153687,
"learning_rate": 6.697478620682137e-05,
"loss": 1.0908,
"step": 162
},
{
"epoch": 0.010978094324055833,
"grad_norm": 1.0619982481002808,
"learning_rate": 6.659539265642643e-05,
"loss": 1.2502,
"step": 163
},
{
"epoch": 0.011045444595982556,
"grad_norm": 1.091411828994751,
"learning_rate": 6.621492225478414e-05,
"loss": 1.1467,
"step": 164
},
{
"epoch": 0.01111279486790928,
"grad_norm": 1.1791459321975708,
"learning_rate": 6.583339969007363e-05,
"loss": 1.3743,
"step": 165
},
{
"epoch": 0.011180145139836003,
"grad_norm": 1.069138765335083,
"learning_rate": 6.545084971874738e-05,
"loss": 1.2523,
"step": 166
},
{
"epoch": 0.011247495411762724,
"grad_norm": 1.167673945426941,
"learning_rate": 6.506729716392481e-05,
"loss": 1.4057,
"step": 167
},
{
"epoch": 0.011314845683689448,
"grad_norm": 1.1494766473770142,
"learning_rate": 6.468276691378155e-05,
"loss": 1.2518,
"step": 168
},
{
"epoch": 0.01138219595561617,
"grad_norm": 1.156091570854187,
"learning_rate": 6.429728391993446e-05,
"loss": 1.3678,
"step": 169
},
{
"epoch": 0.011449546227542894,
"grad_norm": 1.1573467254638672,
"learning_rate": 6.391087319582264e-05,
"loss": 1.4178,
"step": 170
},
{
"epoch": 0.011516896499469617,
"grad_norm": 1.1399964094161987,
"learning_rate": 6.35235598150842e-05,
"loss": 1.2234,
"step": 171
},
{
"epoch": 0.01158424677139634,
"grad_norm": 1.1700021028518677,
"learning_rate": 6.313536890992935e-05,
"loss": 1.3678,
"step": 172
},
{
"epoch": 0.011651597043323062,
"grad_norm": 1.1301724910736084,
"learning_rate": 6.274632566950967e-05,
"loss": 1.3965,
"step": 173
},
{
"epoch": 0.011718947315249785,
"grad_norm": 1.0706028938293457,
"learning_rate": 6.235645533828349e-05,
"loss": 1.3663,
"step": 174
},
{
"epoch": 0.011786297587176508,
"grad_norm": 1.0472501516342163,
"learning_rate": 6.19657832143779e-05,
"loss": 1.2252,
"step": 175
},
{
"epoch": 0.011853647859103231,
"grad_norm": 1.1286253929138184,
"learning_rate": 6.157433464794716e-05,
"loss": 1.3053,
"step": 176
},
{
"epoch": 0.011920998131029955,
"grad_norm": 1.138118863105774,
"learning_rate": 6.118213503952779e-05,
"loss": 1.34,
"step": 177
},
{
"epoch": 0.011988348402956676,
"grad_norm": 1.0691560506820679,
"learning_rate": 6.078920983839031e-05,
"loss": 1.3048,
"step": 178
},
{
"epoch": 0.0120556986748834,
"grad_norm": 1.226596713066101,
"learning_rate": 6.0395584540887963e-05,
"loss": 1.3146,
"step": 179
},
{
"epoch": 0.012123048946810122,
"grad_norm": 1.2226495742797852,
"learning_rate": 6.0001284688802226e-05,
"loss": 1.4645,
"step": 180
},
{
"epoch": 0.012190399218736846,
"grad_norm": 1.215620994567871,
"learning_rate": 5.960633586768543e-05,
"loss": 1.299,
"step": 181
},
{
"epoch": 0.012257749490663569,
"grad_norm": 1.2768441438674927,
"learning_rate": 5.921076370520058e-05,
"loss": 1.5455,
"step": 182
},
{
"epoch": 0.012325099762590292,
"grad_norm": 1.2758901119232178,
"learning_rate": 5.8814593869458455e-05,
"loss": 1.4963,
"step": 183
},
{
"epoch": 0.012392450034517014,
"grad_norm": 1.3229836225509644,
"learning_rate": 5.841785206735192e-05,
"loss": 1.435,
"step": 184
},
{
"epoch": 0.012459800306443737,
"grad_norm": 1.2354806661605835,
"learning_rate": 5.8020564042888015e-05,
"loss": 1.4156,
"step": 185
},
{
"epoch": 0.01252715057837046,
"grad_norm": 1.3381010293960571,
"learning_rate": 5.762275557551727e-05,
"loss": 1.4482,
"step": 186
},
{
"epoch": 0.012594500850297183,
"grad_norm": 1.2151193618774414,
"learning_rate": 5.7224452478461064e-05,
"loss": 1.3234,
"step": 187
},
{
"epoch": 0.012661851122223906,
"grad_norm": 1.646081566810608,
"learning_rate": 5.682568059703659e-05,
"loss": 1.4757,
"step": 188
},
{
"epoch": 0.01272920139415063,
"grad_norm": 1.3965771198272705,
"learning_rate": 5.642646580697973e-05,
"loss": 1.5457,
"step": 189
},
{
"epoch": 0.012796551666077351,
"grad_norm": 1.2820106744766235,
"learning_rate": 5.602683401276615e-05,
"loss": 1.4958,
"step": 190
},
{
"epoch": 0.012863901938004074,
"grad_norm": 1.421890139579773,
"learning_rate": 5.562681114593028e-05,
"loss": 1.5091,
"step": 191
},
{
"epoch": 0.012931252209930797,
"grad_norm": 1.5250858068466187,
"learning_rate": 5.522642316338268e-05,
"loss": 1.5578,
"step": 192
},
{
"epoch": 0.01299860248185752,
"grad_norm": 1.6469945907592773,
"learning_rate": 5.482569604572576e-05,
"loss": 1.5527,
"step": 193
},
{
"epoch": 0.013065952753784244,
"grad_norm": 1.660823106765747,
"learning_rate": 5.442465579556793e-05,
"loss": 1.4799,
"step": 194
},
{
"epoch": 0.013133303025710967,
"grad_norm": 1.4221848249435425,
"learning_rate": 5.402332843583631e-05,
"loss": 1.5711,
"step": 195
},
{
"epoch": 0.013200653297637688,
"grad_norm": 1.3503718376159668,
"learning_rate": 5.3621740008088126e-05,
"loss": 1.4474,
"step": 196
},
{
"epoch": 0.013268003569564412,
"grad_norm": 1.51528000831604,
"learning_rate": 5.321991657082097e-05,
"loss": 1.5336,
"step": 197
},
{
"epoch": 0.013335353841491135,
"grad_norm": 1.7951074838638306,
"learning_rate": 5.281788419778187e-05,
"loss": 1.2894,
"step": 198
},
{
"epoch": 0.013402704113417858,
"grad_norm": 1.6057062149047852,
"learning_rate": 5.2415668976275355e-05,
"loss": 1.7802,
"step": 199
},
{
"epoch": 0.013470054385344581,
"grad_norm": 1.7107088565826416,
"learning_rate": 5.201329700547076e-05,
"loss": 1.2964,
"step": 200
},
{
"epoch": 0.013470054385344581,
"eval_loss": 1.3432029485702515,
"eval_runtime": 738.1699,
"eval_samples_per_second": 33.877,
"eval_steps_per_second": 8.47,
"step": 200
},
{
"epoch": 0.013537404657271305,
"grad_norm": 1.1227874755859375,
"learning_rate": 5.161079439470866e-05,
"loss": 1.1946,
"step": 201
},
{
"epoch": 0.013604754929198026,
"grad_norm": 1.0087556838989258,
"learning_rate": 5.1208187261806615e-05,
"loss": 1.2882,
"step": 202
},
{
"epoch": 0.01367210520112475,
"grad_norm": 1.0933849811553955,
"learning_rate": 5.080550173136457e-05,
"loss": 1.3311,
"step": 203
},
{
"epoch": 0.013739455473051472,
"grad_norm": 0.9209799766540527,
"learning_rate": 5.0402763933069496e-05,
"loss": 1.1001,
"step": 204
},
{
"epoch": 0.013806805744978196,
"grad_norm": 0.9551730155944824,
"learning_rate": 5e-05,
"loss": 1.1042,
"step": 205
},
{
"epoch": 0.013874156016904919,
"grad_norm": 0.8248175382614136,
"learning_rate": 4.9597236066930516e-05,
"loss": 1.0215,
"step": 206
},
{
"epoch": 0.013941506288831642,
"grad_norm": 0.9202468395233154,
"learning_rate": 4.919449826863544e-05,
"loss": 1.0346,
"step": 207
},
{
"epoch": 0.014008856560758363,
"grad_norm": 0.9693543910980225,
"learning_rate": 4.87918127381934e-05,
"loss": 1.1723,
"step": 208
},
{
"epoch": 0.014076206832685087,
"grad_norm": 0.8910868763923645,
"learning_rate": 4.8389205605291365e-05,
"loss": 1.0814,
"step": 209
},
{
"epoch": 0.01414355710461181,
"grad_norm": 1.1232092380523682,
"learning_rate": 4.798670299452926e-05,
"loss": 1.311,
"step": 210
},
{
"epoch": 0.014210907376538533,
"grad_norm": 1.0476187467575073,
"learning_rate": 4.758433102372466e-05,
"loss": 1.3065,
"step": 211
},
{
"epoch": 0.014278257648465256,
"grad_norm": 0.9502547979354858,
"learning_rate": 4.7182115802218126e-05,
"loss": 1.0397,
"step": 212
},
{
"epoch": 0.01434560792039198,
"grad_norm": 1.0085866451263428,
"learning_rate": 4.678008342917903e-05,
"loss": 1.2223,
"step": 213
},
{
"epoch": 0.014412958192318701,
"grad_norm": 0.9929443001747131,
"learning_rate": 4.6378259991911886e-05,
"loss": 1.0958,
"step": 214
},
{
"epoch": 0.014480308464245424,
"grad_norm": 1.5844645500183105,
"learning_rate": 4.597667156416371e-05,
"loss": 1.262,
"step": 215
},
{
"epoch": 0.014547658736172147,
"grad_norm": 1.1886199712753296,
"learning_rate": 4.5575344204432084e-05,
"loss": 1.4208,
"step": 216
},
{
"epoch": 0.01461500900809887,
"grad_norm": 0.991338312625885,
"learning_rate": 4.5174303954274244e-05,
"loss": 1.2287,
"step": 217
},
{
"epoch": 0.014682359280025594,
"grad_norm": 1.0494619607925415,
"learning_rate": 4.477357683661734e-05,
"loss": 1.2124,
"step": 218
},
{
"epoch": 0.014749709551952315,
"grad_norm": 1.0562416315078735,
"learning_rate": 4.437318885406973e-05,
"loss": 1.2285,
"step": 219
},
{
"epoch": 0.014817059823879038,
"grad_norm": 1.1794100999832153,
"learning_rate": 4.397316598723385e-05,
"loss": 1.2839,
"step": 220
},
{
"epoch": 0.014884410095805762,
"grad_norm": 1.2214128971099854,
"learning_rate": 4.3573534193020274e-05,
"loss": 1.4051,
"step": 221
},
{
"epoch": 0.014951760367732485,
"grad_norm": 1.0522944927215576,
"learning_rate": 4.317431940296343e-05,
"loss": 1.135,
"step": 222
},
{
"epoch": 0.015019110639659208,
"grad_norm": 1.0426898002624512,
"learning_rate": 4.277554752153895e-05,
"loss": 1.2934,
"step": 223
},
{
"epoch": 0.015086460911585931,
"grad_norm": 1.1241427659988403,
"learning_rate": 4.237724442448273e-05,
"loss": 1.237,
"step": 224
},
{
"epoch": 0.015153811183512653,
"grad_norm": 1.202584147453308,
"learning_rate": 4.197943595711198e-05,
"loss": 1.1759,
"step": 225
},
{
"epoch": 0.015221161455439376,
"grad_norm": 1.0882707834243774,
"learning_rate": 4.1582147932648074e-05,
"loss": 1.3437,
"step": 226
},
{
"epoch": 0.015288511727366099,
"grad_norm": 1.1346945762634277,
"learning_rate": 4.118540613054156e-05,
"loss": 1.2853,
"step": 227
},
{
"epoch": 0.015355861999292822,
"grad_norm": 1.1027241945266724,
"learning_rate": 4.078923629479943e-05,
"loss": 1.1455,
"step": 228
},
{
"epoch": 0.015423212271219545,
"grad_norm": 1.2200098037719727,
"learning_rate": 4.039366413231458e-05,
"loss": 1.4002,
"step": 229
},
{
"epoch": 0.015490562543146269,
"grad_norm": 1.1374239921569824,
"learning_rate": 3.9998715311197785e-05,
"loss": 1.4173,
"step": 230
},
{
"epoch": 0.01555791281507299,
"grad_norm": 1.2057877779006958,
"learning_rate": 3.960441545911204e-05,
"loss": 1.5388,
"step": 231
},
{
"epoch": 0.015625263086999713,
"grad_norm": 1.1581593751907349,
"learning_rate": 3.92107901616097e-05,
"loss": 1.452,
"step": 232
},
{
"epoch": 0.01569261335892644,
"grad_norm": 1.4164345264434814,
"learning_rate": 3.8817864960472236e-05,
"loss": 1.5977,
"step": 233
},
{
"epoch": 0.01575996363085316,
"grad_norm": 1.3473681211471558,
"learning_rate": 3.842566535205286e-05,
"loss": 1.6104,
"step": 234
},
{
"epoch": 0.01582731390277988,
"grad_norm": 1.365702748298645,
"learning_rate": 3.803421678562213e-05,
"loss": 1.455,
"step": 235
},
{
"epoch": 0.015894664174706606,
"grad_norm": 1.410869836807251,
"learning_rate": 3.764354466171652e-05,
"loss": 1.5434,
"step": 236
},
{
"epoch": 0.015962014446633328,
"grad_norm": 1.152729868888855,
"learning_rate": 3.725367433049033e-05,
"loss": 1.3922,
"step": 237
},
{
"epoch": 0.016029364718560053,
"grad_norm": 1.2944568395614624,
"learning_rate": 3.6864631090070655e-05,
"loss": 1.328,
"step": 238
},
{
"epoch": 0.016096714990486774,
"grad_norm": 1.3709889650344849,
"learning_rate": 3.6476440184915815e-05,
"loss": 1.3385,
"step": 239
},
{
"epoch": 0.016164065262413495,
"grad_norm": 1.472702980041504,
"learning_rate": 3.608912680417737e-05,
"loss": 1.7066,
"step": 240
},
{
"epoch": 0.01623141553434022,
"grad_norm": 1.4194689989089966,
"learning_rate": 3.570271608006555e-05,
"loss": 1.5297,
"step": 241
},
{
"epoch": 0.016298765806266942,
"grad_norm": 1.4132965803146362,
"learning_rate": 3.531723308621847e-05,
"loss": 1.4771,
"step": 242
},
{
"epoch": 0.016366116078193667,
"grad_norm": 1.3434481620788574,
"learning_rate": 3.493270283607522e-05,
"loss": 1.3609,
"step": 243
},
{
"epoch": 0.01643346635012039,
"grad_norm": 1.3477119207382202,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.4376,
"step": 244
},
{
"epoch": 0.016500816622047113,
"grad_norm": 1.482155442237854,
"learning_rate": 3.4166600309926387e-05,
"loss": 1.5748,
"step": 245
},
{
"epoch": 0.016568166893973835,
"grad_norm": 1.5454543828964233,
"learning_rate": 3.3785077745215873e-05,
"loss": 1.3573,
"step": 246
},
{
"epoch": 0.016635517165900556,
"grad_norm": 1.6062289476394653,
"learning_rate": 3.340460734357359e-05,
"loss": 1.7007,
"step": 247
},
{
"epoch": 0.01670286743782728,
"grad_norm": 1.4669798612594604,
"learning_rate": 3.3025213793178646e-05,
"loss": 1.5439,
"step": 248
},
{
"epoch": 0.016770217709754003,
"grad_norm": 1.748022198677063,
"learning_rate": 3.264692171233485e-05,
"loss": 1.562,
"step": 249
},
{
"epoch": 0.016837567981680727,
"grad_norm": 2.035632848739624,
"learning_rate": 3.226975564787322e-05,
"loss": 1.6668,
"step": 250
},
{
"epoch": 0.016837567981680727,
"eval_loss": 1.3342212438583374,
"eval_runtime": 739.2168,
"eval_samples_per_second": 33.829,
"eval_steps_per_second": 8.458,
"step": 250
},
{
"epoch": 0.01690491825360745,
"grad_norm": 0.8713572025299072,
"learning_rate": 3.189374007355917e-05,
"loss": 0.9818,
"step": 251
},
{
"epoch": 0.01697226852553417,
"grad_norm": 0.9513303637504578,
"learning_rate": 3.151889938850445e-05,
"loss": 1.1104,
"step": 252
},
{
"epoch": 0.017039618797460895,
"grad_norm": 1.0168979167938232,
"learning_rate": 3.114525791558398e-05,
"loss": 1.1361,
"step": 253
},
{
"epoch": 0.017106969069387617,
"grad_norm": 1.0765156745910645,
"learning_rate": 3.0772839899857464e-05,
"loss": 1.2839,
"step": 254
},
{
"epoch": 0.017174319341314342,
"grad_norm": 0.9411038160324097,
"learning_rate": 3.0401669506996256e-05,
"loss": 1.1857,
"step": 255
},
{
"epoch": 0.017241669613241063,
"grad_norm": 1.0114924907684326,
"learning_rate": 3.003177082171523e-05,
"loss": 1.1497,
"step": 256
},
{
"epoch": 0.017309019885167785,
"grad_norm": 0.9451395869255066,
"learning_rate": 2.9663167846209998e-05,
"loss": 1.1326,
"step": 257
},
{
"epoch": 0.01737637015709451,
"grad_norm": 1.0250037908554077,
"learning_rate": 2.9295884498599414e-05,
"loss": 1.2372,
"step": 258
},
{
"epoch": 0.01744372042902123,
"grad_norm": 0.9830464124679565,
"learning_rate": 2.8929944611373554e-05,
"loss": 1.1286,
"step": 259
},
{
"epoch": 0.017511070700947956,
"grad_norm": 1.1806437969207764,
"learning_rate": 2.8565371929847284e-05,
"loss": 1.1044,
"step": 260
},
{
"epoch": 0.017578420972874678,
"grad_norm": 0.910880446434021,
"learning_rate": 2.8202190110619493e-05,
"loss": 1.0903,
"step": 261
},
{
"epoch": 0.017645771244801402,
"grad_norm": 1.0307596921920776,
"learning_rate": 2.784042272003794e-05,
"loss": 1.2891,
"step": 262
},
{
"epoch": 0.017713121516728124,
"grad_norm": 1.1910873651504517,
"learning_rate": 2.7480093232670158e-05,
"loss": 1.3077,
"step": 263
},
{
"epoch": 0.017780471788654845,
"grad_norm": 1.0718063116073608,
"learning_rate": 2.712122502978024e-05,
"loss": 1.3975,
"step": 264
},
{
"epoch": 0.01784782206058157,
"grad_norm": 0.9779868721961975,
"learning_rate": 2.6763841397811573e-05,
"loss": 1.3028,
"step": 265
},
{
"epoch": 0.017915172332508292,
"grad_norm": 1.050061821937561,
"learning_rate": 2.64079655268759e-05,
"loss": 1.2114,
"step": 266
},
{
"epoch": 0.017982522604435017,
"grad_norm": 1.144936203956604,
"learning_rate": 2.605362050924848e-05,
"loss": 1.4258,
"step": 267
},
{
"epoch": 0.018049872876361738,
"grad_norm": 1.0306668281555176,
"learning_rate": 2.57008293378697e-05,
"loss": 1.2216,
"step": 268
},
{
"epoch": 0.01811722314828846,
"grad_norm": 1.282397985458374,
"learning_rate": 2.534961490485313e-05,
"loss": 1.2653,
"step": 269
},
{
"epoch": 0.018184573420215185,
"grad_norm": 1.2951265573501587,
"learning_rate": 2.500000000000001e-05,
"loss": 1.5976,
"step": 270
},
{
"epoch": 0.018251923692141906,
"grad_norm": 1.2171090841293335,
"learning_rate": 2.4652007309320498e-05,
"loss": 1.39,
"step": 271
},
{
"epoch": 0.01831927396406863,
"grad_norm": 1.021344542503357,
"learning_rate": 2.430565941356157e-05,
"loss": 1.178,
"step": 272
},
{
"epoch": 0.018386624235995352,
"grad_norm": 1.0217483043670654,
"learning_rate": 2.3960978786741877e-05,
"loss": 1.2201,
"step": 273
},
{
"epoch": 0.018453974507922077,
"grad_norm": 1.1176892518997192,
"learning_rate": 2.361798779469336e-05,
"loss": 1.234,
"step": 274
},
{
"epoch": 0.0185213247798488,
"grad_norm": 1.0938963890075684,
"learning_rate": 2.3276708693609943e-05,
"loss": 1.359,
"step": 275
},
{
"epoch": 0.01858867505177552,
"grad_norm": 1.1674708127975464,
"learning_rate": 2.2937163628603435e-05,
"loss": 1.4127,
"step": 276
},
{
"epoch": 0.018656025323702245,
"grad_norm": 1.1793794631958008,
"learning_rate": 2.259937463226651e-05,
"loss": 1.2394,
"step": 277
},
{
"epoch": 0.018723375595628967,
"grad_norm": 1.1035133600234985,
"learning_rate": 2.2263363623243054e-05,
"loss": 1.2598,
"step": 278
},
{
"epoch": 0.01879072586755569,
"grad_norm": 1.2473664283752441,
"learning_rate": 2.192915240480596e-05,
"loss": 1.3994,
"step": 279
},
{
"epoch": 0.018858076139482413,
"grad_norm": 1.1195029020309448,
"learning_rate": 2.1596762663442218e-05,
"loss": 1.2052,
"step": 280
},
{
"epoch": 0.018925426411409135,
"grad_norm": 1.2758724689483643,
"learning_rate": 2.1266215967445824e-05,
"loss": 1.5162,
"step": 281
},
{
"epoch": 0.01899277668333586,
"grad_norm": 1.1609270572662354,
"learning_rate": 2.0937533765518187e-05,
"loss": 1.3269,
"step": 282
},
{
"epoch": 0.01906012695526258,
"grad_norm": 1.2329775094985962,
"learning_rate": 2.061073738537635e-05,
"loss": 1.4856,
"step": 283
},
{
"epoch": 0.019127477227189306,
"grad_norm": 1.1754076480865479,
"learning_rate": 2.0285848032369137e-05,
"loss": 1.3518,
"step": 284
},
{
"epoch": 0.019194827499116027,
"grad_norm": 1.2435510158538818,
"learning_rate": 1.996288678810105e-05,
"loss": 1.3986,
"step": 285
},
{
"epoch": 0.01926217777104275,
"grad_norm": 1.1661019325256348,
"learning_rate": 1.9641874609064443e-05,
"loss": 1.3242,
"step": 286
},
{
"epoch": 0.019329528042969474,
"grad_norm": 1.2058144807815552,
"learning_rate": 1.932283232527956e-05,
"loss": 1.3828,
"step": 287
},
{
"epoch": 0.019396878314896195,
"grad_norm": 1.216773271560669,
"learning_rate": 1.9005780638942982e-05,
"loss": 1.3686,
"step": 288
},
{
"epoch": 0.01946422858682292,
"grad_norm": 1.413074254989624,
"learning_rate": 1.8690740123084316e-05,
"loss": 1.4573,
"step": 289
},
{
"epoch": 0.01953157885874964,
"grad_norm": 1.2821346521377563,
"learning_rate": 1.837773122023114e-05,
"loss": 1.3789,
"step": 290
},
{
"epoch": 0.019598929130676367,
"grad_norm": 1.2425432205200195,
"learning_rate": 1.8066774241082612e-05,
"loss": 1.5083,
"step": 291
},
{
"epoch": 0.019666279402603088,
"grad_norm": 1.3399276733398438,
"learning_rate": 1.7757889363191483e-05,
"loss": 1.4766,
"step": 292
},
{
"epoch": 0.01973362967452981,
"grad_norm": 1.17010498046875,
"learning_rate": 1.745109662965481e-05,
"loss": 1.2784,
"step": 293
},
{
"epoch": 0.019800979946456534,
"grad_norm": 1.728461503982544,
"learning_rate": 1.714641594781347e-05,
"loss": 1.3018,
"step": 294
},
{
"epoch": 0.019868330218383256,
"grad_norm": 1.5740691423416138,
"learning_rate": 1.684386708796025e-05,
"loss": 1.4088,
"step": 295
},
{
"epoch": 0.01993568049030998,
"grad_norm": 1.5061848163604736,
"learning_rate": 1.6543469682057106e-05,
"loss": 1.6017,
"step": 296
},
{
"epoch": 0.020003030762236702,
"grad_norm": 1.895371437072754,
"learning_rate": 1.62452432224612e-05,
"loss": 1.5244,
"step": 297
},
{
"epoch": 0.020070381034163424,
"grad_norm": 1.5589200258255005,
"learning_rate": 1.5949207060660138e-05,
"loss": 1.742,
"step": 298
},
{
"epoch": 0.02013773130609015,
"grad_norm": 1.8027925491333008,
"learning_rate": 1.5655380406016235e-05,
"loss": 1.7559,
"step": 299
},
{
"epoch": 0.02020508157801687,
"grad_norm": 1.7761001586914062,
"learning_rate": 1.536378232452003e-05,
"loss": 1.5844,
"step": 300
},
{
"epoch": 0.02020508157801687,
"eval_loss": 1.3293969631195068,
"eval_runtime": 738.6384,
"eval_samples_per_second": 33.856,
"eval_steps_per_second": 8.464,
"step": 300
},
{
"epoch": 0.020272431849943595,
"grad_norm": 0.9483279585838318,
"learning_rate": 1.5074431737553157e-05,
"loss": 1.1289,
"step": 301
},
{
"epoch": 0.020339782121870317,
"grad_norm": 0.880562961101532,
"learning_rate": 1.4787347420660541e-05,
"loss": 1.0546,
"step": 302
},
{
"epoch": 0.02040713239379704,
"grad_norm": 0.9176260232925415,
"learning_rate": 1.4502548002332088e-05,
"loss": 1.0747,
"step": 303
},
{
"epoch": 0.020474482665723763,
"grad_norm": 0.941221296787262,
"learning_rate": 1.422005196279395e-05,
"loss": 0.9572,
"step": 304
},
{
"epoch": 0.020541832937650484,
"grad_norm": 0.9974879622459412,
"learning_rate": 1.3939877632809278e-05,
"loss": 1.1104,
"step": 305
},
{
"epoch": 0.02060918320957721,
"grad_norm": 1.0085439682006836,
"learning_rate": 1.3662043192488849e-05,
"loss": 1.1802,
"step": 306
},
{
"epoch": 0.02067653348150393,
"grad_norm": 1.0595355033874512,
"learning_rate": 1.338656667011134e-05,
"loss": 1.1322,
"step": 307
},
{
"epoch": 0.020743883753430656,
"grad_norm": 1.025657057762146,
"learning_rate": 1.3113465940953495e-05,
"loss": 1.1411,
"step": 308
},
{
"epoch": 0.020811234025357377,
"grad_norm": 1.0750242471694946,
"learning_rate": 1.2842758726130283e-05,
"loss": 1.3605,
"step": 309
},
{
"epoch": 0.0208785842972841,
"grad_norm": 1.1114938259124756,
"learning_rate": 1.257446259144494e-05,
"loss": 1.2302,
"step": 310
},
{
"epoch": 0.020945934569210824,
"grad_norm": 0.9650045037269592,
"learning_rate": 1.2308594946249163e-05,
"loss": 1.1415,
"step": 311
},
{
"epoch": 0.021013284841137545,
"grad_norm": 0.9643846154212952,
"learning_rate": 1.204517304231343e-05,
"loss": 1.1501,
"step": 312
},
{
"epoch": 0.02108063511306427,
"grad_norm": 0.9403222799301147,
"learning_rate": 1.178421397270758e-05,
"loss": 1.1286,
"step": 313
},
{
"epoch": 0.02114798538499099,
"grad_norm": 1.1337164640426636,
"learning_rate": 1.1525734670691701e-05,
"loss": 1.2084,
"step": 314
},
{
"epoch": 0.021215335656917717,
"grad_norm": 0.9642013311386108,
"learning_rate": 1.1269751908617277e-05,
"loss": 1.1753,
"step": 315
},
{
"epoch": 0.021282685928844438,
"grad_norm": 1.0559114217758179,
"learning_rate": 1.1016282296838887e-05,
"loss": 1.2275,
"step": 316
},
{
"epoch": 0.02135003620077116,
"grad_norm": 1.1197682619094849,
"learning_rate": 1.0765342282636416e-05,
"loss": 1.1159,
"step": 317
},
{
"epoch": 0.021417386472697884,
"grad_norm": 1.038716435432434,
"learning_rate": 1.0516948149147754e-05,
"loss": 1.4075,
"step": 318
},
{
"epoch": 0.021484736744624606,
"grad_norm": 0.96999591588974,
"learning_rate": 1.0271116014312293e-05,
"loss": 1.1907,
"step": 319
},
{
"epoch": 0.02155208701655133,
"grad_norm": 1.1059964895248413,
"learning_rate": 1.0027861829824952e-05,
"loss": 1.2472,
"step": 320
},
{
"epoch": 0.021619437288478052,
"grad_norm": 1.0419682264328003,
"learning_rate": 9.787201380101157e-06,
"loss": 1.2452,
"step": 321
},
{
"epoch": 0.021686787560404774,
"grad_norm": 1.1106568574905396,
"learning_rate": 9.549150281252633e-06,
"loss": 1.1885,
"step": 322
},
{
"epoch": 0.0217541378323315,
"grad_norm": 1.0976035594940186,
"learning_rate": 9.313723980074018e-06,
"loss": 1.3643,
"step": 323
},
{
"epoch": 0.02182148810425822,
"grad_norm": 1.1701173782348633,
"learning_rate": 9.080937753040646e-06,
"loss": 1.3457,
"step": 324
},
{
"epoch": 0.021888838376184945,
"grad_norm": 1.2230879068374634,
"learning_rate": 8.850806705317183e-06,
"loss": 1.5155,
"step": 325
},
{
"epoch": 0.021956188648111667,
"grad_norm": 1.1395500898361206,
"learning_rate": 8.623345769777514e-06,
"loss": 1.3319,
"step": 326
},
{
"epoch": 0.022023538920038388,
"grad_norm": 1.1425197124481201,
"learning_rate": 8.398569706035792e-06,
"loss": 1.3877,
"step": 327
},
{
"epoch": 0.022090889191965113,
"grad_norm": 1.0961964130401611,
"learning_rate": 8.176493099488663e-06,
"loss": 1.2585,
"step": 328
},
{
"epoch": 0.022158239463891834,
"grad_norm": 1.2439264059066772,
"learning_rate": 7.957130360368898e-06,
"loss": 1.491,
"step": 329
},
{
"epoch": 0.02222558973581856,
"grad_norm": 1.177886962890625,
"learning_rate": 7.740495722810271e-06,
"loss": 1.415,
"step": 330
},
{
"epoch": 0.02229294000774528,
"grad_norm": 1.4000835418701172,
"learning_rate": 7.526603243923957e-06,
"loss": 1.4878,
"step": 331
},
{
"epoch": 0.022360290279672006,
"grad_norm": 1.2325903177261353,
"learning_rate": 7.315466802886401e-06,
"loss": 1.2558,
"step": 332
},
{
"epoch": 0.022427640551598727,
"grad_norm": 1.2314106225967407,
"learning_rate": 7.107100100038671e-06,
"loss": 1.3935,
"step": 333
},
{
"epoch": 0.02249499082352545,
"grad_norm": 1.2869185209274292,
"learning_rate": 6.901516655997536e-06,
"loss": 1.51,
"step": 334
},
{
"epoch": 0.022562341095452174,
"grad_norm": 1.3866029977798462,
"learning_rate": 6.698729810778065e-06,
"loss": 1.4728,
"step": 335
},
{
"epoch": 0.022629691367378895,
"grad_norm": 1.3128485679626465,
"learning_rate": 6.498752722928042e-06,
"loss": 1.3169,
"step": 336
},
{
"epoch": 0.02269704163930562,
"grad_norm": 1.304387092590332,
"learning_rate": 6.301598368674105e-06,
"loss": 1.4541,
"step": 337
},
{
"epoch": 0.02276439191123234,
"grad_norm": 1.4286638498306274,
"learning_rate": 6.107279541079769e-06,
"loss": 1.4538,
"step": 338
},
{
"epoch": 0.022831742183159063,
"grad_norm": 1.4982728958129883,
"learning_rate": 5.915808849215304e-06,
"loss": 1.5706,
"step": 339
},
{
"epoch": 0.022899092455085788,
"grad_norm": 1.199910283088684,
"learning_rate": 5.727198717339511e-06,
"loss": 1.4591,
"step": 340
},
{
"epoch": 0.02296644272701251,
"grad_norm": 1.2605010271072388,
"learning_rate": 5.54146138409355e-06,
"loss": 1.3878,
"step": 341
},
{
"epoch": 0.023033792998939234,
"grad_norm": 1.4372059106826782,
"learning_rate": 5.358608901706802e-06,
"loss": 1.5722,
"step": 342
},
{
"epoch": 0.023101143270865956,
"grad_norm": 2.0687360763549805,
"learning_rate": 5.178653135214812e-06,
"loss": 1.3645,
"step": 343
},
{
"epoch": 0.02316849354279268,
"grad_norm": 1.444715976715088,
"learning_rate": 5.001605761689398e-06,
"loss": 1.3622,
"step": 344
},
{
"epoch": 0.023235843814719402,
"grad_norm": 1.7077946662902832,
"learning_rate": 4.827478269480895e-06,
"loss": 1.5487,
"step": 345
},
{
"epoch": 0.023303194086646124,
"grad_norm": 1.355229377746582,
"learning_rate": 4.65628195747273e-06,
"loss": 1.5458,
"step": 346
},
{
"epoch": 0.02337054435857285,
"grad_norm": 1.549574851989746,
"learning_rate": 4.488027934348271e-06,
"loss": 1.3594,
"step": 347
},
{
"epoch": 0.02343789463049957,
"grad_norm": 1.6200557947158813,
"learning_rate": 4.322727117869951e-06,
"loss": 1.6635,
"step": 348
},
{
"epoch": 0.023505244902426295,
"grad_norm": 1.8985865116119385,
"learning_rate": 4.16039023417088e-06,
"loss": 1.4596,
"step": 349
},
{
"epoch": 0.023572595174353016,
"grad_norm": 1.9179009199142456,
"learning_rate": 4.001027817058789e-06,
"loss": 1.5636,
"step": 350
},
{
"epoch": 0.023572595174353016,
"eval_loss": 1.3278803825378418,
"eval_runtime": 738.5748,
"eval_samples_per_second": 33.858,
"eval_steps_per_second": 8.465,
"step": 350
},
{
"epoch": 0.023639945446279738,
"grad_norm": 1.4624953269958496,
"learning_rate": 3.844650207332562e-06,
"loss": 1.0904,
"step": 351
},
{
"epoch": 0.023707295718206463,
"grad_norm": 1.0814251899719238,
"learning_rate": 3.691267552111183e-06,
"loss": 1.0723,
"step": 352
},
{
"epoch": 0.023774645990133184,
"grad_norm": 0.9951095581054688,
"learning_rate": 3.54088980417534e-06,
"loss": 1.1652,
"step": 353
},
{
"epoch": 0.02384199626205991,
"grad_norm": 0.9580594897270203,
"learning_rate": 3.393526721321616e-06,
"loss": 1.1686,
"step": 354
},
{
"epoch": 0.02390934653398663,
"grad_norm": 1.0866096019744873,
"learning_rate": 3.249187865729264e-06,
"loss": 1.1205,
"step": 355
},
{
"epoch": 0.023976696805913352,
"grad_norm": 0.9641712307929993,
"learning_rate": 3.1078826033397843e-06,
"loss": 1.1141,
"step": 356
},
{
"epoch": 0.024044047077840077,
"grad_norm": 0.8843631744384766,
"learning_rate": 2.9696201032491434e-06,
"loss": 1.117,
"step": 357
},
{
"epoch": 0.0241113973497668,
"grad_norm": 0.8816883563995361,
"learning_rate": 2.8344093371128424e-06,
"loss": 1.0141,
"step": 358
},
{
"epoch": 0.024178747621693523,
"grad_norm": 0.9398783445358276,
"learning_rate": 2.70225907856374e-06,
"loss": 1.1264,
"step": 359
},
{
"epoch": 0.024246097893620245,
"grad_norm": 1.1492022275924683,
"learning_rate": 2.573177902642726e-06,
"loss": 1.3692,
"step": 360
},
{
"epoch": 0.02431344816554697,
"grad_norm": 0.9554076790809631,
"learning_rate": 2.4471741852423237e-06,
"loss": 1.0897,
"step": 361
},
{
"epoch": 0.02438079843747369,
"grad_norm": 1.031767725944519,
"learning_rate": 2.324256102563188e-06,
"loss": 1.2614,
"step": 362
},
{
"epoch": 0.024448148709400413,
"grad_norm": 1.0227885246276855,
"learning_rate": 2.204431630583548e-06,
"loss": 1.1484,
"step": 363
},
{
"epoch": 0.024515498981327138,
"grad_norm": 0.943084716796875,
"learning_rate": 2.087708544541689e-06,
"loss": 1.1367,
"step": 364
},
{
"epoch": 0.02458284925325386,
"grad_norm": 0.9876199960708618,
"learning_rate": 1.974094418431388e-06,
"loss": 1.3024,
"step": 365
},
{
"epoch": 0.024650199525180584,
"grad_norm": 1.0107133388519287,
"learning_rate": 1.8635966245104664e-06,
"loss": 1.152,
"step": 366
},
{
"epoch": 0.024717549797107306,
"grad_norm": 0.987256646156311,
"learning_rate": 1.7562223328224325e-06,
"loss": 1.2102,
"step": 367
},
{
"epoch": 0.024784900069034027,
"grad_norm": 1.1262116432189941,
"learning_rate": 1.6519785107311891e-06,
"loss": 1.2629,
"step": 368
},
{
"epoch": 0.024852250340960752,
"grad_norm": 1.0786629915237427,
"learning_rate": 1.5508719224689717e-06,
"loss": 1.3168,
"step": 369
},
{
"epoch": 0.024919600612887473,
"grad_norm": 1.2885220050811768,
"learning_rate": 1.4529091286973995e-06,
"loss": 1.3615,
"step": 370
},
{
"epoch": 0.0249869508848142,
"grad_norm": 1.0082440376281738,
"learning_rate": 1.358096486081778e-06,
"loss": 1.2357,
"step": 371
},
{
"epoch": 0.02505430115674092,
"grad_norm": 1.1223047971725464,
"learning_rate": 1.2664401468786114e-06,
"loss": 1.3303,
"step": 372
},
{
"epoch": 0.025121651428667645,
"grad_norm": 0.9703611731529236,
"learning_rate": 1.1779460585363944e-06,
"loss": 0.9298,
"step": 373
},
{
"epoch": 0.025189001700594366,
"grad_norm": 1.0855671167373657,
"learning_rate": 1.0926199633097157e-06,
"loss": 1.1574,
"step": 374
},
{
"epoch": 0.025256351972521088,
"grad_norm": 1.0677353143692017,
"learning_rate": 1.0104673978866164e-06,
"loss": 1.3944,
"step": 375
},
{
"epoch": 0.025323702244447813,
"grad_norm": 1.1818300485610962,
"learning_rate": 9.314936930293283e-07,
"loss": 1.4814,
"step": 376
},
{
"epoch": 0.025391052516374534,
"grad_norm": 1.0969845056533813,
"learning_rate": 8.557039732283944e-07,
"loss": 1.3965,
"step": 377
},
{
"epoch": 0.02545840278830126,
"grad_norm": 1.1741873025894165,
"learning_rate": 7.83103156370113e-07,
"loss": 1.4252,
"step": 378
},
{
"epoch": 0.02552575306022798,
"grad_norm": 1.0685746669769287,
"learning_rate": 7.136959534174592e-07,
"loss": 1.2656,
"step": 379
},
{
"epoch": 0.025593103332154702,
"grad_norm": 1.052201747894287,
"learning_rate": 6.474868681043578e-07,
"loss": 1.2646,
"step": 380
},
{
"epoch": 0.025660453604081427,
"grad_norm": 1.2119255065917969,
"learning_rate": 5.844801966434832e-07,
"loss": 1.4586,
"step": 381
},
{
"epoch": 0.02572780387600815,
"grad_norm": 1.235493540763855,
"learning_rate": 5.246800274474439e-07,
"loss": 1.3549,
"step": 382
},
{
"epoch": 0.025795154147934873,
"grad_norm": 1.270772099494934,
"learning_rate": 4.680902408635335e-07,
"loss": 1.389,
"step": 383
},
{
"epoch": 0.025862504419861595,
"grad_norm": 1.2856876850128174,
"learning_rate": 4.1471450892189846e-07,
"loss": 1.4744,
"step": 384
},
{
"epoch": 0.02592985469178832,
"grad_norm": 1.3383358716964722,
"learning_rate": 3.6455629509730136e-07,
"loss": 1.5902,
"step": 385
},
{
"epoch": 0.02599720496371504,
"grad_norm": 1.2960913181304932,
"learning_rate": 3.1761885408435054e-07,
"loss": 1.4461,
"step": 386
},
{
"epoch": 0.026064555235641763,
"grad_norm": 1.5527251958847046,
"learning_rate": 2.7390523158633554e-07,
"loss": 1.5167,
"step": 387
},
{
"epoch": 0.026131905507568488,
"grad_norm": 1.2937921285629272,
"learning_rate": 2.334182641175686e-07,
"loss": 1.2337,
"step": 388
},
{
"epoch": 0.02619925577949521,
"grad_norm": 1.2841928005218506,
"learning_rate": 1.9616057881935436e-07,
"loss": 1.2654,
"step": 389
},
{
"epoch": 0.026266606051421934,
"grad_norm": 1.3135737180709839,
"learning_rate": 1.6213459328950352e-07,
"loss": 1.433,
"step": 390
},
{
"epoch": 0.026333956323348656,
"grad_norm": 1.3532600402832031,
"learning_rate": 1.3134251542544774e-07,
"loss": 1.3902,
"step": 391
},
{
"epoch": 0.026401306595275377,
"grad_norm": 1.3329143524169922,
"learning_rate": 1.0378634328099269e-07,
"loss": 1.4056,
"step": 392
},
{
"epoch": 0.026468656867202102,
"grad_norm": 1.5213643312454224,
"learning_rate": 7.946786493666647e-08,
"loss": 1.5343,
"step": 393
},
{
"epoch": 0.026536007139128823,
"grad_norm": 1.5116889476776123,
"learning_rate": 5.838865838366792e-08,
"loss": 1.6032,
"step": 394
},
{
"epoch": 0.02660335741105555,
"grad_norm": 1.2199372053146362,
"learning_rate": 4.055009142152067e-08,
"loss": 1.3873,
"step": 395
},
{
"epoch": 0.02667070768298227,
"grad_norm": 1.539748191833496,
"learning_rate": 2.595332156925534e-08,
"loss": 1.4327,
"step": 396
},
{
"epoch": 0.02673805795490899,
"grad_norm": 1.466637134552002,
"learning_rate": 1.4599295990352924e-08,
"loss": 1.5486,
"step": 397
},
{
"epoch": 0.026805408226835716,
"grad_norm": 1.5383957624435425,
"learning_rate": 6.488751431266149e-09,
"loss": 1.2895,
"step": 398
},
{
"epoch": 0.026872758498762438,
"grad_norm": 1.4582974910736084,
"learning_rate": 1.622214173602199e-09,
"loss": 1.3618,
"step": 399
},
{
"epoch": 0.026940108770689163,
"grad_norm": 1.8420403003692627,
"learning_rate": 0.0,
"loss": 1.5286,
"step": 400
},
{
"epoch": 0.026940108770689163,
"eval_loss": 1.3278324604034424,
"eval_runtime": 738.8324,
"eval_samples_per_second": 33.847,
"eval_steps_per_second": 8.462,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.0933414555287552e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}