{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.013095290060674844, "eval_steps": 25, "global_step": 75, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00017460386747566458, "grad_norm": 0.7365045547485352, "learning_rate": 3.3333333333333335e-05, "loss": 1.8112, "step": 1 }, { "epoch": 0.00017460386747566458, "eval_loss": 1.5676409006118774, "eval_runtime": 538.1585, "eval_samples_per_second": 8.962, "eval_steps_per_second": 4.482, "step": 1 }, { "epoch": 0.00034920773495132916, "grad_norm": 1.230597972869873, "learning_rate": 6.666666666666667e-05, "loss": 2.0408, "step": 2 }, { "epoch": 0.0005238116024269937, "grad_norm": 1.01038658618927, "learning_rate": 0.0001, "loss": 1.5271, "step": 3 }, { "epoch": 0.0006984154699026583, "grad_norm": 1.0910125970840454, "learning_rate": 9.99524110790929e-05, "loss": 1.5123, "step": 4 }, { "epoch": 0.0008730193373783229, "grad_norm": 1.1756590604782104, "learning_rate": 9.980973490458728e-05, "loss": 1.3788, "step": 5 }, { "epoch": 0.0010476232048539874, "grad_norm": 1.2190275192260742, "learning_rate": 9.957224306869053e-05, "loss": 1.6374, "step": 6 }, { "epoch": 0.001222227072329652, "grad_norm": 1.3136614561080933, "learning_rate": 9.924038765061042e-05, "loss": 1.4122, "step": 7 }, { "epoch": 0.0013968309398053166, "grad_norm": 1.3199424743652344, "learning_rate": 9.881480035599667e-05, "loss": 1.4148, "step": 8 }, { "epoch": 0.0015714348072809812, "grad_norm": 1.2985801696777344, "learning_rate": 9.829629131445342e-05, "loss": 1.3932, "step": 9 }, { "epoch": 0.0017460386747566458, "grad_norm": 1.776046872138977, "learning_rate": 9.768584753741134e-05, "loss": 1.2455, "step": 10 }, { "epoch": 0.0019206425422323104, "grad_norm": 1.5616651773452759, "learning_rate": 9.698463103929542e-05, "loss": 1.396, "step": 11 }, { "epoch": 0.002095246409707975, "grad_norm": 1.5658314228057861, "learning_rate": 9.619397662556435e-05, "loss": 1.2894, "step": 12 }, { "epoch": 0.0022698502771836397, "grad_norm": 1.3641587495803833, "learning_rate": 9.53153893518325e-05, "loss": 1.1194, "step": 13 }, { "epoch": 0.002444454144659304, "grad_norm": 1.3050167560577393, "learning_rate": 9.435054165891109e-05, "loss": 1.3872, "step": 14 }, { "epoch": 0.002619058012134969, "grad_norm": 1.1339519023895264, "learning_rate": 9.330127018922194e-05, "loss": 1.5126, "step": 15 }, { "epoch": 0.0027936618796106333, "grad_norm": 1.3289313316345215, "learning_rate": 9.21695722906443e-05, "loss": 1.4058, "step": 16 }, { "epoch": 0.002968265747086298, "grad_norm": 1.438923954963684, "learning_rate": 9.09576022144496e-05, "loss": 1.2404, "step": 17 }, { "epoch": 0.0031428696145619625, "grad_norm": 1.2848269939422607, "learning_rate": 8.966766701456177e-05, "loss": 1.2624, "step": 18 }, { "epoch": 0.0033174734820376273, "grad_norm": 1.341930866241455, "learning_rate": 8.83022221559489e-05, "loss": 1.3761, "step": 19 }, { "epoch": 0.0034920773495132917, "grad_norm": 1.1548206806182861, "learning_rate": 8.68638668405062e-05, "loss": 1.5395, "step": 20 }, { "epoch": 0.0036666812169889565, "grad_norm": 1.1897963285446167, "learning_rate": 8.535533905932738e-05, "loss": 1.4114, "step": 21 }, { "epoch": 0.003841285084464621, "grad_norm": 1.1397008895874023, "learning_rate": 8.377951038078302e-05, "loss": 1.6104, "step": 22 }, { "epoch": 0.004015888951940286, "grad_norm": 1.6328984498977661, "learning_rate": 8.213938048432697e-05, "loss": 1.5125, "step": 23 }, { "epoch": 0.00419049281941595, 
"grad_norm": 1.214820384979248, "learning_rate": 8.043807145043604e-05, "loss": 1.222, "step": 24 }, { "epoch": 0.0043650966868916145, "grad_norm": 1.469793677330017, "learning_rate": 7.86788218175523e-05, "loss": 1.1239, "step": 25 }, { "epoch": 0.0043650966868916145, "eval_loss": 1.2938027381896973, "eval_runtime": 539.1895, "eval_samples_per_second": 8.945, "eval_steps_per_second": 4.473, "step": 25 }, { "epoch": 0.004539700554367279, "grad_norm": 1.3989931344985962, "learning_rate": 7.68649804173412e-05, "loss": 1.0884, "step": 26 }, { "epoch": 0.004714304421842944, "grad_norm": 1.6388263702392578, "learning_rate": 7.500000000000001e-05, "loss": 1.1209, "step": 27 }, { "epoch": 0.004888908289318608, "grad_norm": 1.3612502813339233, "learning_rate": 7.308743066175172e-05, "loss": 1.4583, "step": 28 }, { "epoch": 0.005063512156794273, "grad_norm": 1.323246955871582, "learning_rate": 7.113091308703498e-05, "loss": 1.3841, "step": 29 }, { "epoch": 0.005238116024269938, "grad_norm": 1.2201770544052124, "learning_rate": 6.91341716182545e-05, "loss": 1.3537, "step": 30 }, { "epoch": 0.005412719891745603, "grad_norm": 1.169925570487976, "learning_rate": 6.710100716628344e-05, "loss": 1.0672, "step": 31 }, { "epoch": 0.0055873237592212665, "grad_norm": 1.8130548000335693, "learning_rate": 6.503528997521366e-05, "loss": 1.2598, "step": 32 }, { "epoch": 0.005761927626696931, "grad_norm": 1.4853343963623047, "learning_rate": 6.294095225512603e-05, "loss": 1.2338, "step": 33 }, { "epoch": 0.005936531494172596, "grad_norm": 1.5606756210327148, "learning_rate": 6.0821980696905146e-05, "loss": 1.1368, "step": 34 }, { "epoch": 0.00611113536164826, "grad_norm": 1.3997530937194824, "learning_rate": 5.868240888334653e-05, "loss": 1.0479, "step": 35 }, { "epoch": 0.006285739229123925, "grad_norm": 1.3681511878967285, "learning_rate": 5.6526309611002594e-05, "loss": 1.3044, "step": 36 }, { "epoch": 0.00646034309659959, "grad_norm": 1.387187123298645, "learning_rate": 5.435778713738292e-05, "loss": 1.4802, "step": 37 }, { "epoch": 0.006634946964075255, "grad_norm": 1.53128182888031, "learning_rate": 5.218096936826681e-05, "loss": 1.2611, "step": 38 }, { "epoch": 0.0068095508315509186, "grad_norm": 1.3304214477539062, "learning_rate": 5e-05, "loss": 1.4855, "step": 39 }, { "epoch": 0.006984154699026583, "grad_norm": 1.5679274797439575, "learning_rate": 4.781903063173321e-05, "loss": 1.0539, "step": 40 }, { "epoch": 0.007158758566502248, "grad_norm": 1.6547032594680786, "learning_rate": 4.564221286261709e-05, "loss": 1.3219, "step": 41 }, { "epoch": 0.007333362433977913, "grad_norm": 1.4425872564315796, "learning_rate": 4.347369038899744e-05, "loss": 1.2929, "step": 42 }, { "epoch": 0.007507966301453577, "grad_norm": 1.4797848463058472, "learning_rate": 4.131759111665349e-05, "loss": 0.9107, "step": 43 }, { "epoch": 0.007682570168929242, "grad_norm": 1.6376514434814453, "learning_rate": 3.917801930309486e-05, "loss": 1.2856, "step": 44 }, { "epoch": 0.007857174036404907, "grad_norm": 1.8783955574035645, "learning_rate": 3.705904774487396e-05, "loss": 1.378, "step": 45 }, { "epoch": 0.008031777903880571, "grad_norm": 1.9573678970336914, "learning_rate": 3.4964710024786354e-05, "loss": 1.2765, "step": 46 }, { "epoch": 0.008206381771356236, "grad_norm": 1.8132413625717163, "learning_rate": 3.289899283371657e-05, "loss": 1.2729, "step": 47 }, { "epoch": 0.0083809856388319, "grad_norm": 1.527602195739746, "learning_rate": 3.086582838174551e-05, "loss": 0.9316, "step": 48 }, { "epoch": 0.008555589506307564, 
"grad_norm": 2.0688371658325195, "learning_rate": 2.886908691296504e-05, "loss": 1.2326, "step": 49 }, { "epoch": 0.008730193373783229, "grad_norm": 3.0766944885253906, "learning_rate": 2.6912569338248315e-05, "loss": 1.3781, "step": 50 }, { "epoch": 0.008730193373783229, "eval_loss": 1.292413592338562, "eval_runtime": 537.8588, "eval_samples_per_second": 8.967, "eval_steps_per_second": 4.484, "step": 50 }, { "epoch": 0.008904797241258894, "grad_norm": 0.8374080657958984, "learning_rate": 2.500000000000001e-05, "loss": 1.7621, "step": 51 }, { "epoch": 0.009079401108734559, "grad_norm": 0.9951280951499939, "learning_rate": 2.3135019582658802e-05, "loss": 1.3985, "step": 52 }, { "epoch": 0.009254004976210223, "grad_norm": 1.160885214805603, "learning_rate": 2.132117818244771e-05, "loss": 1.7472, "step": 53 }, { "epoch": 0.009428608843685888, "grad_norm": 1.3874828815460205, "learning_rate": 1.9561928549563968e-05, "loss": 1.4435, "step": 54 }, { "epoch": 0.009603212711161551, "grad_norm": 1.014530897140503, "learning_rate": 1.7860619515673033e-05, "loss": 1.5598, "step": 55 }, { "epoch": 0.009777816578637216, "grad_norm": 0.8571755290031433, "learning_rate": 1.622048961921699e-05, "loss": 1.4335, "step": 56 }, { "epoch": 0.009952420446112881, "grad_norm": 1.069875717163086, "learning_rate": 1.4644660940672627e-05, "loss": 0.9947, "step": 57 }, { "epoch": 0.010127024313588546, "grad_norm": 0.9428219199180603, "learning_rate": 1.3136133159493802e-05, "loss": 1.4383, "step": 58 }, { "epoch": 0.01030162818106421, "grad_norm": 0.9815278649330139, "learning_rate": 1.1697777844051105e-05, "loss": 1.4594, "step": 59 }, { "epoch": 0.010476232048539875, "grad_norm": 1.117757797241211, "learning_rate": 1.0332332985438248e-05, "loss": 1.6801, "step": 60 }, { "epoch": 0.01065083591601554, "grad_norm": 1.043749213218689, "learning_rate": 9.042397785550405e-06, "loss": 1.2292, "step": 61 }, { "epoch": 0.010825439783491205, "grad_norm": 1.0151351690292358, "learning_rate": 7.830427709355725e-06, "loss": 1.3968, "step": 62 }, { "epoch": 0.011000043650966868, "grad_norm": 1.138023853302002, "learning_rate": 6.698729810778065e-06, "loss": 1.4304, "step": 63 }, { "epoch": 0.011174647518442533, "grad_norm": 1.0070977210998535, "learning_rate": 5.649458341088915e-06, "loss": 1.2263, "step": 64 }, { "epoch": 0.011349251385918198, "grad_norm": 1.4350625276565552, "learning_rate": 4.684610648167503e-06, "loss": 1.5576, "step": 65 }, { "epoch": 0.011523855253393863, "grad_norm": 1.046802282333374, "learning_rate": 3.8060233744356633e-06, "loss": 1.2175, "step": 66 }, { "epoch": 0.011698459120869528, "grad_norm": 1.1393954753875732, "learning_rate": 3.0153689607045845e-06, "loss": 1.1448, "step": 67 }, { "epoch": 0.011873062988345192, "grad_norm": 1.213820219039917, "learning_rate": 2.314152462588659e-06, "loss": 1.2862, "step": 68 }, { "epoch": 0.012047666855820857, "grad_norm": 1.0615662336349487, "learning_rate": 1.70370868554659e-06, "loss": 1.2547, "step": 69 }, { "epoch": 0.01222227072329652, "grad_norm": 2.1386613845825195, "learning_rate": 1.1851996440033319e-06, "loss": 1.3689, "step": 70 }, { "epoch": 0.012396874590772185, "grad_norm": 1.4475680589675903, "learning_rate": 7.596123493895991e-07, "loss": 1.0843, "step": 71 }, { "epoch": 0.01257147845824785, "grad_norm": 1.0686284303665161, "learning_rate": 4.277569313094809e-07, "loss": 1.2531, "step": 72 }, { "epoch": 0.012746082325723515, "grad_norm": 1.3505923748016357, "learning_rate": 1.9026509541272275e-07, "loss": 1.2997, "step": 73 }, { "epoch": 
0.01292068619319918, "grad_norm": 1.0805271863937378, "learning_rate": 4.7588920907110094e-08, "loss": 1.5371, "step": 74 }, { "epoch": 0.013095290060674844, "grad_norm": 1.453600525856018, "learning_rate": 0.0, "loss": 1.4003, "step": 75 }, { "epoch": 0.013095290060674844, "eval_loss": 1.2775593996047974, "eval_runtime": 538.056, "eval_samples_per_second": 8.964, "eval_steps_per_second": 4.483, "step": 75 } ], "logging_steps": 1, "max_steps": 75, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.1208807043773235e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }
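
The JSON above is a Trainer state file as written by the Hugging Face Trainer: "log_history" holds one entry per optimizer step (loss, learning_rate, grad_norm) plus an evaluation entry every eval_steps=25 steps, for a 75-step run. Below is a minimal inspection sketch, assuming the JSON is saved as trainer_state.json (the default file name the Trainer uses inside a checkpoint directory; adjust the path if yours differs). It is not part of the training run itself.

import json

# Sketch: read a Trainer state file like the one above and print the logged
# per-step training loss and the periodic eval loss.
# "trainer_state.json" is an assumed path; point it at your checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:            # per-step training log (logging_steps=1)
        print(f'step {entry["step"]:>3}  lr {entry["learning_rate"]:.2e}  loss {entry["loss"]:.4f}')
    elif "eval_loss" in entry:     # evaluation log (every eval_steps=25)
        print(f'step {entry["step"]:>3}  eval_loss {entry["eval_loss"]:.4f}')

Read this way, the log shows the learning rate warming up to 1e-4 over the first three steps and then decaying to 0 by step 75, with eval_loss falling from 1.568 at step 1 to 1.294 at step 25 and 1.278 at step 75.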