{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07272727272727272,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014545454545454545,
      "grad_norm": 0.5051049590110779,
      "learning_rate": 2e-05,
      "loss": 0.3615,
      "step": 1
    },
    {
      "epoch": 0.0014545454545454545,
      "eval_loss": 0.4058317542076111,
      "eval_runtime": 24.9192,
      "eval_samples_per_second": 11.638,
      "eval_steps_per_second": 5.819,
      "step": 1
    },
    {
      "epoch": 0.002909090909090909,
      "grad_norm": 0.7143672704696655,
      "learning_rate": 4e-05,
      "loss": 0.4981,
      "step": 2
    },
    {
      "epoch": 0.004363636363636364,
      "grad_norm": 0.3360948860645294,
      "learning_rate": 6e-05,
      "loss": 0.2856,
      "step": 3
    },
    {
      "epoch": 0.005818181818181818,
      "grad_norm": 0.44477200508117676,
      "learning_rate": 8e-05,
      "loss": 0.2617,
      "step": 4
    },
    {
      "epoch": 0.007272727272727273,
      "grad_norm": 0.4044454097747803,
      "learning_rate": 0.0001,
      "loss": 0.2466,
      "step": 5
    },
    {
      "epoch": 0.007272727272727273,
      "eval_loss": 0.3780933916568756,
      "eval_runtime": 24.6289,
      "eval_samples_per_second": 11.775,
      "eval_steps_per_second": 5.887,
      "step": 5
    },
    {
      "epoch": 0.008727272727272728,
      "grad_norm": 0.43507444858551025,
      "learning_rate": 0.00012,
      "loss": 0.2384,
      "step": 6
    },
    {
      "epoch": 0.010181818181818183,
      "grad_norm": 0.541296124458313,
      "learning_rate": 0.00014,
      "loss": 0.2606,
      "step": 7
    },
    {
      "epoch": 0.011636363636363636,
      "grad_norm": 0.4881332814693451,
      "learning_rate": 0.00016,
      "loss": 0.2463,
      "step": 8
    },
    {
      "epoch": 0.01309090909090909,
      "grad_norm": 0.7712896466255188,
      "learning_rate": 0.00018,
      "loss": 0.3117,
      "step": 9
    },
    {
      "epoch": 0.014545454545454545,
      "grad_norm": 0.48558855056762695,
      "learning_rate": 0.0002,
      "loss": 0.1169,
      "step": 10
    },
    {
      "epoch": 0.014545454545454545,
      "eval_loss": 0.12428049743175507,
      "eval_runtime": 24.7343,
      "eval_samples_per_second": 11.725,
      "eval_steps_per_second": 5.862,
      "step": 10
    },
    {
      "epoch": 0.016,
      "grad_norm": 0.4993226230144501,
      "learning_rate": 0.0001996917333733128,
      "loss": 0.1023,
      "step": 11
    },
    {
      "epoch": 0.017454545454545455,
      "grad_norm": 0.8036471009254456,
      "learning_rate": 0.00019876883405951377,
      "loss": 0.0975,
      "step": 12
    },
    {
      "epoch": 0.01890909090909091,
      "grad_norm": 0.8658446073532104,
      "learning_rate": 0.00019723699203976766,
      "loss": 0.1072,
      "step": 13
    },
    {
      "epoch": 0.020363636363636365,
      "grad_norm": 0.5599696040153503,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.0612,
      "step": 14
    },
    {
      "epoch": 0.02181818181818182,
      "grad_norm": 0.5136664509773254,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.0837,
      "step": 15
    },
    {
      "epoch": 0.02181818181818182,
      "eval_loss": 0.07245617359876633,
      "eval_runtime": 24.6969,
      "eval_samples_per_second": 11.742,
      "eval_steps_per_second": 5.871,
      "step": 15
    },
    {
      "epoch": 0.02327272727272727,
      "grad_norm": 0.6104558706283569,
      "learning_rate": 0.0001891006524188368,
      "loss": 0.0643,
      "step": 16
    },
    {
      "epoch": 0.024727272727272726,
      "grad_norm": 0.5080060958862305,
      "learning_rate": 0.00018526401643540922,
      "loss": 0.0751,
      "step": 17
    },
    {
      "epoch": 0.02618181818181818,
      "grad_norm": 0.3353659212589264,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0363,
      "step": 18
    },
    {
      "epoch": 0.027636363636363636,
      "grad_norm": 0.5001118779182434,
      "learning_rate": 0.0001760405965600031,
      "loss": 0.0556,
      "step": 19
    },
    {
      "epoch": 0.02909090909090909,
      "grad_norm": 0.6435399055480957,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.0711,
      "step": 20
    },
    {
      "epoch": 0.02909090909090909,
      "eval_loss": 0.06782884895801544,
      "eval_runtime": 24.764,
      "eval_samples_per_second": 11.711,
      "eval_steps_per_second": 5.855,
      "step": 20
    },
    {
      "epoch": 0.030545454545454546,
      "grad_norm": 0.561763346195221,
      "learning_rate": 0.00016494480483301836,
      "loss": 0.095,
      "step": 21
    },
    {
      "epoch": 0.032,
      "grad_norm": 0.8185080885887146,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.1344,
      "step": 22
    },
    {
      "epoch": 0.03345454545454545,
      "grad_norm": 0.3717232942581177,
      "learning_rate": 0.0001522498564715949,
      "loss": 0.0271,
      "step": 23
    },
    {
      "epoch": 0.03490909090909091,
      "grad_norm": 0.31359872221946716,
      "learning_rate": 0.00014539904997395468,
      "loss": 0.0392,
      "step": 24
    },
    {
      "epoch": 0.03636363636363636,
      "grad_norm": 0.5982680916786194,
      "learning_rate": 0.000138268343236509,
      "loss": 0.0611,
      "step": 25
    },
    {
      "epoch": 0.03636363636363636,
      "eval_loss": 0.05668775737285614,
      "eval_runtime": 24.7809,
      "eval_samples_per_second": 11.703,
      "eval_steps_per_second": 5.851,
      "step": 25
    },
    {
      "epoch": 0.03781818181818182,
      "grad_norm": 0.22259199619293213,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.0199,
      "step": 26
    },
    {
      "epoch": 0.03927272727272727,
      "grad_norm": 0.7268075942993164,
      "learning_rate": 0.00012334453638559057,
      "loss": 0.177,
      "step": 27
    },
    {
      "epoch": 0.04072727272727273,
      "grad_norm": 0.22450760006904602,
      "learning_rate": 0.0001156434465040231,
      "loss": 0.0249,
      "step": 28
    },
    {
      "epoch": 0.04218181818181818,
      "grad_norm": 0.4080381691455841,
      "learning_rate": 0.0001078459095727845,
      "loss": 0.0378,
      "step": 29
    },
    {
      "epoch": 0.04363636363636364,
      "grad_norm": 0.47309610247612,
      "learning_rate": 0.0001,
      "loss": 0.0387,
      "step": 30
    },
    {
      "epoch": 0.04363636363636364,
      "eval_loss": 0.0551588237285614,
      "eval_runtime": 24.7856,
      "eval_samples_per_second": 11.7,
      "eval_steps_per_second": 5.85,
      "step": 30
    },
    {
      "epoch": 0.04509090909090909,
      "grad_norm": 0.565868079662323,
      "learning_rate": 9.215409042721552e-05,
      "loss": 0.1016,
      "step": 31
    },
    {
      "epoch": 0.04654545454545454,
      "grad_norm": 0.5154087543487549,
      "learning_rate": 8.435655349597689e-05,
      "loss": 0.0434,
      "step": 32
    },
    {
      "epoch": 0.048,
      "grad_norm": 0.7290896773338318,
      "learning_rate": 7.66554636144095e-05,
      "loss": 0.1061,
      "step": 33
    },
    {
      "epoch": 0.04945454545454545,
      "grad_norm": 0.7846975326538086,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.1535,
      "step": 34
    },
    {
      "epoch": 0.05090909090909091,
      "grad_norm": 0.3980070650577545,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.0709,
      "step": 35
    },
    {
      "epoch": 0.05090909090909091,
      "eval_loss": 0.05250132083892822,
      "eval_runtime": 24.7914,
      "eval_samples_per_second": 11.698,
      "eval_steps_per_second": 5.849,
      "step": 35
    },
    {
      "epoch": 0.05236363636363636,
      "grad_norm": 0.49924206733703613,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 0.0803,
      "step": 36
    },
    {
      "epoch": 0.05381818181818182,
      "grad_norm": 0.42729631066322327,
      "learning_rate": 4.7750143528405126e-05,
      "loss": 0.0447,
      "step": 37
    },
    {
      "epoch": 0.05527272727272727,
      "grad_norm": 0.20265118777751923,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.0277,
      "step": 38
    },
    {
      "epoch": 0.05672727272727273,
      "grad_norm": 0.5974223613739014,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 0.1184,
      "step": 39
    },
    {
      "epoch": 0.05818181818181818,
      "grad_norm": 0.6130847334861755,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.1079,
      "step": 40
    },
    {
      "epoch": 0.05818181818181818,
      "eval_loss": 0.05132311210036278,
      "eval_runtime": 24.7921,
      "eval_samples_per_second": 11.697,
      "eval_steps_per_second": 5.849,
      "step": 40
    },
    {
      "epoch": 0.05963636363636363,
      "grad_norm": 0.5219517946243286,
      "learning_rate": 2.3959403439996907e-05,
      "loss": 0.0571,
      "step": 41
    },
    {
      "epoch": 0.06109090909090909,
      "grad_norm": 0.20746634900569916,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.0254,
      "step": 42
    },
    {
      "epoch": 0.06254545454545454,
      "grad_norm": 1.359476089477539,
      "learning_rate": 1.4735983564590783e-05,
      "loss": 0.04,
      "step": 43
    },
    {
      "epoch": 0.064,
      "grad_norm": 0.55459064245224,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 0.0772,
      "step": 44
    },
    {
      "epoch": 0.06545454545454546,
      "grad_norm": 0.19099941849708557,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.0142,
      "step": 45
    },
    {
      "epoch": 0.06545454545454546,
      "eval_loss": 0.051045071333646774,
      "eval_runtime": 24.8018,
      "eval_samples_per_second": 11.693,
      "eval_steps_per_second": 5.846,
      "step": 45
    },
    {
      "epoch": 0.0669090909090909,
      "grad_norm": 0.20805087685585022,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.0106,
      "step": 46
    },
    {
      "epoch": 0.06836363636363636,
      "grad_norm": 0.36373722553253174,
      "learning_rate": 2.7630079602323442e-06,
      "loss": 0.0605,
      "step": 47
    },
    {
      "epoch": 0.06981818181818182,
      "grad_norm": 0.2527661621570587,
      "learning_rate": 1.231165940486234e-06,
      "loss": 0.0241,
      "step": 48
    },
    {
      "epoch": 0.07127272727272728,
      "grad_norm": 2.111194372177124,
      "learning_rate": 3.0826662668720364e-07,
      "loss": 0.1286,
      "step": 49
    },
    {
      "epoch": 0.07272727272727272,
      "grad_norm": 0.45172688364982605,
      "learning_rate": 0.0,
      "loss": 0.0227,
      "step": 50
    },
    {
      "epoch": 0.07272727272727272,
      "eval_loss": 0.05098990350961685,
      "eval_runtime": 24.8142,
      "eval_samples_per_second": 11.687,
      "eval_steps_per_second": 5.843,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 70,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8588120693866496e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}