{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.011404784307016793,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.7023921535083966e-05,
"eval_loss": 1.856095790863037,
"eval_runtime": 600.2462,
"eval_samples_per_second": 12.302,
"eval_steps_per_second": 6.151,
"step": 1
},
{
"epoch": 0.0002851196076754198,
"grad_norm": 2.9381930828094482,
"learning_rate": 5e-05,
"loss": 1.6122,
"step": 5
},
{
"epoch": 0.0005702392153508396,
"grad_norm": 2.9322147369384766,
"learning_rate": 0.0001,
"loss": 1.3257,
"step": 10
},
{
"epoch": 0.0008553588230262595,
"grad_norm": 2.425527334213257,
"learning_rate": 9.98292246503335e-05,
"loss": 0.7947,
"step": 15
},
{
"epoch": 0.0011404784307016793,
"grad_norm": 3.9417734146118164,
"learning_rate": 9.931806517013612e-05,
"loss": 0.657,
"step": 20
},
{
"epoch": 0.0014255980383770992,
"grad_norm": 2.3073930740356445,
"learning_rate": 9.847001329696653e-05,
"loss": 0.8799,
"step": 25
},
{
"epoch": 0.001710717646052519,
"grad_norm": 2.638801097869873,
"learning_rate": 9.729086208503174e-05,
"loss": 0.8697,
"step": 30
},
{
"epoch": 0.0019958372537279387,
"grad_norm": 4.064486026763916,
"learning_rate": 9.578866633275288e-05,
"loss": 0.5677,
"step": 35
},
{
"epoch": 0.0022809568614033586,
"grad_norm": 2.6450159549713135,
"learning_rate": 9.397368756032445e-05,
"loss": 0.679,
"step": 40
},
{
"epoch": 0.0025660764690787785,
"grad_norm": 1.7913905382156372,
"learning_rate": 9.185832391312644e-05,
"loss": 0.6127,
"step": 45
},
{
"epoch": 0.0028511960767541983,
"grad_norm": 6.29102087020874,
"learning_rate": 8.945702546981969e-05,
"loss": 0.7151,
"step": 50
},
{
"epoch": 0.0028511960767541983,
"eval_loss": 0.7523384094238281,
"eval_runtime": 603.0908,
"eval_samples_per_second": 12.244,
"eval_steps_per_second": 6.122,
"step": 50
},
{
"epoch": 0.003136315684429618,
"grad_norm": 2.869847536087036,
"learning_rate": 8.678619553365659e-05,
"loss": 1.1559,
"step": 55
},
{
"epoch": 0.003421435292105038,
"grad_norm": 3.1679632663726807,
"learning_rate": 8.386407858128706e-05,
"loss": 0.7223,
"step": 60
},
{
"epoch": 0.003706554899780458,
"grad_norm": 1.9816867113113403,
"learning_rate": 8.07106356344834e-05,
"loss": 0.8216,
"step": 65
},
{
"epoch": 0.003991674507455877,
"grad_norm": 1.56283438205719,
"learning_rate": 7.734740790612136e-05,
"loss": 0.6841,
"step": 70
},
{
"epoch": 0.004276794115131297,
"grad_norm": 2.0818769931793213,
"learning_rate": 7.379736965185368e-05,
"loss": 0.7426,
"step": 75
},
{
"epoch": 0.004561913722806717,
"grad_norm": 2.3219709396362305,
"learning_rate": 7.008477123264848e-05,
"loss": 0.6782,
"step": 80
},
{
"epoch": 0.004847033330482137,
"grad_norm": 3.3414204120635986,
"learning_rate": 6.623497346023418e-05,
"loss": 0.8352,
"step": 85
},
{
"epoch": 0.005132152938157557,
"grad_norm": 2.7498598098754883,
"learning_rate": 6.227427435703997e-05,
"loss": 0.6972,
"step": 90
},
{
"epoch": 0.005417272545832977,
"grad_norm": 2.993232488632202,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.8673,
"step": 95
},
{
"epoch": 0.005702392153508397,
"grad_norm": 2.752300262451172,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.6133,
"step": 100
},
{
"epoch": 0.005702392153508397,
"eval_loss": 0.7154624462127686,
"eval_runtime": 602.8113,
"eval_samples_per_second": 12.249,
"eval_steps_per_second": 6.125,
"step": 100
},
{
"epoch": 0.0059875117611838165,
"grad_norm": 1.8962048292160034,
"learning_rate": 5e-05,
"loss": 0.8849,
"step": 105
},
{
"epoch": 0.006272631368859236,
"grad_norm": 3.029825210571289,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.7899,
"step": 110
},
{
"epoch": 0.006557750976534656,
"grad_norm": 1.9052181243896484,
"learning_rate": 4.17702704859633e-05,
"loss": 0.6048,
"step": 115
},
{
"epoch": 0.006842870584210076,
"grad_norm": 2.7878966331481934,
"learning_rate": 3.772572564296005e-05,
"loss": 0.8162,
"step": 120
},
{
"epoch": 0.007127990191885496,
"grad_norm": 2.5749361515045166,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.6882,
"step": 125
},
{
"epoch": 0.007413109799560916,
"grad_norm": 1.8944499492645264,
"learning_rate": 2.991522876735154e-05,
"loss": 0.619,
"step": 130
},
{
"epoch": 0.007698229407236336,
"grad_norm": 6.684533596038818,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.7434,
"step": 135
},
{
"epoch": 0.007983349014911755,
"grad_norm": 2.9358720779418945,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.6902,
"step": 140
},
{
"epoch": 0.008268468622587176,
"grad_norm": 2.7193732261657715,
"learning_rate": 1.928936436551661e-05,
"loss": 0.5603,
"step": 145
},
{
"epoch": 0.008553588230262595,
"grad_norm": 2.623371124267578,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.7067,
"step": 150
},
{
"epoch": 0.008553588230262595,
"eval_loss": 0.7039576768875122,
"eval_runtime": 603.0182,
"eval_samples_per_second": 12.245,
"eval_steps_per_second": 6.123,
"step": 150
},
{
"epoch": 0.008838707837938015,
"grad_norm": 1.8917717933654785,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.9448,
"step": 155
},
{
"epoch": 0.009123827445613434,
"grad_norm": 2.2349658012390137,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.9108,
"step": 160
},
{
"epoch": 0.009408947053288855,
"grad_norm": 2.135024309158325,
"learning_rate": 8.141676086873572e-06,
"loss": 0.84,
"step": 165
},
{
"epoch": 0.009694066660964274,
"grad_norm": 2.4019577503204346,
"learning_rate": 6.026312439675552e-06,
"loss": 0.7272,
"step": 170
},
{
"epoch": 0.009979186268639695,
"grad_norm": 3.315854549407959,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.7174,
"step": 175
},
{
"epoch": 0.010264305876315114,
"grad_norm": 2.2670226097106934,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.6392,
"step": 180
},
{
"epoch": 0.010549425483990535,
"grad_norm": 3.537588357925415,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.8069,
"step": 185
},
{
"epoch": 0.010834545091665954,
"grad_norm": 1.9623892307281494,
"learning_rate": 6.819348298638839e-07,
"loss": 0.5686,
"step": 190
},
{
"epoch": 0.011119664699341374,
"grad_norm": 2.6013102531433105,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.5942,
"step": 195
},
{
"epoch": 0.011404784307016793,
"grad_norm": 3.067657947540283,
"learning_rate": 0.0,
"loss": 0.7097,
"step": 200
},
{
"epoch": 0.011404784307016793,
"eval_loss": 0.7022152543067932,
"eval_runtime": 602.7818,
"eval_samples_per_second": 12.25,
"eval_steps_per_second": 6.125,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.39825699258368e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}