{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.00641094133988674,
  "eval_steps": 4,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00021369804466289135,
      "grad_norm": 2.0482637882232666,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 5.0098,
      "step": 1
    },
    {
      "epoch": 0.00021369804466289135,
      "eval_loss": 2.4674696922302246,
      "eval_runtime": 411.0943,
      "eval_samples_per_second": 4.795,
      "eval_steps_per_second": 1.199,
      "step": 1
    },
    {
      "epoch": 0.0004273960893257827,
      "grad_norm": 2.238199234008789,
      "learning_rate": 4.000000000000001e-06,
      "loss": 5.2527,
      "step": 2
    },
    {
      "epoch": 0.000641094133988674,
      "grad_norm": 1.965638518333435,
      "learning_rate": 6e-06,
      "loss": 4.9741,
      "step": 3
    },
    {
      "epoch": 0.0008547921786515654,
      "grad_norm": 2.046441078186035,
      "learning_rate": 8.000000000000001e-06,
      "loss": 5.1034,
      "step": 4
    },
    {
      "epoch": 0.0008547921786515654,
      "eval_loss": 2.4658350944519043,
      "eval_runtime": 411.1677,
      "eval_samples_per_second": 4.794,
      "eval_steps_per_second": 1.199,
      "step": 4
    },
    {
      "epoch": 0.0010684902233144566,
      "grad_norm": 2.407226085662842,
      "learning_rate": 1e-05,
      "loss": 4.818,
      "step": 5
    },
    {
      "epoch": 0.001282188267977348,
      "grad_norm": 2.8345041275024414,
      "learning_rate": 9.960573506572391e-06,
      "loss": 5.3071,
      "step": 6
    },
    {
      "epoch": 0.0014958863126402393,
      "grad_norm": 2.036144256591797,
      "learning_rate": 9.842915805643156e-06,
      "loss": 4.7743,
      "step": 7
    },
    {
      "epoch": 0.0017095843573031308,
      "grad_norm": 2.0334997177124023,
      "learning_rate": 9.648882429441258e-06,
      "loss": 4.9792,
      "step": 8
    },
    {
      "epoch": 0.0017095843573031308,
      "eval_loss": 2.4481966495513916,
      "eval_runtime": 411.0364,
      "eval_samples_per_second": 4.795,
      "eval_steps_per_second": 1.199,
      "step": 8
    },
    {
      "epoch": 0.001923282401966022,
      "grad_norm": 1.955418586730957,
      "learning_rate": 9.381533400219319e-06,
      "loss": 4.6347,
      "step": 9
    },
    {
      "epoch": 0.0021369804466289132,
      "grad_norm": 2.774395704269409,
      "learning_rate": 9.045084971874738e-06,
      "loss": 5.2668,
      "step": 10
    },
    {
      "epoch": 0.0023506784912918047,
      "grad_norm": 2.4080920219421387,
      "learning_rate": 8.644843137107058e-06,
      "loss": 4.669,
      "step": 11
    },
    {
      "epoch": 0.002564376535954696,
      "grad_norm": 2.503000020980835,
      "learning_rate": 8.18711994874345e-06,
      "loss": 4.6545,
      "step": 12
    },
    {
      "epoch": 0.002564376535954696,
      "eval_loss": 2.4184906482696533,
      "eval_runtime": 410.9238,
      "eval_samples_per_second": 4.797,
      "eval_steps_per_second": 1.2,
      "step": 12
    },
    {
      "epoch": 0.002778074580617587,
      "grad_norm": 2.407099485397339,
      "learning_rate": 7.679133974894984e-06,
      "loss": 5.1754,
      "step": 13
    },
    {
      "epoch": 0.0029917726252804786,
      "grad_norm": 2.5891873836517334,
      "learning_rate": 7.128896457825364e-06,
      "loss": 4.9656,
      "step": 14
    },
    {
      "epoch": 0.00320547066994337,
      "grad_norm": 2.612175464630127,
      "learning_rate": 6.545084971874738e-06,
      "loss": 4.9479,
      "step": 15
    },
    {
      "epoch": 0.0034191687146062615,
      "grad_norm": 2.592885732650757,
      "learning_rate": 5.936906572928625e-06,
      "loss": 4.8353,
      "step": 16
    },
    {
      "epoch": 0.0034191687146062615,
      "eval_loss": 2.3783442974090576,
      "eval_runtime": 411.1282,
      "eval_samples_per_second": 4.794,
      "eval_steps_per_second": 1.199,
      "step": 16
    },
    {
      "epoch": 0.0036328667592691525,
      "grad_norm": 2.204364538192749,
      "learning_rate": 5.3139525976465675e-06,
      "loss": 4.4296,
      "step": 17
    },
    {
      "epoch": 0.003846564803932044,
      "grad_norm": 2.751991033554077,
      "learning_rate": 4.686047402353433e-06,
      "loss": 4.6077,
      "step": 18
    },
    {
      "epoch": 0.004060262848594935,
      "grad_norm": 2.8248343467712402,
      "learning_rate": 4.063093427071376e-06,
      "loss": 4.8522,
      "step": 19
    },
    {
      "epoch": 0.0042739608932578265,
      "grad_norm": 2.5259201526641846,
      "learning_rate": 3.4549150281252635e-06,
      "loss": 4.4364,
      "step": 20
    },
    {
      "epoch": 0.0042739608932578265,
      "eval_loss": 2.3454465866088867,
      "eval_runtime": 411.3946,
      "eval_samples_per_second": 4.791,
      "eval_steps_per_second": 1.198,
      "step": 20
    },
    {
      "epoch": 0.004487658937920718,
      "grad_norm": 2.3242313861846924,
      "learning_rate": 2.871103542174637e-06,
      "loss": 4.729,
      "step": 21
    },
    {
      "epoch": 0.004701356982583609,
      "grad_norm": 2.1803932189941406,
      "learning_rate": 2.320866025105016e-06,
      "loss": 4.4493,
      "step": 22
    },
    {
      "epoch": 0.004915055027246501,
      "grad_norm": 3.0520858764648438,
      "learning_rate": 1.8128800512565514e-06,
      "loss": 4.7408,
      "step": 23
    },
    {
      "epoch": 0.005128753071909392,
      "grad_norm": 3.3410630226135254,
      "learning_rate": 1.3551568628929434e-06,
      "loss": 4.6984,
      "step": 24
    },
    {
      "epoch": 0.005128753071909392,
      "eval_loss": 2.328244924545288,
      "eval_runtime": 411.355,
      "eval_samples_per_second": 4.791,
      "eval_steps_per_second": 1.198,
      "step": 24
    },
    {
      "epoch": 0.005342451116572284,
      "grad_norm": 2.358137845993042,
      "learning_rate": 9.549150281252633e-07,
      "loss": 4.4836,
      "step": 25
    },
    {
      "epoch": 0.005556149161235174,
      "grad_norm": 2.9494125843048096,
      "learning_rate": 6.184665997806832e-07,
      "loss": 5.1003,
      "step": 26
    },
    {
      "epoch": 0.005769847205898066,
      "grad_norm": 3.0665676593780518,
      "learning_rate": 3.511175705587433e-07,
      "loss": 4.7526,
      "step": 27
    },
    {
      "epoch": 0.005983545250560957,
      "grad_norm": 2.681220293045044,
      "learning_rate": 1.5708419435684463e-07,
      "loss": 4.6398,
      "step": 28
    },
    {
      "epoch": 0.005983545250560957,
      "eval_loss": 2.3202476501464844,
      "eval_runtime": 411.2537,
      "eval_samples_per_second": 4.793,
      "eval_steps_per_second": 1.199,
      "step": 28
    },
    {
      "epoch": 0.006197243295223849,
      "grad_norm": 3.1423282623291016,
      "learning_rate": 3.9426493427611177e-08,
      "loss": 4.7534,
      "step": 29
    },
    {
      "epoch": 0.00641094133988674,
      "grad_norm": 2.7747702598571777,
      "learning_rate": 0.0,
      "loss": 4.8573,
      "step": 30
    }
  ],
  "logging_steps": 1,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.90434898870272e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}