{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 11115,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1349527665317139,
      "grad_norm": 7.277841567993164,
      "learning_rate": 4.775078722447144e-05,
      "loss": 3.0654,
      "step": 500
    },
    {
      "epoch": 0.2699055330634278,
      "grad_norm": 7.536296844482422,
      "learning_rate": 4.550157444894287e-05,
      "loss": 2.4088,
      "step": 1000
    },
    {
      "epoch": 0.4048582995951417,
      "grad_norm": 6.857296466827393,
      "learning_rate": 4.3252361673414306e-05,
      "loss": 2.1502,
      "step": 1500
    },
    {
      "epoch": 0.5398110661268556,
      "grad_norm": 20.20909309387207,
      "learning_rate": 4.100314889788574e-05,
      "loss": 1.9977,
      "step": 2000
    },
    {
      "epoch": 0.6747638326585695,
      "grad_norm": 6.654603004455566,
      "learning_rate": 3.8753936122357176e-05,
      "loss": 1.8983,
      "step": 2500
    },
    {
      "epoch": 0.8097165991902834,
      "grad_norm": 7.12253999710083,
      "learning_rate": 3.650472334682861e-05,
      "loss": 1.8103,
      "step": 3000
    },
    {
      "epoch": 0.9446693657219973,
      "grad_norm": 6.6242170333862305,
      "learning_rate": 3.4255510571300045e-05,
      "loss": 1.7543,
      "step": 3500
    },
    {
      "epoch": 1.0796221322537112,
      "grad_norm": 5.963022232055664,
      "learning_rate": 3.2006297795771486e-05,
      "loss": 1.6822,
      "step": 4000
    },
    {
      "epoch": 1.214574898785425,
      "grad_norm": 6.769837856292725,
      "learning_rate": 2.9757085020242914e-05,
      "loss": 1.6501,
      "step": 4500
    },
    {
      "epoch": 1.349527665317139,
      "grad_norm": 6.467240333557129,
      "learning_rate": 2.750787224471435e-05,
      "loss": 1.612,
      "step": 5000
    },
    {
      "epoch": 1.484480431848853,
      "grad_norm": 6.431339740753174,
      "learning_rate": 2.5258659469185787e-05,
      "loss": 1.5606,
      "step": 5500
    },
    {
      "epoch": 1.6194331983805668,
      "grad_norm": 7.2527289390563965,
      "learning_rate": 2.300944669365722e-05,
      "loss": 1.5413,
      "step": 6000
    },
    {
      "epoch": 1.7543859649122808,
      "grad_norm": 6.609572887420654,
      "learning_rate": 2.0760233918128656e-05,
      "loss": 1.5367,
      "step": 6500
    },
    {
      "epoch": 1.8893387314439947,
      "grad_norm": 6.267154216766357,
      "learning_rate": 1.851102114260009e-05,
      "loss": 1.5087,
      "step": 7000
    },
    {
      "epoch": 2.0242914979757085,
      "grad_norm": 6.8475117683410645,
      "learning_rate": 1.6261808367071525e-05,
      "loss": 1.4727,
      "step": 7500
    },
    {
      "epoch": 2.1592442645074224,
      "grad_norm": 6.945182800292969,
      "learning_rate": 1.401259559154296e-05,
      "loss": 1.4564,
      "step": 8000
    },
    {
      "epoch": 2.294197031039136,
      "grad_norm": 6.858353137969971,
      "learning_rate": 1.1763382816014395e-05,
      "loss": 1.4325,
      "step": 8500
    },
    {
      "epoch": 2.42914979757085,
      "grad_norm": 6.653971195220947,
      "learning_rate": 9.51417004048583e-06,
      "loss": 1.4041,
      "step": 9000
    },
    {
      "epoch": 2.564102564102564,
      "grad_norm": 5.984017848968506,
      "learning_rate": 7.264957264957266e-06,
      "loss": 1.4085,
      "step": 9500
    },
    {
      "epoch": 2.699055330634278,
      "grad_norm": 6.64699649810791,
      "learning_rate": 5.0157444894287e-06,
      "loss": 1.365,
      "step": 10000
    },
    {
      "epoch": 2.834008097165992,
      "grad_norm": 6.052412509918213,
      "learning_rate": 2.766531713900135e-06,
      "loss": 1.3799,
      "step": 10500
    },
    {
      "epoch": 2.968960863697706,
      "grad_norm": 6.4499711990356445,
      "learning_rate": 5.1731893837157e-07,
      "loss": 1.3994,
      "step": 11000
    },
    {
      "epoch": 3.0,
      "step": 11115,
      "total_flos": 1.1730479287894016e+16,
      "train_loss": 1.7010253453222524,
      "train_runtime": 4017.837,
      "train_samples_per_second": 11.064,
      "train_steps_per_second": 2.766
    }
  ],
  "logging_steps": 500,
  "max_steps": 11115,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1730479287894016e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}