{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0396000396000396,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.000198000198000198,
      "eval_loss": 1.105994701385498,
      "eval_runtime": 178.9466,
      "eval_samples_per_second": 11.886,
      "eval_steps_per_second": 5.946,
      "step": 1
    },
    {
      "epoch": 0.00198000198000198,
      "grad_norm": 1.0191270112991333,
      "learning_rate": 0.00019967573081342103,
      "loss": 4.1654,
      "step": 10
    },
    {
      "epoch": 0.00396000396000396,
      "grad_norm": 1.2664660215377808,
      "learning_rate": 0.0001970941817426052,
      "loss": 3.8117,
      "step": 20
    },
    {
      "epoch": 0.00594000594000594,
      "grad_norm": 0.9743459224700928,
      "learning_rate": 0.00019199794436588243,
      "loss": 3.8164,
      "step": 30
    },
    {
      "epoch": 0.00792000792000792,
      "grad_norm": 1.1033498048782349,
      "learning_rate": 0.0001845190085543795,
      "loss": 3.657,
      "step": 40
    },
    {
      "epoch": 0.0099000099000099,
      "grad_norm": 1.9051858186721802,
      "learning_rate": 0.00017485107481711012,
      "loss": 3.9283,
      "step": 50
    },
    {
      "epoch": 0.0099000099000099,
      "eval_loss": 0.8947995901107788,
      "eval_runtime": 178.7865,
      "eval_samples_per_second": 11.897,
      "eval_steps_per_second": 5.951,
      "step": 50
    },
    {
      "epoch": 0.01188001188001188,
      "grad_norm": 1.570327639579773,
      "learning_rate": 0.00016324453755953773,
      "loss": 3.783,
      "step": 60
    },
    {
      "epoch": 0.01386001386001386,
      "grad_norm": 1.0465843677520752,
      "learning_rate": 0.00015000000000000001,
      "loss": 3.9758,
      "step": 70
    },
    {
      "epoch": 0.01584001584001584,
      "grad_norm": 1.319621205329895,
      "learning_rate": 0.00013546048870425356,
      "loss": 3.6048,
      "step": 80
    },
    {
      "epoch": 0.01782001782001782,
      "grad_norm": 1.6770116090774536,
      "learning_rate": 0.00012000256937760445,
      "loss": 3.5316,
      "step": 90
    },
    {
      "epoch": 0.0198000198000198,
      "grad_norm": 0.9391918778419495,
      "learning_rate": 0.00010402659401094152,
      "loss": 3.4507,
      "step": 100
    },
    {
      "epoch": 0.0198000198000198,
      "eval_loss": 0.8597908616065979,
      "eval_runtime": 178.7551,
      "eval_samples_per_second": 11.899,
      "eval_steps_per_second": 5.952,
      "step": 100
    },
    {
      "epoch": 0.02178002178002178,
      "grad_norm": 0.9822765588760376,
      "learning_rate": 8.79463319744677e-05,
      "loss": 3.1689,
      "step": 110
    },
    {
      "epoch": 0.02376002376002376,
      "grad_norm": 1.117750883102417,
      "learning_rate": 7.217825360835473e-05,
      "loss": 3.1231,
      "step": 120
    },
    {
      "epoch": 0.02574002574002574,
      "grad_norm": 2.2837934494018555,
      "learning_rate": 5.713074385969457e-05,
      "loss": 3.7971,
      "step": 130
    },
    {
      "epoch": 0.02772002772002772,
      "grad_norm": 1.7023260593414307,
      "learning_rate": 4.3193525326884435e-05,
      "loss": 3.367,
      "step": 140
    },
    {
      "epoch": 0.0297000297000297,
      "grad_norm": 1.4030767679214478,
      "learning_rate": 3.072756464904006e-05,
      "loss": 3.2886,
      "step": 150
    },
    {
      "epoch": 0.0297000297000297,
      "eval_loss": 0.8444181680679321,
      "eval_runtime": 178.7934,
      "eval_samples_per_second": 11.896,
      "eval_steps_per_second": 5.951,
      "step": 150
    },
    {
      "epoch": 0.03168003168003168,
      "grad_norm": 1.8827073574066162,
      "learning_rate": 2.0055723659649904e-05,
      "loss": 3.3219,
      "step": 160
    },
    {
      "epoch": 0.03366003366003366,
      "grad_norm": 1.6092329025268555,
      "learning_rate": 1.1454397434679021e-05,
      "loss": 3.3611,
      "step": 170
    },
    {
      "epoch": 0.03564003564003564,
      "grad_norm": 1.6704670190811157,
      "learning_rate": 5.146355805285452e-06,
      "loss": 3.8902,
      "step": 180
    },
    {
      "epoch": 0.03762003762003762,
      "grad_norm": 1.419443964958191,
      "learning_rate": 1.2949737362087156e-06,
      "loss": 3.5671,
      "step": 190
    },
    {
      "epoch": 0.0396000396000396,
      "grad_norm": 1.2906014919281006,
      "learning_rate": 0.0,
      "loss": 3.1066,
      "step": 200
    },
    {
      "epoch": 0.0396000396000396,
      "eval_loss": 0.8420388102531433,
      "eval_runtime": 178.7659,
      "eval_samples_per_second": 11.898,
      "eval_steps_per_second": 5.952,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.680730813628416e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}