|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 67,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 321.7783631563187,
      "epoch": 0.14925373134328357,
      "grad_norm": 844.2005004882812,
      "kl": 0.26083906888961794,
      "learning_rate": 1.9876883405951378e-05,
      "loss": 0.0104,
      "reward": 0.8205357508268207,
      "reward_std": 0.3335492295213044,
      "rewards/accuracy_reward": 0.28281251178123057,
      "rewards/format_reward": 0.5377232346101664,
      "step": 10
    },
    {
      "completion_length": 957.0839672088623,
      "epoch": 0.29850746268656714,
      "grad_norm": 12.81620979309082,
      "kl": 1.9741607666015626,
      "learning_rate": 1.777145961456971e-05,
      "loss": 0.0792,
      "reward": 0.2691964411525987,
      "reward_std": 0.21028053476475178,
      "rewards/accuracy_reward": 0.23214286774164067,
      "rewards/format_reward": 0.03705357332946733,
      "step": 20
    },
    {
      "completion_length": 422.6559352874756,
      "epoch": 0.44776119402985076,
      "grad_norm": 5.399049758911133,
      "kl": 0.2251617431640625,
      "learning_rate": 1.3583679495453e-05,
      "loss": 0.009,
      "reward": 1.2597098806872964,
      "reward_std": 0.36119014574214814,
      "rewards/accuracy_reward": 0.47042412888258694,
      "rewards/format_reward": 0.7892857480794191,
      "step": 30
    },
    {
      "completion_length": 268.14699935913086,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.5281020402908325,
      "kl": 0.1599884033203125,
      "learning_rate": 8.43565534959769e-06,
      "loss": 0.0064,
      "reward": 1.3410714894533158,
      "reward_std": 0.3691547209396958,
      "rewards/accuracy_reward": 0.41651787590235473,
      "rewards/format_reward": 0.9245536047965288,
      "step": 40
    },
    {
      "completion_length": 327.67679929733276,
      "epoch": 0.746268656716418,
      "grad_norm": 6.650570869445801,
      "kl": 0.163006591796875,
      "learning_rate": 3.7067960895016277e-06,
      "loss": 0.0065,
      "reward": 1.4162947066128253,
      "reward_std": 0.4063243476208299,
      "rewards/accuracy_reward": 0.5241071661934257,
      "rewards/format_reward": 0.8921875409781933,
      "step": 50
    },
    {
      "completion_length": 262.67713165283203,
      "epoch": 0.8955223880597015,
      "grad_norm": 0.2625133991241455,
      "kl": 0.1500396728515625,
      "learning_rate": 6.641957350279838e-07,
      "loss": 0.006,
      "reward": 1.5008929297327995,
      "reward_std": 0.31554799154400826,
      "rewards/accuracy_reward": 0.5521205643191933,
      "rewards/format_reward": 0.9487723533064127,
      "step": 60
    },
    {
      "completion_length": 279.1561552456447,
      "epoch": 1.0,
      "kl": 0.14893450055803573,
      "reward": 1.5041454716452531,
      "reward_std": 0.3543888473484133,
      "rewards/accuracy_reward": 0.5596301265593085,
      "rewards/format_reward": 0.9445153373692717,
      "step": 67,
      "total_flos": 0.0,
      "train_loss": 0.018138731037502857,
      "train_runtime": 11014.2043,
      "train_samples_per_second": 0.681,
      "train_steps_per_second": 0.006
    }
  ],
  "logging_steps": 10,
  "max_steps": 67,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|