{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0526315789473684, "eval_steps": 100, "global_step": 60, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.017543859649122806, "grad_norm": 44.84535598754883, "learning_rate": 1.111111111111111e-06, "loss": 1.3672, "step": 1 }, { "epoch": 0.03508771929824561, "grad_norm": 46.05864715576172, "learning_rate": 2.222222222222222e-06, "loss": 1.3719, "step": 2 }, { "epoch": 0.05263157894736842, "grad_norm": 9.057656288146973, "learning_rate": 3.3333333333333333e-06, "loss": 0.7838, "step": 3 }, { "epoch": 0.07017543859649122, "grad_norm": 8.365191459655762, "learning_rate": 4.444444444444444e-06, "loss": 0.7781, "step": 4 }, { "epoch": 0.08771929824561403, "grad_norm": 6.402659893035889, "learning_rate": 5.555555555555557e-06, "loss": 0.7132, "step": 5 }, { "epoch": 0.10526315789473684, "grad_norm": 5.508246898651123, "learning_rate": 6.666666666666667e-06, "loss": 0.6863, "step": 6 }, { "epoch": 0.12280701754385964, "grad_norm": 4.964734077453613, "learning_rate": 7.77777777777778e-06, "loss": 0.6237, "step": 7 }, { "epoch": 0.14035087719298245, "grad_norm": 4.011112689971924, "learning_rate": 8.888888888888888e-06, "loss": 0.618, "step": 8 }, { "epoch": 0.15789473684210525, "grad_norm": 3.7118031978607178, "learning_rate": 1e-05, "loss": 0.5955, "step": 9 }, { "epoch": 0.17543859649122806, "grad_norm": 3.4124467372894287, "learning_rate": 1.1111111111111113e-05, "loss": 0.5661, "step": 10 }, { "epoch": 0.19298245614035087, "grad_norm": 2.9919450283050537, "learning_rate": 1.2222222222222224e-05, "loss": 0.5436, "step": 11 }, { "epoch": 0.21052631578947367, "grad_norm": 2.6217074394226074, "learning_rate": 1.3333333333333333e-05, "loss": 0.4991, "step": 12 }, { "epoch": 0.22807017543859648, "grad_norm": 3.101696014404297, "learning_rate": 1.4444444444444446e-05, "loss": 0.4889, "step": 13 }, { "epoch": 0.24561403508771928, "grad_norm": 2.632870674133301, "learning_rate": 1.555555555555556e-05, "loss": 0.4727, "step": 14 }, { "epoch": 0.2631578947368421, "grad_norm": 2.158155679702759, "learning_rate": 1.6666666666666667e-05, "loss": 0.4647, "step": 15 }, { "epoch": 0.2807017543859649, "grad_norm": 2.006129264831543, "learning_rate": 1.7777777777777777e-05, "loss": 0.4476, "step": 16 }, { "epoch": 0.2982456140350877, "grad_norm": 2.096951961517334, "learning_rate": 1.888888888888889e-05, "loss": 0.4387, "step": 17 }, { "epoch": 0.3157894736842105, "grad_norm": 1.9885540008544922, "learning_rate": 2e-05, "loss": 0.412, "step": 18 }, { "epoch": 0.3333333333333333, "grad_norm": 1.9787037372589111, "learning_rate": 1.9869281045751635e-05, "loss": 0.415, "step": 19 }, { "epoch": 0.3508771929824561, "grad_norm": 2.428654193878174, "learning_rate": 1.973856209150327e-05, "loss": 0.4068, "step": 20 }, { "epoch": 0.3684210526315789, "grad_norm": 1.8156601190567017, "learning_rate": 1.9607843137254903e-05, "loss": 0.3895, "step": 21 }, { "epoch": 0.38596491228070173, "grad_norm": 1.9325244426727295, "learning_rate": 1.9477124183006536e-05, "loss": 0.3793, "step": 22 }, { "epoch": 0.40350877192982454, "grad_norm": 1.5293307304382324, "learning_rate": 1.9346405228758173e-05, "loss": 0.3753, "step": 23 }, { "epoch": 0.42105263157894735, "grad_norm": 2.211817741394043, "learning_rate": 1.9215686274509807e-05, "loss": 0.3858, "step": 24 }, { "epoch": 0.43859649122807015, "grad_norm": 1.7881850004196167, "learning_rate": 1.908496732026144e-05, "loss": 0.3735, "step": 25 }, { 
"epoch": 0.45614035087719296, "grad_norm": 1.7260500192642212, "learning_rate": 1.8954248366013074e-05, "loss": 0.3733, "step": 26 }, { "epoch": 0.47368421052631576, "grad_norm": 1.6085237264633179, "learning_rate": 1.8823529411764708e-05, "loss": 0.355, "step": 27 }, { "epoch": 0.49122807017543857, "grad_norm": 2.8392181396484375, "learning_rate": 1.869281045751634e-05, "loss": 0.3551, "step": 28 }, { "epoch": 0.5087719298245614, "grad_norm": 1.978279948234558, "learning_rate": 1.8562091503267975e-05, "loss": 0.3337, "step": 29 }, { "epoch": 0.5263157894736842, "grad_norm": 2.016209125518799, "learning_rate": 1.843137254901961e-05, "loss": 0.3408, "step": 30 }, { "epoch": 0.543859649122807, "grad_norm": 1.4631725549697876, "learning_rate": 1.8300653594771242e-05, "loss": 0.3434, "step": 31 }, { "epoch": 0.5614035087719298, "grad_norm": 1.4324421882629395, "learning_rate": 1.8169934640522876e-05, "loss": 0.3468, "step": 32 }, { "epoch": 0.5789473684210527, "grad_norm": 5.047348976135254, "learning_rate": 1.8039215686274513e-05, "loss": 0.3284, "step": 33 }, { "epoch": 0.5964912280701754, "grad_norm": 1.515346884727478, "learning_rate": 1.7908496732026146e-05, "loss": 0.3377, "step": 34 }, { "epoch": 0.6140350877192983, "grad_norm": 1.883217692375183, "learning_rate": 1.7777777777777777e-05, "loss": 0.333, "step": 35 }, { "epoch": 0.631578947368421, "grad_norm": 1.6431008577346802, "learning_rate": 1.7647058823529414e-05, "loss": 0.3319, "step": 36 }, { "epoch": 0.6491228070175439, "grad_norm": 1.630910873413086, "learning_rate": 1.7516339869281047e-05, "loss": 0.3214, "step": 37 }, { "epoch": 0.6666666666666666, "grad_norm": 1.4808597564697266, "learning_rate": 1.738562091503268e-05, "loss": 0.3258, "step": 38 }, { "epoch": 0.6842105263157895, "grad_norm": 1.6400136947631836, "learning_rate": 1.7254901960784314e-05, "loss": 0.3225, "step": 39 }, { "epoch": 0.7017543859649122, "grad_norm": 1.3743259906768799, "learning_rate": 1.7124183006535948e-05, "loss": 0.3192, "step": 40 }, { "epoch": 0.7192982456140351, "grad_norm": 1.4262139797210693, "learning_rate": 1.6993464052287582e-05, "loss": 0.3121, "step": 41 }, { "epoch": 0.7368421052631579, "grad_norm": 1.4537426233291626, "learning_rate": 1.686274509803922e-05, "loss": 0.3164, "step": 42 }, { "epoch": 0.7543859649122807, "grad_norm": 1.2737575769424438, "learning_rate": 1.6732026143790852e-05, "loss": 0.3021, "step": 43 }, { "epoch": 0.7719298245614035, "grad_norm": 1.2681057453155518, "learning_rate": 1.6601307189542486e-05, "loss": 0.3166, "step": 44 }, { "epoch": 0.7894736842105263, "grad_norm": 5.779806137084961, "learning_rate": 1.647058823529412e-05, "loss": 0.3093, "step": 45 }, { "epoch": 0.8070175438596491, "grad_norm": 1.4256994724273682, "learning_rate": 1.6339869281045753e-05, "loss": 0.2968, "step": 46 }, { "epoch": 0.8245614035087719, "grad_norm": 1.5250773429870605, "learning_rate": 1.6209150326797387e-05, "loss": 0.2972, "step": 47 }, { "epoch": 0.8421052631578947, "grad_norm": 1.2680127620697021, "learning_rate": 1.607843137254902e-05, "loss": 0.2914, "step": 48 }, { "epoch": 0.8596491228070176, "grad_norm": 1.6887532472610474, "learning_rate": 1.5947712418300657e-05, "loss": 0.2951, "step": 49 }, { "epoch": 0.8771929824561403, "grad_norm": 2.2416443824768066, "learning_rate": 1.5816993464052288e-05, "loss": 0.3059, "step": 50 }, { "epoch": 0.8947368421052632, "grad_norm": 1.3144830465316772, "learning_rate": 1.568627450980392e-05, "loss": 0.3011, "step": 51 }, { "epoch": 0.9122807017543859, "grad_norm": 
2.3271305561065674, "learning_rate": 1.555555555555556e-05, "loss": 0.2908, "step": 52 }, { "epoch": 0.9298245614035088, "grad_norm": 1.2403310537338257, "learning_rate": 1.5424836601307192e-05, "loss": 0.3001, "step": 53 }, { "epoch": 0.9473684210526315, "grad_norm": 2.7490832805633545, "learning_rate": 1.5294117647058822e-05, "loss": 0.2987, "step": 54 }, { "epoch": 0.9649122807017544, "grad_norm": 2.321964979171753, "learning_rate": 1.5163398692810458e-05, "loss": 0.287, "step": 55 }, { "epoch": 0.9824561403508771, "grad_norm": 1.1607296466827393, "learning_rate": 1.5032679738562093e-05, "loss": 0.2868, "step": 56 }, { "epoch": 1.0, "grad_norm": 1.5422321557998657, "learning_rate": 1.4901960784313726e-05, "loss": 0.293, "step": 57 }, { "epoch": 1.0175438596491229, "grad_norm": 1.5632988214492798, "learning_rate": 1.4771241830065362e-05, "loss": 0.2768, "step": 58 }, { "epoch": 1.0350877192982457, "grad_norm": 1.620436668395996, "learning_rate": 1.4640522875816994e-05, "loss": 0.2727, "step": 59 }, { "epoch": 1.0526315789473684, "grad_norm": 1.2848402261734009, "learning_rate": 1.4509803921568629e-05, "loss": 0.2659, "step": 60 } ], "logging_steps": 1, "max_steps": 171, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 12, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 0.0, "train_batch_size": 4096, "trial_name": null, "trial_params": null }