|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.24, |
|
"eval_steps": 500, |
|
"global_step": 450, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"completion_length": 496.2109546661377, |
|
"epoch": 0.0010666666666666667, |
|
"grad_norm": 0.11954631782793526, |
|
"kl": 0.0, |
|
"learning_rate": 7.142857142857142e-08, |
|
"loss": -0.0, |
|
"reward": 0.3255208423361182, |
|
"reward_std": 0.41301608085632324, |
|
"rewards/equation_reward_func": 0.0729166679084301, |
|
"rewards/format_reward_func": 0.2526041716337204, |
|
"step": 2 |
|
}, |
|
{ |
|
"completion_length": 502.083345413208, |
|
"epoch": 0.0021333333333333334, |
|
"grad_norm": 0.13612224468593256, |
|
"kl": 0.0004134178161621094, |
|
"learning_rate": 1.4285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.32552084419876337, |
|
"reward_std": 0.46909805946052074, |
|
"rewards/equation_reward_func": 0.07031250256113708, |
|
"rewards/format_reward_func": 0.2552083390764892, |
|
"step": 4 |
|
}, |
|
{ |
|
"completion_length": 478.82554054260254, |
|
"epoch": 0.0032, |
|
"grad_norm": 0.14255152767670767, |
|
"kl": 0.0003870725631713867, |
|
"learning_rate": 2.1428571428571426e-07, |
|
"loss": 0.0, |
|
"reward": 0.32291667349636555, |
|
"reward_std": 0.5039449613541365, |
|
"rewards/equation_reward_func": 0.06770833465270698, |
|
"rewards/format_reward_func": 0.25520834140479565, |
|
"step": 6 |
|
}, |
|
{ |
|
"completion_length": 488.739595413208, |
|
"epoch": 0.004266666666666667, |
|
"grad_norm": 0.13619929714048676, |
|
"kl": 0.00038945674896240234, |
|
"learning_rate": 2.857142857142857e-07, |
|
"loss": 0.0, |
|
"reward": 0.33333334047347307, |
|
"reward_std": 0.4558701850473881, |
|
"rewards/equation_reward_func": 0.054687500931322575, |
|
"rewards/format_reward_func": 0.2786458423361182, |
|
"step": 8 |
|
}, |
|
{ |
|
"completion_length": 489.1224060058594, |
|
"epoch": 0.005333333333333333, |
|
"grad_norm": 0.13326587315385535, |
|
"kl": 0.0003973245620727539, |
|
"learning_rate": 3.5714285714285716e-07, |
|
"loss": 0.0, |
|
"reward": 0.37760417629033327, |
|
"reward_std": 0.4923101980239153, |
|
"rewards/equation_reward_func": 0.03645833395421505, |
|
"rewards/format_reward_func": 0.3411458460614085, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 493.63543128967285, |
|
"epoch": 0.0064, |
|
"grad_norm": 0.14488433794501374, |
|
"kl": 0.0005027055740356445, |
|
"learning_rate": 4.285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.3333333423361182, |
|
"reward_std": 0.4829239808022976, |
|
"rewards/equation_reward_func": 0.0416666679084301, |
|
"rewards/format_reward_func": 0.2916666753590107, |
|
"step": 12 |
|
}, |
|
{ |
|
"completion_length": 495.4921989440918, |
|
"epoch": 0.007466666666666667, |
|
"grad_norm": 0.13465334279118427, |
|
"kl": 0.0008780956268310547, |
|
"learning_rate": 5e-07, |
|
"loss": 0.0, |
|
"reward": 0.4635416828095913, |
|
"reward_std": 0.533896965906024, |
|
"rewards/equation_reward_func": 0.05989583441987634, |
|
"rewards/format_reward_func": 0.4036458469927311, |
|
"step": 14 |
|
}, |
|
{ |
|
"completion_length": 451.22657203674316, |
|
"epoch": 0.008533333333333334, |
|
"grad_norm": 0.15101250146425993, |
|
"kl": 0.0014376640319824219, |
|
"learning_rate": 4.999740409224932e-07, |
|
"loss": 0.0, |
|
"reward": 0.5651041828095913, |
|
"reward_std": 0.5559330955147743, |
|
"rewards/equation_reward_func": 0.06510416767559946, |
|
"rewards/format_reward_func": 0.500000013038516, |
|
"step": 16 |
|
}, |
|
{ |
|
"completion_length": 465.8593864440918, |
|
"epoch": 0.0096, |
|
"grad_norm": 0.10999101009073732, |
|
"kl": 0.0033292770385742188, |
|
"learning_rate": 4.998961690809627e-07, |
|
"loss": 0.0, |
|
"reward": 0.661458345130086, |
|
"reward_std": 0.5498605705797672, |
|
"rewards/equation_reward_func": 0.07552083651535213, |
|
"rewards/format_reward_func": 0.585937513038516, |
|
"step": 18 |
|
}, |
|
{ |
|
"completion_length": 484.29168128967285, |
|
"epoch": 0.010666666666666666, |
|
"grad_norm": 0.10614223340608818, |
|
"kl": 0.005473136901855469, |
|
"learning_rate": 4.997664006472578e-07, |
|
"loss": 0.0, |
|
"reward": 0.7630208544433117, |
|
"reward_std": 0.4911875668913126, |
|
"rewards/equation_reward_func": 0.054687501629814506, |
|
"rewards/format_reward_func": 0.7083333507180214, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 459.4166851043701, |
|
"epoch": 0.011733333333333333, |
|
"grad_norm": 0.10241538861175935, |
|
"kl": 0.0071125030517578125, |
|
"learning_rate": 4.995847625707292e-07, |
|
"loss": 0.0, |
|
"reward": 0.8463541828095913, |
|
"reward_std": 0.4817237760871649, |
|
"rewards/equation_reward_func": 0.06770833488553762, |
|
"rewards/format_reward_func": 0.778645858168602, |
|
"step": 22 |
|
}, |
|
{ |
|
"completion_length": 460.716157913208, |
|
"epoch": 0.0128, |
|
"grad_norm": 0.08441145858520885, |
|
"kl": 0.0069980621337890625, |
|
"learning_rate": 4.993512925726318e-07, |
|
"loss": 0.0, |
|
"reward": 0.8906250298023224, |
|
"reward_std": 0.4445338100194931, |
|
"rewards/equation_reward_func": 0.07812500256113708, |
|
"rewards/format_reward_func": 0.8125000186264515, |
|
"step": 24 |
|
}, |
|
{ |
|
"completion_length": 483.37761878967285, |
|
"epoch": 0.013866666666666666, |
|
"grad_norm": 0.09439729466177939, |
|
"kl": 0.007844924926757812, |
|
"learning_rate": 4.990660391382923e-07, |
|
"loss": 0.0, |
|
"reward": 0.8984375149011612, |
|
"reward_std": 0.4334138287231326, |
|
"rewards/equation_reward_func": 0.06770833535119891, |
|
"rewards/format_reward_func": 0.8307291865348816, |
|
"step": 26 |
|
}, |
|
{ |
|
"completion_length": 487.11720085144043, |
|
"epoch": 0.014933333333333333, |
|
"grad_norm": 0.07829204100262112, |
|
"kl": 0.008859634399414062, |
|
"learning_rate": 4.987290615070384e-07, |
|
"loss": 0.0, |
|
"reward": 0.9244792051613331, |
|
"reward_std": 0.36052799224853516, |
|
"rewards/equation_reward_func": 0.07291666860692203, |
|
"rewards/format_reward_func": 0.8515625186264515, |
|
"step": 28 |
|
}, |
|
{ |
|
"completion_length": 485.40625953674316, |
|
"epoch": 0.016, |
|
"grad_norm": 0.07302798822426318, |
|
"kl": 0.0127105712890625, |
|
"learning_rate": 4.983404296598978e-07, |
|
"loss": 0.0, |
|
"reward": 0.9479167014360428, |
|
"reward_std": 0.33515751641243696, |
|
"rewards/equation_reward_func": 0.06510416837409139, |
|
"rewards/format_reward_func": 0.8828125111758709, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 450.005220413208, |
|
"epoch": 0.017066666666666667, |
|
"grad_norm": 0.09195588444312232, |
|
"kl": 0.013427734375, |
|
"learning_rate": 4.979002243050646e-07, |
|
"loss": 0.0, |
|
"reward": 1.0833333656191826, |
|
"reward_std": 0.39109824877232313, |
|
"rewards/equation_reward_func": 0.15104167233221233, |
|
"rewards/format_reward_func": 0.9322916828095913, |
|
"step": 32 |
|
}, |
|
{ |
|
"completion_length": 450.716157913208, |
|
"epoch": 0.018133333333333335, |
|
"grad_norm": 0.07847022887495707, |
|
"kl": 0.01331329345703125, |
|
"learning_rate": 4.974085368611381e-07, |
|
"loss": 0.0, |
|
"reward": 1.057291690260172, |
|
"reward_std": 0.32324595795944333, |
|
"rewards/equation_reward_func": 0.12239583651535213, |
|
"rewards/format_reward_func": 0.9348958544433117, |
|
"step": 34 |
|
}, |
|
{ |
|
"completion_length": 460.44792556762695, |
|
"epoch": 0.0192, |
|
"grad_norm": 0.07189632688173346, |
|
"kl": 0.015232086181640625, |
|
"learning_rate": 4.968654694381379e-07, |
|
"loss": 0.0, |
|
"reward": 1.0520833805203438, |
|
"reward_std": 0.3206865997053683, |
|
"rewards/equation_reward_func": 0.1145833374466747, |
|
"rewards/format_reward_func": 0.9375000149011612, |
|
"step": 36 |
|
}, |
|
{ |
|
"completion_length": 433.51563453674316, |
|
"epoch": 0.020266666666666665, |
|
"grad_norm": 0.08532850510429227, |
|
"kl": 0.01625823974609375, |
|
"learning_rate": 4.962711348162987e-07, |
|
"loss": 0.0, |
|
"reward": 1.0833333656191826, |
|
"reward_std": 0.32565778447315097, |
|
"rewards/equation_reward_func": 0.1328125037252903, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 38 |
|
}, |
|
{ |
|
"completion_length": 440.76042556762695, |
|
"epoch": 0.021333333333333333, |
|
"grad_norm": 0.0822050428393239, |
|
"kl": 0.016002655029296875, |
|
"learning_rate": 4.956256564226487e-07, |
|
"loss": 0.0, |
|
"reward": 1.0520833618938923, |
|
"reward_std": 0.3361439718864858, |
|
"rewards/equation_reward_func": 0.11979167070239782, |
|
"rewards/format_reward_func": 0.9322916977107525, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 416.8619899749756, |
|
"epoch": 0.0224, |
|
"grad_norm": 0.06432799392042395, |
|
"kl": 0.0177764892578125, |
|
"learning_rate": 4.949291683053768e-07, |
|
"loss": 0.0, |
|
"reward": 1.0729166939854622, |
|
"reward_std": 0.2632621508091688, |
|
"rewards/equation_reward_func": 0.1119791695382446, |
|
"rewards/format_reward_func": 0.9609375260770321, |
|
"step": 42 |
|
}, |
|
{ |
|
"completion_length": 407.489595413208, |
|
"epoch": 0.023466666666666667, |
|
"grad_norm": 0.0816513301308808, |
|
"kl": 0.019321441650390625, |
|
"learning_rate": 4.941818151059955e-07, |
|
"loss": 0.0, |
|
"reward": 1.0859375298023224, |
|
"reward_std": 0.2334962603636086, |
|
"rewards/equation_reward_func": 0.1067708374466747, |
|
"rewards/format_reward_func": 0.9791666865348816, |
|
"step": 44 |
|
}, |
|
{ |
|
"completion_length": 437.7265739440918, |
|
"epoch": 0.024533333333333334, |
|
"grad_norm": 0.07098712073151407, |
|
"kl": 0.020320892333984375, |
|
"learning_rate": 4.933837520293017e-07, |
|
"loss": 0.0, |
|
"reward": 1.0494791977107525, |
|
"reward_std": 0.24554855143651366, |
|
"rewards/equation_reward_func": 0.08593750093132257, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 46 |
|
}, |
|
{ |
|
"completion_length": 409.68750953674316, |
|
"epoch": 0.0256, |
|
"grad_norm": 0.08052848471910203, |
|
"kl": 0.01833343505859375, |
|
"learning_rate": 4.925351448111454e-07, |
|
"loss": 0.0, |
|
"reward": 1.1171875335276127, |
|
"reward_std": 0.3026689598336816, |
|
"rewards/equation_reward_func": 0.14583333558402956, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 48 |
|
}, |
|
{ |
|
"completion_length": 408.2890739440918, |
|
"epoch": 0.02666666666666667, |
|
"grad_norm": 0.06883175239258556, |
|
"kl": 0.0229949951171875, |
|
"learning_rate": 4.91636169684011e-07, |
|
"loss": 0.0, |
|
"reward": 1.0859375409781933, |
|
"reward_std": 0.2610642076469958, |
|
"rewards/equation_reward_func": 0.12500000349245965, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 406.5026149749756, |
|
"epoch": 0.027733333333333332, |
|
"grad_norm": 0.08432747124886808, |
|
"kl": 0.02112579345703125, |
|
"learning_rate": 4.906870133404186e-07, |
|
"loss": 0.0, |
|
"reward": 1.1276042088866234, |
|
"reward_std": 0.30215284787118435, |
|
"rewards/equation_reward_func": 0.1588541716337204, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 52 |
|
}, |
|
{ |
|
"completion_length": 414.50782203674316, |
|
"epoch": 0.0288, |
|
"grad_norm": 0.08259119762071515, |
|
"kl": 0.02127838134765625, |
|
"learning_rate": 4.896878728941531e-07, |
|
"loss": 0.0, |
|
"reward": 1.0755208618938923, |
|
"reward_std": 0.27385374438017607, |
|
"rewards/equation_reward_func": 0.1119791695382446, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 54 |
|
}, |
|
{ |
|
"completion_length": 403.9401149749756, |
|
"epoch": 0.029866666666666666, |
|
"grad_norm": 0.06375644666852129, |
|
"kl": 0.0231781005859375, |
|
"learning_rate": 4.886389558393284e-07, |
|
"loss": 0.0, |
|
"reward": 1.093750026077032, |
|
"reward_std": 0.23606119910255075, |
|
"rewards/equation_reward_func": 0.12239583674818277, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 56 |
|
}, |
|
{ |
|
"completion_length": 399.2760524749756, |
|
"epoch": 0.030933333333333334, |
|
"grad_norm": 0.08378363145506801, |
|
"kl": 0.02516937255859375, |
|
"learning_rate": 4.875404800072976e-07, |
|
"loss": 0.0, |
|
"reward": 1.1015625298023224, |
|
"reward_std": 0.25781850097700953, |
|
"rewards/equation_reward_func": 0.1276041683740914, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 58 |
|
}, |
|
{ |
|
"completion_length": 385.61719703674316, |
|
"epoch": 0.032, |
|
"grad_norm": 0.07405086579557206, |
|
"kl": 0.02643585205078125, |
|
"learning_rate": 4.86392673521415e-07, |
|
"loss": 0.0, |
|
"reward": 1.1354167088866234, |
|
"reward_std": 0.2644475535489619, |
|
"rewards/equation_reward_func": 0.1562500053551048, |
|
"rewards/format_reward_func": 0.9791666753590107, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 388.5390682220459, |
|
"epoch": 0.03306666666666667, |
|
"grad_norm": 0.07936857349188958, |
|
"kl": 0.0322113037109375, |
|
"learning_rate": 4.851957747496606e-07, |
|
"loss": 0.0, |
|
"reward": 1.1093750409781933, |
|
"reward_std": 0.24481901014223695, |
|
"rewards/equation_reward_func": 0.13020833767950535, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 62 |
|
}, |
|
{ |
|
"completion_length": 374.6614685058594, |
|
"epoch": 0.034133333333333335, |
|
"grad_norm": 0.08002757038050164, |
|
"kl": 0.0284271240234375, |
|
"learning_rate": 4.839500322551386e-07, |
|
"loss": 0.0, |
|
"reward": 1.1302083693444729, |
|
"reward_std": 0.2239288128912449, |
|
"rewards/equation_reward_func": 0.14322917046956718, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 64 |
|
}, |
|
{ |
|
"completion_length": 374.56250953674316, |
|
"epoch": 0.0352, |
|
"grad_norm": 0.08882377481939042, |
|
"kl": 0.03044891357421875, |
|
"learning_rate": 4.826557047444563e-07, |
|
"loss": 0.0, |
|
"reward": 1.1250000298023224, |
|
"reward_std": 0.25083459448069334, |
|
"rewards/equation_reward_func": 0.14322917070239782, |
|
"rewards/format_reward_func": 0.9817708395421505, |
|
"step": 66 |
|
}, |
|
{ |
|
"completion_length": 360.7890748977661, |
|
"epoch": 0.03626666666666667, |
|
"grad_norm": 0.09104736623524047, |
|
"kl": 0.03575897216796875, |
|
"learning_rate": 4.813130610139993e-07, |
|
"loss": 0.0, |
|
"reward": 1.1588542014360428, |
|
"reward_std": 0.24613964278250933, |
|
"rewards/equation_reward_func": 0.16927083674818277, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 68 |
|
}, |
|
{ |
|
"completion_length": 363.380220413208, |
|
"epoch": 0.037333333333333336, |
|
"grad_norm": 0.08230130539588183, |
|
"kl": 0.03208160400390625, |
|
"learning_rate": 4.799223798941089e-07, |
|
"loss": 0.0, |
|
"reward": 1.1380208805203438, |
|
"reward_std": 0.26733159739524126, |
|
"rewards/equation_reward_func": 0.16406250488944352, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 344.23959255218506, |
|
"epoch": 0.0384, |
|
"grad_norm": 0.10598274417848667, |
|
"kl": 0.0363922119140625, |
|
"learning_rate": 4.78483950191177e-07, |
|
"loss": 0.0, |
|
"reward": 1.1901042014360428, |
|
"reward_std": 0.2702939258888364, |
|
"rewards/equation_reward_func": 0.19531250488944352, |
|
"rewards/format_reward_func": 0.9947916716337204, |
|
"step": 72 |
|
}, |
|
{ |
|
"completion_length": 355.94271659851074, |
|
"epoch": 0.039466666666666664, |
|
"grad_norm": 0.0868727954216351, |
|
"kl": 0.0372161865234375, |
|
"learning_rate": 4.769980706276687e-07, |
|
"loss": 0.0, |
|
"reward": 1.1276042088866234, |
|
"reward_std": 0.24663167539983988, |
|
"rewards/equation_reward_func": 0.14583334070630372, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 74 |
|
}, |
|
{ |
|
"completion_length": 377.73959732055664, |
|
"epoch": 0.04053333333333333, |
|
"grad_norm": 0.07917788675218197, |
|
"kl": 0.0357513427734375, |
|
"learning_rate": 4.7546504978008595e-07, |
|
"loss": 0.0, |
|
"reward": 1.1432292088866234, |
|
"reward_std": 0.24032680923119187, |
|
"rewards/equation_reward_func": 0.1536458374466747, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 76 |
|
}, |
|
{ |
|
"completion_length": 360.8307399749756, |
|
"epoch": 0.0416, |
|
"grad_norm": 0.09413557754270317, |
|
"kl": 0.038055419921875, |
|
"learning_rate": 4.738852060148848e-07, |
|
"loss": 0.0, |
|
"reward": 1.1796875521540642, |
|
"reward_std": 0.3384167607873678, |
|
"rewards/equation_reward_func": 0.20833333837799728, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 78 |
|
}, |
|
{ |
|
"completion_length": 361.27084159851074, |
|
"epoch": 0.042666666666666665, |
|
"grad_norm": 0.09743025798738646, |
|
"kl": 0.04107666015625, |
|
"learning_rate": 4.722588674223593e-07, |
|
"loss": 0.0, |
|
"reward": 1.2343750447034836, |
|
"reward_std": 0.3094023009762168, |
|
"rewards/equation_reward_func": 0.24739584070630372, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 368.3567810058594, |
|
"epoch": 0.04373333333333333, |
|
"grad_norm": 0.0716687709123018, |
|
"kl": 0.04241943359375, |
|
"learning_rate": 4.70586371748506e-07, |
|
"loss": 0.0, |
|
"reward": 1.119791705161333, |
|
"reward_std": 0.2431359770707786, |
|
"rewards/equation_reward_func": 0.13541667046956718, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 82 |
|
}, |
|
{ |
|
"completion_length": 373.70834159851074, |
|
"epoch": 0.0448, |
|
"grad_norm": 0.08456888547469173, |
|
"kl": 0.043853759765625, |
|
"learning_rate": 4.6886806632488363e-07, |
|
"loss": 0.0, |
|
"reward": 1.2239583805203438, |
|
"reward_std": 0.32931508449837565, |
|
"rewards/equation_reward_func": 0.2500000074505806, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 84 |
|
}, |
|
{ |
|
"completion_length": 363.72136306762695, |
|
"epoch": 0.04586666666666667, |
|
"grad_norm": 0.09097792518227367, |
|
"kl": 0.0495147705078125, |
|
"learning_rate": 4.6710430799648143e-07, |
|
"loss": 0.0, |
|
"reward": 1.2343750447034836, |
|
"reward_std": 0.3150203409604728, |
|
"rewards/equation_reward_func": 0.25260417466051877, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 86 |
|
}, |
|
{ |
|
"completion_length": 368.83594703674316, |
|
"epoch": 0.046933333333333334, |
|
"grad_norm": 0.08075857490244749, |
|
"kl": 0.04754638671875, |
|
"learning_rate": 4.652954630476127e-07, |
|
"loss": 0.0, |
|
"reward": 1.226562537252903, |
|
"reward_std": 0.33071469189599156, |
|
"rewards/equation_reward_func": 0.247395837912336, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 88 |
|
}, |
|
{ |
|
"completion_length": 383.08073806762695, |
|
"epoch": 0.048, |
|
"grad_norm": 0.07265214510146589, |
|
"kl": 0.0546112060546875, |
|
"learning_rate": 4.6344190712584713e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2057291977107525, |
|
"reward_std": 0.2948309718631208, |
|
"rewards/equation_reward_func": 0.231770841171965, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 383.3177185058594, |
|
"epoch": 0.04906666666666667, |
|
"grad_norm": 0.1051600515841224, |
|
"kl": 0.052886962890625, |
|
"learning_rate": 4.615440251639995e-07, |
|
"loss": 0.0001, |
|
"reward": 1.229166716337204, |
|
"reward_std": 0.33057287614792585, |
|
"rewards/equation_reward_func": 0.24739584000781178, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 92 |
|
}, |
|
{ |
|
"completion_length": 404.4583396911621, |
|
"epoch": 0.050133333333333335, |
|
"grad_norm": 0.0785283851923137, |
|
"kl": 0.0536956787109375, |
|
"learning_rate": 4.596022113001894e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1718750298023224, |
|
"reward_std": 0.2629168755374849, |
|
"rewards/equation_reward_func": 0.19531250256113708, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 94 |
|
}, |
|
{ |
|
"completion_length": 393.22136306762695, |
|
"epoch": 0.0512, |
|
"grad_norm": 0.08425171673805154, |
|
"kl": 0.06390380859375, |
|
"learning_rate": 4.576168687959895e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2109375447034836, |
|
"reward_std": 0.31305414671078324, |
|
"rewards/equation_reward_func": 0.23437500419095159, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 96 |
|
}, |
|
{ |
|
"completion_length": 380.39584827423096, |
|
"epoch": 0.05226666666666667, |
|
"grad_norm": 0.0719108143516897, |
|
"kl": 0.0635528564453125, |
|
"learning_rate": 4.555884099526793e-07, |
|
"loss": 0.0001, |
|
"reward": 1.255208358168602, |
|
"reward_std": 0.2745982948690653, |
|
"rewards/equation_reward_func": 0.26822917140088975, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 98 |
|
}, |
|
{ |
|
"completion_length": 382.8046989440918, |
|
"epoch": 0.05333333333333334, |
|
"grad_norm": 0.0824603010214514, |
|
"kl": 0.0673675537109375, |
|
"learning_rate": 4.5351725602562174e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2447917014360428, |
|
"reward_std": 0.28847640473395586, |
|
"rewards/equation_reward_func": 0.26822917349636555, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 442.31771659851074, |
|
"epoch": 0.0544, |
|
"grad_norm": 0.07580652402214415, |
|
"kl": 0.063507080078125, |
|
"learning_rate": 4.514038371367791e-07, |
|
"loss": 0.0001, |
|
"reward": 1.213541705161333, |
|
"reward_std": 0.3326714150607586, |
|
"rewards/equation_reward_func": 0.25260417303070426, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 102 |
|
}, |
|
{ |
|
"completion_length": 366.58855056762695, |
|
"epoch": 0.055466666666666664, |
|
"grad_norm": 0.08608427800244613, |
|
"kl": 0.0719146728515625, |
|
"learning_rate": 4.4924859218538936e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2786458730697632, |
|
"reward_std": 0.3109776326455176, |
|
"rewards/equation_reward_func": 0.2994791774544865, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 104 |
|
}, |
|
{ |
|
"completion_length": 397.6562623977661, |
|
"epoch": 0.05653333333333333, |
|
"grad_norm": 0.08077416683670491, |
|
"kl": 0.0741424560546875, |
|
"learning_rate": 4.470519687568185e-07, |
|
"loss": 0.0001, |
|
"reward": 1.236979190260172, |
|
"reward_std": 0.306100333109498, |
|
"rewards/equation_reward_func": 0.26822917466051877, |
|
"rewards/format_reward_func": 0.9687500223517418, |
|
"step": 106 |
|
}, |
|
{ |
|
"completion_length": 392.052095413208, |
|
"epoch": 0.0576, |
|
"grad_norm": 0.11099671542231605, |
|
"kl": 0.072998046875, |
|
"learning_rate": 4.4481442302960923e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2760416939854622, |
|
"reward_std": 0.3399705649353564, |
|
"rewards/equation_reward_func": 0.2994791765231639, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 108 |
|
}, |
|
{ |
|
"completion_length": 370.3906316757202, |
|
"epoch": 0.058666666666666666, |
|
"grad_norm": 0.09213409066857482, |
|
"kl": 0.08636474609375, |
|
"learning_rate": 4.4253641968074505e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3437500298023224, |
|
"reward_std": 0.3902808427810669, |
|
"rewards/equation_reward_func": 0.38281250931322575, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 420.138032913208, |
|
"epoch": 0.05973333333333333, |
|
"grad_norm": 0.07358301230035058, |
|
"kl": 0.081298828125, |
|
"learning_rate": 4.402184317891501e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2005208805203438, |
|
"reward_std": 0.28474497282877564, |
|
"rewards/equation_reward_func": 0.23437500977888703, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 112 |
|
}, |
|
{ |
|
"completion_length": 432.9974060058594, |
|
"epoch": 0.0608, |
|
"grad_norm": 0.069887196509218, |
|
"kl": 0.0760345458984375, |
|
"learning_rate": 4.37860940737443e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1875000335276127, |
|
"reward_std": 0.2837864141911268, |
|
"rewards/equation_reward_func": 0.23697917396202683, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 114 |
|
}, |
|
{ |
|
"completion_length": 422.9088649749756, |
|
"epoch": 0.06186666666666667, |
|
"grad_norm": 0.09261482901163447, |
|
"kl": 0.081817626953125, |
|
"learning_rate": 4.354644361119671e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2031250298023224, |
|
"reward_std": 0.2831558440811932, |
|
"rewards/equation_reward_func": 0.25260417303070426, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 116 |
|
}, |
|
{ |
|
"completion_length": 396.2109489440918, |
|
"epoch": 0.06293333333333333, |
|
"grad_norm": 0.084517772171227, |
|
"kl": 0.0916595458984375, |
|
"learning_rate": 4.3302941560111716e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2864583618938923, |
|
"reward_std": 0.2684871470555663, |
|
"rewards/equation_reward_func": 0.31510417349636555, |
|
"rewards/format_reward_func": 0.9713541716337204, |
|
"step": 118 |
|
}, |
|
{ |
|
"completion_length": 406.57813358306885, |
|
"epoch": 0.064, |
|
"grad_norm": 0.09081750104097751, |
|
"kl": 0.087799072265625, |
|
"learning_rate": 4.3055638489198236e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2630208805203438, |
|
"reward_std": 0.3080101846717298, |
|
"rewards/equation_reward_func": 0.2890625100117177, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 355.86459255218506, |
|
"epoch": 0.06506666666666666, |
|
"grad_norm": 0.08971890990350641, |
|
"kl": 0.099761962890625, |
|
"learning_rate": 4.280458575653296e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3619792014360428, |
|
"reward_std": 0.33915683487430215, |
|
"rewards/equation_reward_func": 0.38541667675599456, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 122 |
|
}, |
|
{ |
|
"completion_length": 390.567720413208, |
|
"epoch": 0.06613333333333334, |
|
"grad_norm": 0.07864318983071089, |
|
"kl": 0.09515380859375, |
|
"learning_rate": 4.2549835498894665e-07, |
|
"loss": 0.0001, |
|
"reward": 1.338541705161333, |
|
"reward_std": 0.29398106783628464, |
|
"rewards/equation_reward_func": 0.3697916779201478, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 124 |
|
}, |
|
{ |
|
"completion_length": 410.3906364440918, |
|
"epoch": 0.0672, |
|
"grad_norm": 0.08089125478754702, |
|
"kl": 0.12396240234375, |
|
"learning_rate": 4.229144062093679e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3229167014360428, |
|
"reward_std": 0.2598429606296122, |
|
"rewards/equation_reward_func": 0.3411458423361182, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 126 |
|
}, |
|
{ |
|
"completion_length": 403.84896659851074, |
|
"epoch": 0.06826666666666667, |
|
"grad_norm": 0.06893337797991321, |
|
"kl": 0.11248779296875, |
|
"learning_rate": 4.2029454784200675e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3437500447034836, |
|
"reward_std": 0.29728816309943795, |
|
"rewards/equation_reward_func": 0.37239584303461015, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 128 |
|
}, |
|
{ |
|
"completion_length": 424.90365409851074, |
|
"epoch": 0.06933333333333333, |
|
"grad_norm": 0.07036206011933169, |
|
"kl": 0.094207763671875, |
|
"learning_rate": 4.1763932395971433e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2317708767950535, |
|
"reward_std": 0.271359003148973, |
|
"rewards/equation_reward_func": 0.2656250079162419, |
|
"rewards/format_reward_func": 0.9661458432674408, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 427.7578239440918, |
|
"epoch": 0.0704, |
|
"grad_norm": 0.08260904981702806, |
|
"kl": 0.097991943359375, |
|
"learning_rate": 4.1494928597979117e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3203125409781933, |
|
"reward_std": 0.35679714987054467, |
|
"rewards/equation_reward_func": 0.36718751303851604, |
|
"rewards/format_reward_func": 0.9531250111758709, |
|
"step": 132 |
|
}, |
|
{ |
|
"completion_length": 434.59896659851074, |
|
"epoch": 0.07146666666666666, |
|
"grad_norm": 0.07272128415662933, |
|
"kl": 0.099395751953125, |
|
"learning_rate": 4.122249925494726e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2864583656191826, |
|
"reward_std": 0.2933720201253891, |
|
"rewards/equation_reward_func": 0.333333341171965, |
|
"rewards/format_reward_func": 0.9531250223517418, |
|
"step": 134 |
|
}, |
|
{ |
|
"completion_length": 441.93230628967285, |
|
"epoch": 0.07253333333333334, |
|
"grad_norm": 0.05907301307740773, |
|
"kl": 0.1224365234375, |
|
"learning_rate": 4.094670094299131e-07, |
|
"loss": 0.0001, |
|
"reward": 1.299479205161333, |
|
"reward_std": 0.27806125255301595, |
|
"rewards/equation_reward_func": 0.3385416797827929, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 136 |
|
}, |
|
{ |
|
"completion_length": 423.520845413208, |
|
"epoch": 0.0736, |
|
"grad_norm": 0.07954765389278069, |
|
"kl": 0.099212646484375, |
|
"learning_rate": 4.066759093786931e-07, |
|
"loss": 0.0001, |
|
"reward": 1.296875037252903, |
|
"reward_std": 0.30517464876174927, |
|
"rewards/equation_reward_func": 0.3385416753590107, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 138 |
|
}, |
|
{ |
|
"completion_length": 440.27344512939453, |
|
"epoch": 0.07466666666666667, |
|
"grad_norm": 0.08211855704808459, |
|
"kl": 0.09521484375, |
|
"learning_rate": 4.038522720308732e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3125000335276127, |
|
"reward_std": 0.26301832078024745, |
|
"rewards/equation_reward_func": 0.35416667629033327, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 432.4817810058594, |
|
"epoch": 0.07573333333333333, |
|
"grad_norm": 0.07820576762133163, |
|
"kl": 0.098785400390625, |
|
"learning_rate": 4.009966837786194e-07, |
|
"loss": 0.0001, |
|
"reward": 1.367187537252903, |
|
"reward_std": 0.3508401014842093, |
|
"rewards/equation_reward_func": 0.4088541781529784, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 142 |
|
}, |
|
{ |
|
"completion_length": 412.177095413208, |
|
"epoch": 0.0768, |
|
"grad_norm": 0.07018309285646165, |
|
"kl": 0.116241455078125, |
|
"learning_rate": 3.981097376494259e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.27811811538413167, |
|
"rewards/equation_reward_func": 0.45572917629033327, |
|
"rewards/format_reward_func": 0.9687500260770321, |
|
"step": 144 |
|
}, |
|
{ |
|
"completion_length": 471.1041736602783, |
|
"epoch": 0.07786666666666667, |
|
"grad_norm": 0.07517814151054342, |
|
"kl": 0.100799560546875, |
|
"learning_rate": 3.951920331829592e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2526041977107525, |
|
"reward_std": 0.24458088353276253, |
|
"rewards/equation_reward_func": 0.3020833383779973, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 146 |
|
}, |
|
{ |
|
"completion_length": 445.95053482055664, |
|
"epoch": 0.07893333333333333, |
|
"grad_norm": 0.06913271066183226, |
|
"kl": 0.0982666015625, |
|
"learning_rate": 3.922441763065506e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3229166939854622, |
|
"reward_std": 0.2779015698470175, |
|
"rewards/equation_reward_func": 0.3619791720993817, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 148 |
|
}, |
|
{ |
|
"completion_length": 426.8359489440918, |
|
"epoch": 0.08, |
|
"grad_norm": 0.06291758584225125, |
|
"kl": 0.107147216796875, |
|
"learning_rate": 3.8926677920936093e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3671875447034836, |
|
"reward_std": 0.2879944946616888, |
|
"rewards/equation_reward_func": 0.40364584792405367, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 475.89323806762695, |
|
"epoch": 0.08106666666666666, |
|
"grad_norm": 0.07348149982487694, |
|
"kl": 0.097625732421875, |
|
"learning_rate": 3.862604602152464e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3229167014360428, |
|
"reward_std": 0.3060847264714539, |
|
"rewards/equation_reward_func": 0.3671875102445483, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 152 |
|
}, |
|
{ |
|
"completion_length": 455.48959732055664, |
|
"epoch": 0.08213333333333334, |
|
"grad_norm": 0.08190829109797806, |
|
"kl": 0.10845947265625, |
|
"learning_rate": 3.8322584365434934e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3489583656191826, |
|
"reward_std": 0.2808792185969651, |
|
"rewards/equation_reward_func": 0.3828125107102096, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 154 |
|
}, |
|
{ |
|
"completion_length": 452.7239713668823, |
|
"epoch": 0.0832, |
|
"grad_norm": 0.07277189051173198, |
|
"kl": 0.101318359375, |
|
"learning_rate": 3.8016355973344173e-07, |
|
"loss": 0.0001, |
|
"reward": 1.346354205161333, |
|
"reward_std": 0.3634042153134942, |
|
"rewards/equation_reward_func": 0.40625000814907253, |
|
"rewards/format_reward_func": 0.9401041828095913, |
|
"step": 156 |
|
}, |
|
{ |
|
"completion_length": 407.30209255218506, |
|
"epoch": 0.08426666666666667, |
|
"grad_norm": 0.08347690411933845, |
|
"kl": 0.105987548828125, |
|
"learning_rate": 3.7707424440504863e-07, |
|
"loss": 0.0001, |
|
"reward": 1.468750037252903, |
|
"reward_std": 0.25727377785369754, |
|
"rewards/equation_reward_func": 0.49218750931322575, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 158 |
|
}, |
|
{ |
|
"completion_length": 471.96095085144043, |
|
"epoch": 0.08533333333333333, |
|
"grad_norm": 0.08122691638733275, |
|
"kl": 0.109375, |
|
"learning_rate": 3.739585392353787e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3151041865348816, |
|
"reward_std": 0.366784714628011, |
|
"rewards/equation_reward_func": 0.37239584187045693, |
|
"rewards/format_reward_func": 0.9427083507180214, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 498.70574378967285, |
|
"epoch": 0.0864, |
|
"grad_norm": 0.06406400573830406, |
|
"kl": 0.09796142578125, |
|
"learning_rate": 3.7081709127108767e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1979167014360428, |
|
"reward_std": 0.24064096482470632, |
|
"rewards/equation_reward_func": 0.24218750675208867, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 162 |
|
}, |
|
{ |
|
"completion_length": 443.57813453674316, |
|
"epoch": 0.08746666666666666, |
|
"grad_norm": 0.08343605647479183, |
|
"kl": 0.11444091796875, |
|
"learning_rate": 3.6765055290490513e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3750000447034836, |
|
"reward_std": 0.2543401522561908, |
|
"rewards/equation_reward_func": 0.4114583474583924, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 164 |
|
}, |
|
{ |
|
"completion_length": 461.13282585144043, |
|
"epoch": 0.08853333333333334, |
|
"grad_norm": 0.07081891374265102, |
|
"kl": 0.1104736328125, |
|
"learning_rate": 3.644595817401501e-07, |
|
"loss": 0.0001, |
|
"reward": 1.372395858168602, |
|
"reward_std": 0.28344128327444196, |
|
"rewards/equation_reward_func": 0.39583334675990045, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 166 |
|
}, |
|
{ |
|
"completion_length": 406.8073034286499, |
|
"epoch": 0.0896, |
|
"grad_norm": 0.06876988951117273, |
|
"kl": 0.13555908203125, |
|
"learning_rate": 3.6124484045416483e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4661458805203438, |
|
"reward_std": 0.2285827100276947, |
|
"rewards/equation_reward_func": 0.4869791753590107, |
|
"rewards/format_reward_func": 0.9791666865348816, |
|
"step": 168 |
|
}, |
|
{ |
|
"completion_length": 441.6354293823242, |
|
"epoch": 0.09066666666666667, |
|
"grad_norm": 0.05847654146320862, |
|
"kl": 0.113525390625, |
|
"learning_rate": 3.580069966606949e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3489583805203438, |
|
"reward_std": 0.22638437105342746, |
|
"rewards/equation_reward_func": 0.39322918094694614, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 396.231782913208, |
|
"epoch": 0.09173333333333333, |
|
"grad_norm": 0.08174583936359092, |
|
"kl": 0.13916015625, |
|
"learning_rate": 3.547467227712444e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4375000335276127, |
|
"reward_std": 0.24320211308076978, |
|
"rewards/equation_reward_func": 0.45833334140479565, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 172 |
|
}, |
|
{ |
|
"completion_length": 469.96876525878906, |
|
"epoch": 0.0928, |
|
"grad_norm": 0.07764419704527421, |
|
"kl": 0.115997314453125, |
|
"learning_rate": 3.5146469585543386e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2734375484287739, |
|
"reward_std": 0.2251833681948483, |
|
"rewards/equation_reward_func": 0.3098958428017795, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 174 |
|
}, |
|
{ |
|
"completion_length": 418.3151168823242, |
|
"epoch": 0.09386666666666667, |
|
"grad_norm": 0.07721346430307077, |
|
"kl": 0.12945556640625, |
|
"learning_rate": 3.481615975003922e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3671875447034836, |
|
"reward_std": 0.21094412356615067, |
|
"rewards/equation_reward_func": 0.3880208432674408, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 176 |
|
}, |
|
{ |
|
"completion_length": 420.74740409851074, |
|
"epoch": 0.09493333333333333, |
|
"grad_norm": 0.08462924690149998, |
|
"kl": 0.13238525390625, |
|
"learning_rate": 3.448381136692089e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3489583805203438, |
|
"reward_std": 0.22936821915209293, |
|
"rewards/equation_reward_func": 0.3723958458285779, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 178 |
|
}, |
|
{ |
|
"completion_length": 394.8541784286499, |
|
"epoch": 0.096, |
|
"grad_norm": 0.06954554621938241, |
|
"kl": 0.137664794921875, |
|
"learning_rate": 3.4149493455847897e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4583333656191826, |
|
"reward_std": 0.22180799348279834, |
|
"rewards/equation_reward_func": 0.4817708448972553, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 180 |
|
}, |
|
{ |
|
"completion_length": 410.1119956970215, |
|
"epoch": 0.09706666666666666, |
|
"grad_norm": 0.08884342643437457, |
|
"kl": 0.12933349609375, |
|
"learning_rate": 3.3813275445496766e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3906250223517418, |
|
"reward_std": 0.2675261232070625, |
|
"rewards/equation_reward_func": 0.42187501094304025, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 182 |
|
}, |
|
{ |
|
"completion_length": 425.8515729904175, |
|
"epoch": 0.09813333333333334, |
|
"grad_norm": 0.0807749834414267, |
|
"kl": 0.12939453125, |
|
"learning_rate": 3.347522715914262e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3619792014360428, |
|
"reward_std": 0.260163695551455, |
|
"rewards/equation_reward_func": 0.3880208448972553, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 184 |
|
}, |
|
{ |
|
"completion_length": 414.5390739440918, |
|
"epoch": 0.0992, |
|
"grad_norm": 0.0995709776761296, |
|
"kl": 0.12603759765625, |
|
"learning_rate": 3.313541880015877e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3567708805203438, |
|
"reward_std": 0.281164659652859, |
|
"rewards/equation_reward_func": 0.37500001629814506, |
|
"rewards/format_reward_func": 0.9817708507180214, |
|
"step": 186 |
|
}, |
|
{ |
|
"completion_length": 434.77605628967285, |
|
"epoch": 0.10026666666666667, |
|
"grad_norm": 0.08274171398010909, |
|
"kl": 0.1339111328125, |
|
"learning_rate": 3.279392093743747e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3046875409781933, |
|
"reward_std": 0.2186322333291173, |
|
"rewards/equation_reward_func": 0.3333333448972553, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 188 |
|
}, |
|
{ |
|
"completion_length": 394.0599060058594, |
|
"epoch": 0.10133333333333333, |
|
"grad_norm": 0.09797007100506365, |
|
"kl": 0.16094970703125, |
|
"learning_rate": 3.245080449073459e-07, |
|
"loss": 0.0002, |
|
"reward": 1.377604216337204, |
|
"reward_std": 0.24230501474812627, |
|
"rewards/equation_reward_func": 0.39322918048128486, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 190 |
|
}, |
|
{ |
|
"completion_length": 360.8489685058594, |
|
"epoch": 0.1024, |
|
"grad_norm": 0.08415245014993132, |
|
"kl": 0.161041259765625, |
|
"learning_rate": 3.210614071594162e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4322917088866234, |
|
"reward_std": 0.1579188979230821, |
|
"rewards/equation_reward_func": 0.4505208469927311, |
|
"rewards/format_reward_func": 0.9817708507180214, |
|
"step": 192 |
|
}, |
|
{ |
|
"completion_length": 394.7786560058594, |
|
"epoch": 0.10346666666666667, |
|
"grad_norm": 0.09090919423012042, |
|
"kl": 0.15216064453125, |
|
"learning_rate": 3.1760001190287695e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3437500298023224, |
|
"reward_std": 0.19206074811518192, |
|
"rewards/equation_reward_func": 0.36979167675599456, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 194 |
|
}, |
|
{ |
|
"completion_length": 349.3645906448364, |
|
"epoch": 0.10453333333333334, |
|
"grad_norm": 0.06511093952525322, |
|
"kl": 0.163055419921875, |
|
"learning_rate": 3.141245779747502e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4505208730697632, |
|
"reward_std": 0.20295705134049058, |
|
"rewards/equation_reward_func": 0.4739583423361182, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 196 |
|
}, |
|
{ |
|
"completion_length": 347.56771659851074, |
|
"epoch": 0.1056, |
|
"grad_norm": 0.12249257016949373, |
|
"kl": 0.15631103515625, |
|
"learning_rate": 3.106358271275056e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4713542014360428, |
|
"reward_std": 0.2677521426230669, |
|
"rewards/equation_reward_func": 0.497395847691223, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 198 |
|
}, |
|
{ |
|
"completion_length": 353.79949378967285, |
|
"epoch": 0.10666666666666667, |
|
"grad_norm": 0.11642634796564656, |
|
"kl": 0.169464111328125, |
|
"learning_rate": 3.0713448387917227e-07, |
|
"loss": 0.0002, |
|
"reward": 1.424479205161333, |
|
"reward_std": 0.24417872913181782, |
|
"rewards/equation_reward_func": 0.455729179084301, |
|
"rewards/format_reward_func": 0.9687500111758709, |
|
"step": 200 |
|
}, |
|
{ |
|
"completion_length": 353.62240409851074, |
|
"epoch": 0.10773333333333333, |
|
"grad_norm": 0.10966758903292496, |
|
"kl": 0.17657470703125, |
|
"learning_rate": 3.0362127536287636e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3880208805203438, |
|
"reward_std": 0.22897254303097725, |
|
"rewards/equation_reward_func": 0.41145834466442466, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 202 |
|
}, |
|
{ |
|
"completion_length": 352.95313358306885, |
|
"epoch": 0.1088, |
|
"grad_norm": 0.08287713096000299, |
|
"kl": 0.174285888671875, |
|
"learning_rate": 3.0009693117583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4453125298023224, |
|
"reward_std": 0.23830332793295383, |
|
"rewards/equation_reward_func": 0.4687500107102096, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 204 |
|
}, |
|
{ |
|
"completion_length": 342.9895896911621, |
|
"epoch": 0.10986666666666667, |
|
"grad_norm": 0.11889984004997693, |
|
"kl": 0.2039794921875, |
|
"learning_rate": 2.965621832278401e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4661458879709244, |
|
"reward_std": 0.26223280746489763, |
|
"rewards/equation_reward_func": 0.4921875160653144, |
|
"rewards/format_reward_func": 0.9739583544433117, |
|
"step": 206 |
|
}, |
|
{ |
|
"completion_length": 371.7474021911621, |
|
"epoch": 0.11093333333333333, |
|
"grad_norm": 0.08722890145319173, |
|
"kl": 0.167755126953125, |
|
"learning_rate": 2.9301776558925875e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3437500409781933, |
|
"reward_std": 0.17667264631018043, |
|
"rewards/equation_reward_func": 0.380208341171965, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 208 |
|
}, |
|
{ |
|
"completion_length": 371.341157913208, |
|
"epoch": 0.112, |
|
"grad_norm": 0.11118990694564043, |
|
"kl": 0.1689453125, |
|
"learning_rate": 2.894644143385885e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3880208730697632, |
|
"reward_std": 0.2672252501361072, |
|
"rewards/equation_reward_func": 0.41145834187045693, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 210 |
|
}, |
|
{ |
|
"completion_length": 337.9843807220459, |
|
"epoch": 0.11306666666666666, |
|
"grad_norm": 0.0739582977873293, |
|
"kl": 0.1724853515625, |
|
"learning_rate": 2.859028674095937e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4583333805203438, |
|
"reward_std": 0.1776336650364101, |
|
"rewards/equation_reward_func": 0.4739583544433117, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 212 |
|
}, |
|
{ |
|
"completion_length": 339.21876335144043, |
|
"epoch": 0.11413333333333334, |
|
"grad_norm": 0.09143345815857122, |
|
"kl": 0.17596435546875, |
|
"learning_rate": 2.823338644380566e-07, |
|
"loss": 0.0002, |
|
"reward": 1.492187537252903, |
|
"reward_std": 0.22370707942172885, |
|
"rewards/equation_reward_func": 0.5130208460614085, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 214 |
|
}, |
|
{ |
|
"completion_length": 368.21876430511475, |
|
"epoch": 0.1152, |
|
"grad_norm": 0.09155592641067002, |
|
"kl": 0.19281005859375, |
|
"learning_rate": 2.7875814660817504e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3906250447034836, |
|
"reward_std": 0.22471351828426123, |
|
"rewards/equation_reward_func": 0.41666668211109936, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 216 |
|
}, |
|
{ |
|
"completion_length": 366.52084732055664, |
|
"epoch": 0.11626666666666667, |
|
"grad_norm": 0.08581764717856366, |
|
"kl": 0.17510986328125, |
|
"learning_rate": 2.751764564986396e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4010416939854622, |
|
"reward_std": 0.2523862696252763, |
|
"rewards/equation_reward_func": 0.4375000102445483, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 218 |
|
}, |
|
{ |
|
"completion_length": 382.44011211395264, |
|
"epoch": 0.11733333333333333, |
|
"grad_norm": 0.08641987214494386, |
|
"kl": 0.16357421875, |
|
"learning_rate": 2.715895379284194e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3463542014360428, |
|
"reward_std": 0.20577920693904161, |
|
"rewards/equation_reward_func": 0.3776041753590107, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 220 |
|
}, |
|
{ |
|
"completion_length": 346.23698902130127, |
|
"epoch": 0.1184, |
|
"grad_norm": 0.11469342843068384, |
|
"kl": 0.187591552734375, |
|
"learning_rate": 2.6799813580229174e-07, |
|
"loss": 0.0002, |
|
"reward": 1.414062537252903, |
|
"reward_std": 0.22339888894930482, |
|
"rewards/equation_reward_func": 0.4479166741948575, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 222 |
|
}, |
|
{ |
|
"completion_length": 343.7395906448364, |
|
"epoch": 0.11946666666666667, |
|
"grad_norm": 0.10710582787702641, |
|
"kl": 0.17193603515625, |
|
"learning_rate": 2.6440299595614606e-07, |
|
"loss": 0.0002, |
|
"reward": 1.460937537252903, |
|
"reward_std": 0.25045704562216997, |
|
"rewards/equation_reward_func": 0.4921875111758709, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 224 |
|
}, |
|
{ |
|
"completion_length": 349.78907203674316, |
|
"epoch": 0.12053333333333334, |
|
"grad_norm": 0.0754366633012716, |
|
"kl": 0.180908203125, |
|
"learning_rate": 2.6080486500209347e-07, |
|
"loss": 0.0002, |
|
"reward": 1.421875037252903, |
|
"reward_std": 0.24725741427391768, |
|
"rewards/equation_reward_func": 0.46354167629033327, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 226 |
|
}, |
|
{ |
|
"completion_length": 336.4166774749756, |
|
"epoch": 0.1216, |
|
"grad_norm": 0.10961018063074526, |
|
"kl": 0.1932373046875, |
|
"learning_rate": 2.572044901734166e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4609375521540642, |
|
"reward_std": 0.24446540977805853, |
|
"rewards/equation_reward_func": 0.5130208514165133, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 228 |
|
}, |
|
{ |
|
"completion_length": 362.3151149749756, |
|
"epoch": 0.12266666666666666, |
|
"grad_norm": 0.0750432050842772, |
|
"kl": 0.1806640625, |
|
"learning_rate": 2.536026191693893e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3437500484287739, |
|
"reward_std": 0.228640450630337, |
|
"rewards/equation_reward_func": 0.38281250977888703, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 230 |
|
}, |
|
{ |
|
"completion_length": 346.28646659851074, |
|
"epoch": 0.12373333333333333, |
|
"grad_norm": 0.0866455945339987, |
|
"kl": 0.19073486328125, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4166667014360428, |
|
"reward_std": 0.25847903126850724, |
|
"rewards/equation_reward_func": 0.46875001350417733, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 232 |
|
}, |
|
{ |
|
"completion_length": 387.15625953674316, |
|
"epoch": 0.1248, |
|
"grad_norm": 0.08835289541018657, |
|
"kl": 0.18060302734375, |
|
"learning_rate": 2.4639738083061073e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3307292126119137, |
|
"reward_std": 0.2538083433173597, |
|
"rewards/equation_reward_func": 0.3854166828095913, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 234 |
|
}, |
|
{ |
|
"completion_length": 371.65625953674316, |
|
"epoch": 0.12586666666666665, |
|
"grad_norm": 0.09102684058299942, |
|
"kl": 0.18902587890625, |
|
"learning_rate": 2.4279550982658345e-07, |
|
"loss": 0.0002, |
|
"reward": 1.369791705161333, |
|
"reward_std": 0.21475306199863553, |
|
"rewards/equation_reward_func": 0.4244791795499623, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 236 |
|
}, |
|
{ |
|
"completion_length": 341.49740505218506, |
|
"epoch": 0.12693333333333334, |
|
"grad_norm": 0.08633591554429869, |
|
"kl": 0.21234130859375, |
|
"learning_rate": 2.3919513499790646e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4531250447034836, |
|
"reward_std": 0.24704138981178403, |
|
"rewards/equation_reward_func": 0.5000000195577741, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 238 |
|
}, |
|
{ |
|
"completion_length": 405.6484489440918, |
|
"epoch": 0.128, |
|
"grad_norm": 0.10763242906498482, |
|
"kl": 0.2197265625, |
|
"learning_rate": 2.3559700404385394e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2890625335276127, |
|
"reward_std": 0.31284062657505274, |
|
"rewards/equation_reward_func": 0.3671875048894435, |
|
"rewards/format_reward_func": 0.9218750223517418, |
|
"step": 240 |
|
}, |
|
{ |
|
"completion_length": 395.661470413208, |
|
"epoch": 0.12906666666666666, |
|
"grad_norm": 0.08038592157957923, |
|
"kl": 0.2103271484375, |
|
"learning_rate": 2.3200186419770823e-07, |
|
"loss": 0.0002, |
|
"reward": 1.252604205161333, |
|
"reward_std": 0.2701655197888613, |
|
"rewards/equation_reward_func": 0.317708337912336, |
|
"rewards/format_reward_func": 0.934895858168602, |
|
"step": 242 |
|
}, |
|
{ |
|
"completion_length": 314.59636306762695, |
|
"epoch": 0.13013333333333332, |
|
"grad_norm": 0.09192187591314832, |
|
"kl": 0.22662353515625, |
|
"learning_rate": 2.284104620715807e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4947917088866234, |
|
"reward_std": 0.3372342623770237, |
|
"rewards/equation_reward_func": 0.5572916788514704, |
|
"rewards/format_reward_func": 0.9375000111758709, |
|
"step": 244 |
|
}, |
|
{ |
|
"completion_length": 329.73698806762695, |
|
"epoch": 0.1312, |
|
"grad_norm": 0.10838571395133541, |
|
"kl": 0.24334716796875, |
|
"learning_rate": 2.2482354350136043e-07, |
|
"loss": 0.0002, |
|
"reward": 1.455729216337204, |
|
"reward_std": 0.2566018928773701, |
|
"rewards/equation_reward_func": 0.5104166800156236, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 246 |
|
}, |
|
{ |
|
"completion_length": 340.03907108306885, |
|
"epoch": 0.13226666666666667, |
|
"grad_norm": 0.07566512386282394, |
|
"kl": 0.2659912109375, |
|
"learning_rate": 2.2124185339182496e-07, |
|
"loss": 0.0003, |
|
"reward": 1.377604205161333, |
|
"reward_std": 0.2088687182404101, |
|
"rewards/equation_reward_func": 0.4401041811797768, |
|
"rewards/format_reward_func": 0.9375000149011612, |
|
"step": 248 |
|
}, |
|
{ |
|
"completion_length": 391.1614685058594, |
|
"epoch": 0.13333333333333333, |
|
"grad_norm": 0.08542314001357776, |
|
"kl": 0.21875, |
|
"learning_rate": 2.1766613556194344e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2083333767950535, |
|
"reward_std": 0.24805923039093614, |
|
"rewards/equation_reward_func": 0.27864584047347307, |
|
"rewards/format_reward_func": 0.9296875111758709, |
|
"step": 250 |
|
}, |
|
{ |
|
"completion_length": 338.18751430511475, |
|
"epoch": 0.1344, |
|
"grad_norm": 0.08749740784410187, |
|
"kl": 0.24298095703125, |
|
"learning_rate": 2.1409713259040628e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4062500149011612, |
|
"reward_std": 0.27646134374663234, |
|
"rewards/equation_reward_func": 0.4557291779201478, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 252 |
|
}, |
|
{ |
|
"completion_length": 350.28386306762695, |
|
"epoch": 0.13546666666666668, |
|
"grad_norm": 0.10711337890017471, |
|
"kl": 0.223388671875, |
|
"learning_rate": 2.105355856614115e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3151042014360428, |
|
"reward_std": 0.25063526490703225, |
|
"rewards/equation_reward_func": 0.38020834513008595, |
|
"rewards/format_reward_func": 0.9348958544433117, |
|
"step": 254 |
|
}, |
|
{ |
|
"completion_length": 339.2656364440918, |
|
"epoch": 0.13653333333333334, |
|
"grad_norm": 0.09589698286961762, |
|
"kl": 0.21533203125, |
|
"learning_rate": 2.069822344107413e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4036458656191826, |
|
"reward_std": 0.26786494068801403, |
|
"rewards/equation_reward_func": 0.4453125095460564, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 256 |
|
}, |
|
{ |
|
"completion_length": 319.2291774749756, |
|
"epoch": 0.1376, |
|
"grad_norm": 0.07780726214703354, |
|
"kl": 0.2373046875, |
|
"learning_rate": 2.034378167721599e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4817708618938923, |
|
"reward_std": 0.20678498363122344, |
|
"rewards/equation_reward_func": 0.5234375149011612, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 258 |
|
}, |
|
{ |
|
"completion_length": 346.63803005218506, |
|
"epoch": 0.13866666666666666, |
|
"grad_norm": 0.11641174762994905, |
|
"kl": 0.22607421875, |
|
"learning_rate": 1.9990306882416485e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3255208656191826, |
|
"reward_std": 0.24585285549983382, |
|
"rewards/equation_reward_func": 0.3828125111758709, |
|
"rewards/format_reward_func": 0.9427083469927311, |
|
"step": 260 |
|
}, |
|
{ |
|
"completion_length": 337.49740409851074, |
|
"epoch": 0.13973333333333332, |
|
"grad_norm": 0.15077694381297332, |
|
"kl": 0.2508544921875, |
|
"learning_rate": 1.9637872463712362e-07, |
|
"loss": 0.0003, |
|
"reward": 1.4114583656191826, |
|
"reward_std": 0.3120162379927933, |
|
"rewards/equation_reward_func": 0.46614584838971496, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 262 |
|
}, |
|
{ |
|
"completion_length": 350.4427185058594, |
|
"epoch": 0.1408, |
|
"grad_norm": 0.09105767195307653, |
|
"kl": 0.2108154296875, |
|
"learning_rate": 1.9286551612082773e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3541667088866234, |
|
"reward_std": 0.26035504834726453, |
|
"rewards/equation_reward_func": 0.4036458460614085, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 264 |
|
}, |
|
{ |
|
"completion_length": 354.94532108306885, |
|
"epoch": 0.14186666666666667, |
|
"grad_norm": 0.11114474779974037, |
|
"kl": 0.2325439453125, |
|
"learning_rate": 1.8936417287249446e-07, |
|
"loss": 0.0002, |
|
"reward": 1.390625037252903, |
|
"reward_std": 0.24211309384554625, |
|
"rewards/equation_reward_func": 0.4296875118743628, |
|
"rewards/format_reward_func": 0.9609375260770321, |
|
"step": 266 |
|
}, |
|
{ |
|
"completion_length": 332.6849031448364, |
|
"epoch": 0.14293333333333333, |
|
"grad_norm": 0.1123031021076321, |
|
"kl": 0.229736328125, |
|
"learning_rate": 1.8587542202524985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3906250409781933, |
|
"reward_std": 0.2365088271908462, |
|
"rewards/equation_reward_func": 0.4348958439659327, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 268 |
|
}, |
|
{ |
|
"completion_length": 345.1770944595337, |
|
"epoch": 0.144, |
|
"grad_norm": 0.11158112162119024, |
|
"kl": 0.21636962890625, |
|
"learning_rate": 1.82399988097123e-07, |
|
"loss": 0.0002, |
|
"reward": 1.361979190260172, |
|
"reward_std": 0.3079025028273463, |
|
"rewards/equation_reward_func": 0.43750000395812094, |
|
"rewards/format_reward_func": 0.9244791865348816, |
|
"step": 270 |
|
}, |
|
{ |
|
"completion_length": 323.8698034286499, |
|
"epoch": 0.14506666666666668, |
|
"grad_norm": 0.09689920100803472, |
|
"kl": 0.2325439453125, |
|
"learning_rate": 1.7893859284058378e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4296875447034836, |
|
"reward_std": 0.18631823640316725, |
|
"rewards/equation_reward_func": 0.46875001303851604, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 272 |
|
}, |
|
{ |
|
"completion_length": 368.67188262939453, |
|
"epoch": 0.14613333333333334, |
|
"grad_norm": 0.13615031599545702, |
|
"kl": 0.20782470703125, |
|
"learning_rate": 1.7549195509265407e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3203125409781933, |
|
"reward_std": 0.24673572788015008, |
|
"rewards/equation_reward_func": 0.3593750079162419, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 274 |
|
}, |
|
{ |
|
"completion_length": 388.29948806762695, |
|
"epoch": 0.1472, |
|
"grad_norm": 0.09073524977124178, |
|
"kl": 0.20733642578125, |
|
"learning_rate": 1.7206079062562536e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2708333767950535, |
|
"reward_std": 0.21955121494829655, |
|
"rewards/equation_reward_func": 0.3281250111758709, |
|
"rewards/format_reward_func": 0.9427083507180214, |
|
"step": 276 |
|
}, |
|
{ |
|
"completion_length": 345.38542652130127, |
|
"epoch": 0.14826666666666666, |
|
"grad_norm": 5220.06372637961, |
|
"kl": 7072.191650390625, |
|
"learning_rate": 1.6864581199841226e-07, |
|
"loss": 7.0509, |
|
"reward": 1.3932292088866234, |
|
"reward_std": 0.25684024626389146, |
|
"rewards/equation_reward_func": 0.44791668001562357, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 278 |
|
}, |
|
{ |
|
"completion_length": 319.3724060058594, |
|
"epoch": 0.14933333333333335, |
|
"grad_norm": 79.14680202266041, |
|
"kl": 49.96136474609375, |
|
"learning_rate": 1.6524772840857388e-07, |
|
"loss": 0.0497, |
|
"reward": 1.4244792014360428, |
|
"reward_std": 0.22301306808367372, |
|
"rewards/equation_reward_func": 0.46875001303851604, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 280 |
|
}, |
|
{ |
|
"completion_length": 327.8385486602783, |
|
"epoch": 0.1504, |
|
"grad_norm": 0.08745470520702019, |
|
"kl": 0.24444580078125, |
|
"learning_rate": 1.6186724554503237e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4192708693444729, |
|
"reward_std": 0.22730597108602524, |
|
"rewards/equation_reward_func": 0.45833334093913436, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 282 |
|
}, |
|
{ |
|
"completion_length": 321.70053005218506, |
|
"epoch": 0.15146666666666667, |
|
"grad_norm": 0.09168203339141828, |
|
"kl": 0.20928955078125, |
|
"learning_rate": 1.5850506544152103e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4739583656191826, |
|
"reward_std": 0.19128448516130447, |
|
"rewards/equation_reward_func": 0.5052083432674408, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 284 |
|
}, |
|
{ |
|
"completion_length": 322.1458406448364, |
|
"epoch": 0.15253333333333333, |
|
"grad_norm": 0.12387430706161645, |
|
"kl": 0.21209716796875, |
|
"learning_rate": 1.5516188633079107e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4322917088866234, |
|
"reward_std": 0.2435653922148049, |
|
"rewards/equation_reward_func": 0.4817708421032876, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 286 |
|
}, |
|
{ |
|
"completion_length": 335.86459159851074, |
|
"epoch": 0.1536, |
|
"grad_norm": 0.11042480316797151, |
|
"kl": 0.2279052734375, |
|
"learning_rate": 1.5183840249960784e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4062500409781933, |
|
"reward_std": 0.2505862903781235, |
|
"rewards/equation_reward_func": 0.4791666779201478, |
|
"rewards/format_reward_func": 0.9270833507180214, |
|
"step": 288 |
|
}, |
|
{ |
|
"completion_length": 332.4531373977661, |
|
"epoch": 0.15466666666666667, |
|
"grad_norm": 0.11068445556167467, |
|
"kl": 0.226318359375, |
|
"learning_rate": 1.4853530414456612e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3932291939854622, |
|
"reward_std": 0.2191076222807169, |
|
"rewards/equation_reward_func": 0.44791667349636555, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 290 |
|
}, |
|
{ |
|
"completion_length": 346.98959159851074, |
|
"epoch": 0.15573333333333333, |
|
"grad_norm": 0.10537288030437299, |
|
"kl": 0.2379150390625, |
|
"learning_rate": 1.4525327722875568e-07, |
|
"loss": 0.0002, |
|
"reward": 1.367187537252903, |
|
"reward_std": 0.25823584850877523, |
|
"rewards/equation_reward_func": 0.4270833458285779, |
|
"rewards/format_reward_func": 0.9401041828095913, |
|
"step": 292 |
|
}, |
|
{ |
|
"completion_length": 325.7448043823242, |
|
"epoch": 0.1568, |
|
"grad_norm": 0.10313889262076706, |
|
"kl": 0.21075439453125, |
|
"learning_rate": 1.4199300333930515e-07, |
|
"loss": 0.0002, |
|
"reward": 1.390625037252903, |
|
"reward_std": 0.24558233935385942, |
|
"rewards/equation_reward_func": 0.4505208465270698, |
|
"rewards/format_reward_func": 0.9401041865348816, |
|
"step": 294 |
|
}, |
|
{ |
|
"completion_length": 324.1849069595337, |
|
"epoch": 0.15786666666666666, |
|
"grad_norm": 0.10081124744539481, |
|
"kl": 0.23394775390625, |
|
"learning_rate": 1.3875515954583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3984375447034836, |
|
"reward_std": 0.24683487368747592, |
|
"rewards/equation_reward_func": 0.4505208465270698, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 296 |
|
}, |
|
{ |
|
"completion_length": 321.01303005218506, |
|
"epoch": 0.15893333333333334, |
|
"grad_norm": 0.08166984314299437, |
|
"kl": 0.21270751953125, |
|
"learning_rate": 1.3554041825985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4427083656191826, |
|
"reward_std": 0.15349334990605712, |
|
"rewards/equation_reward_func": 0.48177084513008595, |
|
"rewards/format_reward_func": 0.9609375223517418, |
|
"step": 298 |
|
}, |
|
{ |
|
"completion_length": 326.8463659286499, |
|
"epoch": 0.16, |
|
"grad_norm": 0.07831370889731273, |
|
"kl": 0.23455810546875, |
|
"learning_rate": 1.323494470950949e-07, |
|
"loss": 0.0002, |
|
"reward": 1.351562537252903, |
|
"reward_std": 0.22833695402368903, |
|
"rewards/equation_reward_func": 0.4036458421032876, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 300 |
|
}, |
|
{ |
|
"completion_length": 344.11719512939453, |
|
"epoch": 0.16106666666666666, |
|
"grad_norm": 0.08232548207891384, |
|
"kl": 0.22637939453125, |
|
"learning_rate": 1.2918290872891236e-07, |
|
"loss": 0.0002, |
|
"reward": 1.377604205161333, |
|
"reward_std": 0.2291308455169201, |
|
"rewards/equation_reward_func": 0.42968751303851604, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 302 |
|
}, |
|
{ |
|
"completion_length": 326.5494899749756, |
|
"epoch": 0.16213333333333332, |
|
"grad_norm": 0.10750065685022393, |
|
"kl": 0.21270751953125, |
|
"learning_rate": 1.260414607646213e-07, |
|
"loss": 0.0002, |
|
"reward": 1.408854205161333, |
|
"reward_std": 0.24541507568210363, |
|
"rewards/equation_reward_func": 0.45572917931713164, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 304 |
|
}, |
|
{ |
|
"completion_length": 310.8593854904175, |
|
"epoch": 0.1632, |
|
"grad_norm": 0.0890480361990402, |
|
"kl": 0.2191162109375, |
|
"learning_rate": 1.2292575559495143e-07, |
|
"loss": 0.0002, |
|
"reward": 1.455729205161333, |
|
"reward_std": 0.18851999705657363, |
|
"rewards/equation_reward_func": 0.5104166793171316, |
|
"rewards/format_reward_func": 0.9453125223517418, |
|
"step": 306 |
|
}, |
|
{ |
|
"completion_length": 336.3177185058594, |
|
"epoch": 0.16426666666666667, |
|
"grad_norm": 0.12539643357527022, |
|
"kl": 0.22491455078125, |
|
"learning_rate": 1.1983644026655835e-07, |
|
"loss": 0.0002, |
|
"reward": 1.398437526077032, |
|
"reward_std": 0.24129127291962504, |
|
"rewards/equation_reward_func": 0.43489584792405367, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 308 |
|
}, |
|
{ |
|
"completion_length": 363.4453248977661, |
|
"epoch": 0.16533333333333333, |
|
"grad_norm": 0.10147317845374816, |
|
"kl": 0.22625732421875, |
|
"learning_rate": 1.1677415634565066e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2890625298023224, |
|
"reward_std": 0.2331953472457826, |
|
"rewards/equation_reward_func": 0.3359375095460564, |
|
"rewards/format_reward_func": 0.9531250186264515, |
|
"step": 310 |
|
}, |
|
{ |
|
"completion_length": 334.07552909851074, |
|
"epoch": 0.1664, |
|
"grad_norm": 0.07071630516660898, |
|
"kl": 0.21331787109375, |
|
"learning_rate": 1.1373953978475353e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3776042088866234, |
|
"reward_std": 0.2111625811085105, |
|
"rewards/equation_reward_func": 0.42708334140479565, |
|
"rewards/format_reward_func": 0.9505208469927311, |
|
"step": 312 |
|
}, |
|
{ |
|
"completion_length": 345.9349031448364, |
|
"epoch": 0.16746666666666668, |
|
"grad_norm": 0.09304504389856937, |
|
"kl": 0.22332763671875, |
|
"learning_rate": 1.1073322079063913e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3802083693444729, |
|
"reward_std": 0.24213288770988584, |
|
"rewards/equation_reward_func": 0.4453125111758709, |
|
"rewards/format_reward_func": 0.9348958507180214, |
|
"step": 314 |
|
}, |
|
{ |
|
"completion_length": 343.2890691757202, |
|
"epoch": 0.16853333333333334, |
|
"grad_norm": 0.1097380319531818, |
|
"kl": 0.22332763671875, |
|
"learning_rate": 1.0775582369344946e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3098958730697632, |
|
"reward_std": 0.29304072819650173, |
|
"rewards/equation_reward_func": 0.3776041774544865, |
|
"rewards/format_reward_func": 0.9322916828095913, |
|
"step": 316 |
|
}, |
|
{ |
|
"completion_length": 294.8020906448364, |
|
"epoch": 0.1696, |
|
"grad_norm": 1.43305351784789, |
|
"kl": 3.03558349609375, |
|
"learning_rate": 1.0480796681704077e-07, |
|
"loss": 0.003, |
|
"reward": 1.5338542014360428, |
|
"reward_std": 0.18905250960960984, |
|
"rewards/equation_reward_func": 0.5651041741948575, |
|
"rewards/format_reward_func": 0.9687500186264515, |
|
"step": 318 |
|
}, |
|
{ |
|
"completion_length": 301.0208396911621, |
|
"epoch": 0.17066666666666666, |
|
"grad_norm": 0.09915602747223792, |
|
"kl": 0.23931884765625, |
|
"learning_rate": 1.018902623505741e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5182292088866234, |
|
"reward_std": 0.19097376568242908, |
|
"rewards/equation_reward_func": 0.5625000158324838, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 320 |
|
}, |
|
{ |
|
"completion_length": 320.85417461395264, |
|
"epoch": 0.17173333333333332, |
|
"grad_norm": 0.09117919942079071, |
|
"kl": 0.214111328125, |
|
"learning_rate": 9.900331622138063e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4140625521540642, |
|
"reward_std": 0.2838053968735039, |
|
"rewards/equation_reward_func": 0.460937516996637, |
|
"rewards/format_reward_func": 0.9531250223517418, |
|
"step": 322 |
|
}, |
|
{ |
|
"completion_length": 311.2395896911621, |
|
"epoch": 0.1728, |
|
"grad_norm": 0.0815049784953199, |
|
"kl": 0.22808837890625, |
|
"learning_rate": 9.614772796912681e-08, |
|
"loss": 0.0002, |
|
"reward": 1.453125037252903, |
|
"reward_std": 0.2417630972340703, |
|
"rewards/equation_reward_func": 0.5182291767559946, |
|
"rewards/format_reward_func": 0.9348958469927311, |
|
"step": 324 |
|
}, |
|
{ |
|
"completion_length": 333.16146755218506, |
|
"epoch": 0.17386666666666667, |
|
"grad_norm": 0.10328198816513103, |
|
"kl": 0.22216796875, |
|
"learning_rate": 9.332409062130686e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3906250335276127, |
|
"reward_std": 0.237897964194417, |
|
"rewards/equation_reward_func": 0.43750001583248377, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 326 |
|
}, |
|
{ |
|
"completion_length": 310.9401111602783, |
|
"epoch": 0.17493333333333333, |
|
"grad_norm": 0.11902359297904073, |
|
"kl": 0.2431640625, |
|
"learning_rate": 9.053299057008699e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4453125409781933, |
|
"reward_std": 0.26797239342704415, |
|
"rewards/equation_reward_func": 0.5000000176951289, |
|
"rewards/format_reward_func": 0.9453125111758709, |
|
"step": 328 |
|
}, |
|
{ |
|
"completion_length": 290.20573902130127, |
|
"epoch": 0.176, |
|
"grad_norm": 0.0870223579021212, |
|
"kl": 0.244873046875, |
|
"learning_rate": 8.777500745052743e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5052083805203438, |
|
"reward_std": 0.21289193397387862, |
|
"rewards/equation_reward_func": 0.5468750149011612, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 330 |
|
}, |
|
{ |
|
"completion_length": 317.96094512939453, |
|
"epoch": 0.17706666666666668, |
|
"grad_norm": 0.09923025666525324, |
|
"kl": 0.24334716796875, |
|
"learning_rate": 8.505071402020892e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3723958656191826, |
|
"reward_std": 0.19928161799907684, |
|
"rewards/equation_reward_func": 0.42187501303851604, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 332 |
|
}, |
|
{ |
|
"completion_length": 282.44271659851074, |
|
"epoch": 0.17813333333333334, |
|
"grad_norm": 0.11232051976601577, |
|
"kl": 0.25335693359375, |
|
"learning_rate": 8.236067604028562e-08, |
|
"loss": 0.0003, |
|
"reward": 1.5130208656191826, |
|
"reward_std": 0.24858005391433835, |
|
"rewards/equation_reward_func": 0.5546875162981451, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 334 |
|
}, |
|
{ |
|
"completion_length": 318.4244842529297, |
|
"epoch": 0.1792, |
|
"grad_norm": 0.1083015800637841, |
|
"kl": 0.213134765625, |
|
"learning_rate": 7.970545215799327e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4739583730697632, |
|
"reward_std": 0.2625010758638382, |
|
"rewards/equation_reward_func": 0.531250013038516, |
|
"rewards/format_reward_func": 0.9427083507180214, |
|
"step": 336 |
|
}, |
|
{ |
|
"completion_length": 340.153657913208, |
|
"epoch": 0.18026666666666666, |
|
"grad_norm": 0.09142278500214475, |
|
"kl": 0.23583984375, |
|
"learning_rate": 7.708559379063204e-08, |
|
"loss": 0.0002, |
|
"reward": 1.406250037252903, |
|
"reward_std": 0.22931140661239624, |
|
"rewards/equation_reward_func": 0.4453125102445483, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 338 |
|
}, |
|
{ |
|
"completion_length": 316.268235206604, |
|
"epoch": 0.18133333333333335, |
|
"grad_norm": 0.08608717054817108, |
|
"kl": 0.2203369140625, |
|
"learning_rate": 7.45016450110534e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3828125335276127, |
|
"reward_std": 0.1976271690800786, |
|
"rewards/equation_reward_func": 0.43229167512618005, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 340 |
|
}, |
|
{ |
|
"completion_length": 332.22657108306885, |
|
"epoch": 0.1824, |
|
"grad_norm": 0.08626389120583622, |
|
"kl": 0.22198486328125, |
|
"learning_rate": 7.195414243467029e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3906250409781933, |
|
"reward_std": 0.20977461012080312, |
|
"rewards/equation_reward_func": 0.45833334675990045, |
|
"rewards/format_reward_func": 0.9322916865348816, |
|
"step": 342 |
|
}, |
|
{ |
|
"completion_length": 321.02605152130127, |
|
"epoch": 0.18346666666666667, |
|
"grad_norm": 0.11346266373826759, |
|
"kl": 0.210693359375, |
|
"learning_rate": 6.944361510801763e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4661458730697632, |
|
"reward_std": 0.2054482251405716, |
|
"rewards/equation_reward_func": 0.4973958432674408, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 344 |
|
}, |
|
{ |
|
"completion_length": 298.59636211395264, |
|
"epoch": 0.18453333333333333, |
|
"grad_norm": 0.1067949932858023, |
|
"kl": 0.29071044921875, |
|
"learning_rate": 6.697058439888283e-08, |
|
"loss": 0.0003, |
|
"reward": 1.390625037252903, |
|
"reward_std": 0.24251853488385677, |
|
"rewards/equation_reward_func": 0.4531250107102096, |
|
"rewards/format_reward_func": 0.9375000223517418, |
|
"step": 346 |
|
}, |
|
{ |
|
"completion_length": 323.62761211395264, |
|
"epoch": 0.1856, |
|
"grad_norm": 0.10153580772849999, |
|
"kl": 0.20916748046875, |
|
"learning_rate": 6.453556388803288e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3697917088866234, |
|
"reward_std": 0.20808159746229649, |
|
"rewards/equation_reward_func": 0.42447917722165585, |
|
"rewards/format_reward_func": 0.9453125223517418, |
|
"step": 348 |
|
}, |
|
{ |
|
"completion_length": 309.18751096725464, |
|
"epoch": 0.18666666666666668, |
|
"grad_norm": 0.08506346440717698, |
|
"kl": 0.2176513671875, |
|
"learning_rate": 6.213905926255697e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3958333693444729, |
|
"reward_std": 0.16924588894471526, |
|
"rewards/equation_reward_func": 0.43229168001562357, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 350 |
|
}, |
|
{ |
|
"completion_length": 276.46875858306885, |
|
"epoch": 0.18773333333333334, |
|
"grad_norm": 0.07380858754669188, |
|
"kl": 0.23443603515625, |
|
"learning_rate": 5.978156821084987e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5572916865348816, |
|
"reward_std": 0.1799696683883667, |
|
"rewards/equation_reward_func": 0.5781250111758709, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 352 |
|
}, |
|
{ |
|
"completion_length": 317.0286521911621, |
|
"epoch": 0.1888, |
|
"grad_norm": 0.08221686789004773, |
|
"kl": 0.221435546875, |
|
"learning_rate": 5.7463580319254853e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4348958693444729, |
|
"reward_std": 0.18001093342900276, |
|
"rewards/equation_reward_func": 0.4713541779201478, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 354 |
|
}, |
|
{ |
|
"completion_length": 341.17709255218506, |
|
"epoch": 0.18986666666666666, |
|
"grad_norm": 0.1006850259811907, |
|
"kl": 0.21112060546875, |
|
"learning_rate": 5.518557697039081e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3802083730697632, |
|
"reward_std": 0.24440749734640121, |
|
"rewards/equation_reward_func": 0.4322916748933494, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 356 |
|
}, |
|
{ |
|
"completion_length": 282.05209159851074, |
|
"epoch": 0.19093333333333334, |
|
"grad_norm": 0.0784480555352209, |
|
"kl": 0.23577880859375, |
|
"learning_rate": 5.294803124318145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5859375223517418, |
|
"reward_std": 0.19743689289316535, |
|
"rewards/equation_reward_func": 0.6302083439659327, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 358 |
|
}, |
|
{ |
|
"completion_length": 312.36459255218506, |
|
"epoch": 0.192, |
|
"grad_norm": 0.09968321821935404, |
|
"kl": 0.2149658203125, |
|
"learning_rate": 5.07514078146106e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3828125409781933, |
|
"reward_std": 0.23581918003037572, |
|
"rewards/equation_reward_func": 0.42708334885537624, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 360 |
|
}, |
|
{ |
|
"completion_length": 310.42968940734863, |
|
"epoch": 0.19306666666666666, |
|
"grad_norm": 0.08047116966625914, |
|
"kl": 0.2113037109375, |
|
"learning_rate": 4.859616286322094e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5208333730697632, |
|
"reward_std": 0.18892288114875555, |
|
"rewards/equation_reward_func": 0.5625000111758709, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 362 |
|
}, |
|
{ |
|
"completion_length": 334.89063262939453, |
|
"epoch": 0.19413333333333332, |
|
"grad_norm": 0.11871865226917534, |
|
"kl": 0.22442626953125, |
|
"learning_rate": 4.648274397437829e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3359375186264515, |
|
"reward_std": 0.24623532965779305, |
|
"rewards/equation_reward_func": 0.3984375074505806, |
|
"rewards/format_reward_func": 0.9375000149011612, |
|
"step": 364 |
|
}, |
|
{ |
|
"completion_length": 321.46875953674316, |
|
"epoch": 0.1952, |
|
"grad_norm": 0.08169405576123293, |
|
"kl": 0.21612548828125, |
|
"learning_rate": 4.4411590047320617e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4192708805203438, |
|
"reward_std": 0.1580515131354332, |
|
"rewards/equation_reward_func": 0.45833334466442466, |
|
"rewards/format_reward_func": 0.9609375223517418, |
|
"step": 366 |
|
}, |
|
{ |
|
"completion_length": 268.88021659851074, |
|
"epoch": 0.19626666666666667, |
|
"grad_norm": 0.09213898057952202, |
|
"kl": 0.2569580078125, |
|
"learning_rate": 4.2383131204010494e-08, |
|
"loss": 0.0003, |
|
"reward": 1.5286458656191826, |
|
"reward_std": 0.19515367224812508, |
|
"rewards/equation_reward_func": 0.5677083488553762, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 368 |
|
}, |
|
{ |
|
"completion_length": 308.2838611602783, |
|
"epoch": 0.19733333333333333, |
|
"grad_norm": 0.102066920867919, |
|
"kl": 0.2474365234375, |
|
"learning_rate": 4.039778869981064e-08, |
|
"loss": 0.0002, |
|
"reward": 1.424479205161333, |
|
"reward_std": 0.16935323411598802, |
|
"rewards/equation_reward_func": 0.45312500931322575, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 370 |
|
}, |
|
{ |
|
"completion_length": 328.89323806762695, |
|
"epoch": 0.1984, |
|
"grad_norm": 0.10475529028007942, |
|
"kl": 0.218505859375, |
|
"learning_rate": 3.845597483600049e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3697917014360428, |
|
"reward_std": 0.21701573207974434, |
|
"rewards/equation_reward_func": 0.42708334466442466, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 372 |
|
}, |
|
{ |
|
"completion_length": 336.2552156448364, |
|
"epoch": 0.19946666666666665, |
|
"grad_norm": 0.0856518942239391, |
|
"kl": 0.2081298828125, |
|
"learning_rate": 3.655809287415284e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3567708916962147, |
|
"reward_std": 0.1945798760280013, |
|
"rewards/equation_reward_func": 0.40364584513008595, |
|
"rewards/format_reward_func": 0.9531250260770321, |
|
"step": 374 |
|
}, |
|
{ |
|
"completion_length": 341.63542652130127, |
|
"epoch": 0.20053333333333334, |
|
"grad_norm": 0.1246882559841069, |
|
"kl": 0.21630859375, |
|
"learning_rate": 3.4704536952387285e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3333333656191826, |
|
"reward_std": 0.23673815419897437, |
|
"rewards/equation_reward_func": 0.3723958423361182, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 376 |
|
}, |
|
{ |
|
"completion_length": 306.4270896911621, |
|
"epoch": 0.2016, |
|
"grad_norm": 0.1209625715448826, |
|
"kl": 0.23431396484375, |
|
"learning_rate": 3.2895692003518575e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3958333805203438, |
|
"reward_std": 0.22822495363652706, |
|
"rewards/equation_reward_func": 0.44531251303851604, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 378 |
|
}, |
|
{ |
|
"completion_length": 309.70313453674316, |
|
"epoch": 0.20266666666666666, |
|
"grad_norm": 0.12006215018778782, |
|
"kl": 0.2227783203125, |
|
"learning_rate": 3.113193367511635e-08, |
|
"loss": 0.0002, |
|
"reward": 1.468750037252903, |
|
"reward_std": 0.23679165728390217, |
|
"rewards/equation_reward_func": 0.5208333502523601, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 380 |
|
}, |
|
{ |
|
"completion_length": 317.74740409851074, |
|
"epoch": 0.20373333333333332, |
|
"grad_norm": 0.10748128632252552, |
|
"kl": 0.21893310546875, |
|
"learning_rate": 2.9413628251493934e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4244792088866234, |
|
"reward_std": 0.197633049916476, |
|
"rewards/equation_reward_func": 0.458333347691223, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 382 |
|
}, |
|
{ |
|
"completion_length": 300.8619918823242, |
|
"epoch": 0.2048, |
|
"grad_norm": 0.09907271619590914, |
|
"kl": 0.31658935546875, |
|
"learning_rate": 2.774113257764066e-08, |
|
"loss": 0.0003, |
|
"reward": 1.486979216337204, |
|
"reward_std": 0.262174597941339, |
|
"rewards/equation_reward_func": 0.536458345130086, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 384 |
|
}, |
|
{ |
|
"completion_length": 326.85417461395264, |
|
"epoch": 0.20586666666666667, |
|
"grad_norm": 0.07810355072932347, |
|
"kl": 0.22857666015625, |
|
"learning_rate": 2.611479398511518e-08, |
|
"loss": 0.0002, |
|
"reward": 1.322916705161333, |
|
"reward_std": 0.17400957271456718, |
|
"rewards/equation_reward_func": 0.3750000100117177, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 386 |
|
}, |
|
{ |
|
"completion_length": 299.80730056762695, |
|
"epoch": 0.20693333333333333, |
|
"grad_norm": 0.10017158729015543, |
|
"kl": 0.22186279296875, |
|
"learning_rate": 2.4534950219914057e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4869792088866234, |
|
"reward_std": 0.23186511639505625, |
|
"rewards/equation_reward_func": 0.5286458488553762, |
|
"rewards/format_reward_func": 0.958333358168602, |
|
"step": 388 |
|
}, |
|
{ |
|
"completion_length": 320.9270896911621, |
|
"epoch": 0.208, |
|
"grad_norm": 0.1099683165292792, |
|
"kl": 0.21923828125, |
|
"learning_rate": 2.300192937233128e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4114583618938923, |
|
"reward_std": 0.22744439914822578, |
|
"rewards/equation_reward_func": 0.460937513737008, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 390 |
|
}, |
|
{ |
|
"completion_length": 325.35417652130127, |
|
"epoch": 0.20906666666666668, |
|
"grad_norm": 0.0949023163219697, |
|
"kl": 0.2340087890625, |
|
"learning_rate": 2.1516049808822935e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3906250484287739, |
|
"reward_std": 0.22641806630417705, |
|
"rewards/equation_reward_func": 0.44270834838971496, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 392 |
|
}, |
|
{ |
|
"completion_length": 297.90365505218506, |
|
"epoch": 0.21013333333333334, |
|
"grad_norm": 0.1317731635826343, |
|
"kl": 0.22271728515625, |
|
"learning_rate": 2.007762010589098e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5130208693444729, |
|
"reward_std": 0.25085952738299966, |
|
"rewards/equation_reward_func": 0.5520833525806665, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 394 |
|
}, |
|
{ |
|
"completion_length": 308.4817838668823, |
|
"epoch": 0.2112, |
|
"grad_norm": 0.18333770809820993, |
|
"kl": 0.3961181640625, |
|
"learning_rate": 1.8686938986000627e-08, |
|
"loss": 0.0004, |
|
"reward": 1.377604205161333, |
|
"reward_std": 0.16871334984898567, |
|
"rewards/equation_reward_func": 0.416666679084301, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 396 |
|
}, |
|
{ |
|
"completion_length": 323.61198902130127, |
|
"epoch": 0.21226666666666666, |
|
"grad_norm": 0.10822818670117657, |
|
"kl": 0.22650146484375, |
|
"learning_rate": 1.734429525554365e-08, |
|
"loss": 0.0002, |
|
"reward": 1.328125037252903, |
|
"reward_std": 0.22037217440083623, |
|
"rewards/equation_reward_func": 0.3828125118743628, |
|
"rewards/format_reward_func": 0.9453125223517418, |
|
"step": 398 |
|
}, |
|
{ |
|
"completion_length": 310.49480152130127, |
|
"epoch": 0.21333333333333335, |
|
"grad_norm": 0.08677594432490206, |
|
"kl": 0.2261962890625, |
|
"learning_rate": 1.604996774486145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3958333730697632, |
|
"reward_std": 0.16300127794966102, |
|
"rewards/equation_reward_func": 0.44270834466442466, |
|
"rewards/format_reward_func": 0.9531250186264515, |
|
"step": 400 |
|
}, |
|
{ |
|
"completion_length": 315.83594608306885, |
|
"epoch": 0.2144, |
|
"grad_norm": 0.09329093411722503, |
|
"kl": 0.21697998046875, |
|
"learning_rate": 1.4804225250339281e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4583333656191826, |
|
"reward_std": 0.2248752093873918, |
|
"rewards/equation_reward_func": 0.4947916781529784, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 402 |
|
}, |
|
{ |
|
"completion_length": 305.27605056762695, |
|
"epoch": 0.21546666666666667, |
|
"grad_norm": 0.09309626955656056, |
|
"kl": 0.22406005859375, |
|
"learning_rate": 1.360732647858498e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4114583805203438, |
|
"reward_std": 0.25712150149047375, |
|
"rewards/equation_reward_func": 0.46093751303851604, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 404 |
|
}, |
|
{ |
|
"completion_length": 300.04948902130127, |
|
"epoch": 0.21653333333333333, |
|
"grad_norm": 0.06996801120635364, |
|
"kl": 0.22509765625, |
|
"learning_rate": 1.2459519992702311e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4218750409781933, |
|
"reward_std": 0.15984355006366968, |
|
"rewards/equation_reward_func": 0.4583333460614085, |
|
"rewards/format_reward_func": 0.9635416753590107, |
|
"step": 406 |
|
}, |
|
{ |
|
"completion_length": 353.1041774749756, |
|
"epoch": 0.2176, |
|
"grad_norm": 0.08583468471450732, |
|
"kl": 0.231201171875, |
|
"learning_rate": 1.1361044160671629e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3619791977107525, |
|
"reward_std": 0.21500447858124971, |
|
"rewards/equation_reward_func": 0.41145834792405367, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 408 |
|
}, |
|
{ |
|
"completion_length": 315.5390739440918, |
|
"epoch": 0.21866666666666668, |
|
"grad_norm": 0.10411756903686771, |
|
"kl": 0.20782470703125, |
|
"learning_rate": 1.0312127105846947e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3854167088866234, |
|
"reward_std": 0.1989023443311453, |
|
"rewards/equation_reward_func": 0.41666667722165585, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 410 |
|
}, |
|
{ |
|
"completion_length": 289.3567810058594, |
|
"epoch": 0.21973333333333334, |
|
"grad_norm": 0.08432337279172568, |
|
"kl": 0.2266845703125, |
|
"learning_rate": 9.312986659581301e-09, |
|
"loss": 0.0002, |
|
"reward": 1.5078125409781933, |
|
"reward_std": 0.16473140195012093, |
|
"rewards/equation_reward_func": 0.5286458446644247, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 412 |
|
}, |
|
{ |
|
"completion_length": 328.8099031448364, |
|
"epoch": 0.2208, |
|
"grad_norm": 0.08818627291080708, |
|
"kl": 0.21533203125, |
|
"learning_rate": 8.363830315988945e-09, |
|
"loss": 0.0002, |
|
"reward": 1.403645858168602, |
|
"reward_std": 0.20907884137704968, |
|
"rewards/equation_reward_func": 0.4531250139698386, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 414 |
|
}, |
|
{ |
|
"completion_length": 294.4974031448364, |
|
"epoch": 0.22186666666666666, |
|
"grad_norm": 0.09531400720095902, |
|
"kl": 0.2193603515625, |
|
"learning_rate": 7.46485518885462e-09, |
|
"loss": 0.0002, |
|
"reward": 1.442708384245634, |
|
"reward_std": 0.18354794522747397, |
|
"rewards/equation_reward_func": 0.4791666781529784, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 416 |
|
}, |
|
{ |
|
"completion_length": 324.81511211395264, |
|
"epoch": 0.22293333333333334, |
|
"grad_norm": 0.1078543321671686, |
|
"kl": 0.2205810546875, |
|
"learning_rate": 6.616247970698319e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4036458656191826, |
|
"reward_std": 0.20324593503028154, |
|
"rewards/equation_reward_func": 0.42968750931322575, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 418 |
|
}, |
|
{ |
|
"completion_length": 329.2942781448364, |
|
"epoch": 0.224, |
|
"grad_norm": 0.07753331154087116, |
|
"kl": 0.2059326171875, |
|
"learning_rate": 5.8181848940044855e-09, |
|
"loss": 0.0002, |
|
"reward": 1.343750037252903, |
|
"reward_std": 0.20026329206302762, |
|
"rewards/equation_reward_func": 0.39322917629033327, |
|
"rewards/format_reward_func": 0.9505208469927311, |
|
"step": 420 |
|
}, |
|
{ |
|
"completion_length": 258.8515682220459, |
|
"epoch": 0.22506666666666666, |
|
"grad_norm": 1.0694633359031593, |
|
"kl": 1.15753173828125, |
|
"learning_rate": 5.070831694623135e-09, |
|
"loss": 0.0012, |
|
"reward": 1.575520858168602, |
|
"reward_std": 0.1907555889338255, |
|
"rewards/equation_reward_func": 0.6197916809469461, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 422 |
|
}, |
|
{ |
|
"completion_length": 340.5260534286499, |
|
"epoch": 0.22613333333333333, |
|
"grad_norm": 0.13996783116669323, |
|
"kl": 0.2117919921875, |
|
"learning_rate": 4.374343577351336e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3437500596046448, |
|
"reward_std": 0.21045360853895545, |
|
"rewards/equation_reward_func": 0.398437513737008, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 424 |
|
}, |
|
{ |
|
"completion_length": 314.54167461395264, |
|
"epoch": 0.2272, |
|
"grad_norm": 0.11553621912416584, |
|
"kl": 0.55157470703125, |
|
"learning_rate": 3.7288651837012745e-09, |
|
"loss": 0.0006, |
|
"reward": 1.377604205161333, |
|
"reward_std": 0.2037918628193438, |
|
"rewards/equation_reward_func": 0.42968751140870154, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 426 |
|
}, |
|
{ |
|
"completion_length": 304.3593854904175, |
|
"epoch": 0.22826666666666667, |
|
"grad_norm": 0.14316318289656946, |
|
"kl": 0.23516845703125, |
|
"learning_rate": 3.134530561862081e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4114583805203438, |
|
"reward_std": 0.2521082772873342, |
|
"rewards/equation_reward_func": 0.4687500149011612, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 428 |
|
}, |
|
{ |
|
"completion_length": 299.0755262374878, |
|
"epoch": 0.22933333333333333, |
|
"grad_norm": 0.12545727015621191, |
|
"kl": 0.2213134765625, |
|
"learning_rate": 2.5914631388619103e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4453125447034836, |
|
"reward_std": 0.18874157266691327, |
|
"rewards/equation_reward_func": 0.4869791781529784, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 430 |
|
}, |
|
{ |
|
"completion_length": 265.3099060058594, |
|
"epoch": 0.2304, |
|
"grad_norm": 0.10719374815625937, |
|
"kl": 0.2373046875, |
|
"learning_rate": 2.0997756949353297e-09, |
|
"loss": 0.0002, |
|
"reward": 1.5390625298023224, |
|
"reward_std": 0.15120991226285696, |
|
"rewards/equation_reward_func": 0.5546875158324838, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 432 |
|
}, |
|
{ |
|
"completion_length": 321.8854274749756, |
|
"epoch": 0.23146666666666665, |
|
"grad_norm": 0.1258623045036253, |
|
"kl": 0.20965576171875, |
|
"learning_rate": 1.6595703401020844e-09, |
|
"loss": 0.0002, |
|
"reward": 1.401041690260172, |
|
"reward_std": 0.23577453382313251, |
|
"rewards/equation_reward_func": 0.45312501303851604, |
|
"rewards/format_reward_func": 0.9479166902601719, |
|
"step": 434 |
|
}, |
|
{ |
|
"completion_length": 322.6406316757202, |
|
"epoch": 0.23253333333333334, |
|
"grad_norm": 0.10043219855641766, |
|
"kl": 0.21881103515625, |
|
"learning_rate": 1.2709384929615596e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3489583618938923, |
|
"reward_std": 0.24870790215209126, |
|
"rewards/equation_reward_func": 0.41145834466442466, |
|
"rewards/format_reward_func": 0.9375000186264515, |
|
"step": 436 |
|
}, |
|
{ |
|
"completion_length": 335.68751335144043, |
|
"epoch": 0.2336, |
|
"grad_norm": 0.07244703059521732, |
|
"kl": 0.234619140625, |
|
"learning_rate": 9.339608617077165e-10, |
|
"loss": 0.0002, |
|
"reward": 1.380208358168602, |
|
"reward_std": 0.23140405910089612, |
|
"rewards/equation_reward_func": 0.4427083460614085, |
|
"rewards/format_reward_func": 0.9375000186264515, |
|
"step": 438 |
|
}, |
|
{ |
|
"completion_length": 291.8854269981384, |
|
"epoch": 0.23466666666666666, |
|
"grad_norm": 0.08547530539534628, |
|
"kl": 0.23675537109375, |
|
"learning_rate": 6.487074273681114e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4973958656191826, |
|
"reward_std": 0.15349498065188527, |
|
"rewards/equation_reward_func": 0.5234375093132257, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 440 |
|
}, |
|
{ |
|
"completion_length": 312.1250047683716, |
|
"epoch": 0.23573333333333332, |
|
"grad_norm": 0.074850637657335, |
|
"kl": 0.22784423828125, |
|
"learning_rate": 4.152374292708538e-10, |
|
"loss": 0.0002, |
|
"reward": 1.3593750521540642, |
|
"reward_std": 0.22883992129936814, |
|
"rewards/equation_reward_func": 0.4192708432674408, |
|
"rewards/format_reward_func": 0.9401041902601719, |
|
"step": 442 |
|
}, |
|
{ |
|
"completion_length": 294.8177156448364, |
|
"epoch": 0.2368, |
|
"grad_norm": 0.07224815686769165, |
|
"kl": 0.2313232421875, |
|
"learning_rate": 2.3359935274214204e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4661458730697632, |
|
"reward_std": 0.1822908315807581, |
|
"rewards/equation_reward_func": 0.5026041779201478, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 444 |
|
}, |
|
{ |
|
"completion_length": 323.0520935058594, |
|
"epoch": 0.23786666666666667, |
|
"grad_norm": 0.0603941305498588, |
|
"kl": 0.21807861328125, |
|
"learning_rate": 1.0383091903720665e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4427083805203438, |
|
"reward_std": 0.21741202333942056, |
|
"rewards/equation_reward_func": 0.48177084885537624, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 446 |
|
}, |
|
{ |
|
"completion_length": 299.1953172683716, |
|
"epoch": 0.23893333333333333, |
|
"grad_norm": 0.09701186524357072, |
|
"kl": 0.20562744140625, |
|
"learning_rate": 2.595907750671533e-11, |
|
"loss": 0.0002, |
|
"reward": 1.5156250298023224, |
|
"reward_std": 0.19597027264535427, |
|
"rewards/equation_reward_func": 0.5546875107102096, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 448 |
|
}, |
|
{ |
|
"completion_length": 325.67709255218506, |
|
"epoch": 0.24, |
|
"grad_norm": 0.1517026640806461, |
|
"kl": 0.2586669921875, |
|
"learning_rate": 0.0, |
|
"loss": 0.0003, |
|
"reward": 1.3750000335276127, |
|
"reward_std": 0.18915646150708199, |
|
"rewards/equation_reward_func": 0.41666667629033327, |
|
"rewards/format_reward_func": 0.9583333432674408, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"step": 450, |
|
"total_flos": 0.0, |
|
"train_loss": 3.0447768125062188e-05, |
|
"train_runtime": 1961.2328, |
|
"train_samples_per_second": 5.507, |
|
"train_steps_per_second": 0.229 |
|
} |
|
], |
|
"logging_steps": 2, |
|
"max_steps": 450, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|