{
  "best_metric": 0.719188596491228,
  "best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-shoplifting-dataset/checkpoint-353",
  "epoch": 9.098863636363637,
  "eval_steps": 500,
  "global_step": 880,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011363636363636364,
      "grad_norm": 0.7822037935256958,
      "learning_rate": 5.681818181818182e-06,
      "loss": 0.1251,
      "step": 10
    },
    {
      "epoch": 0.022727272727272728,
      "grad_norm": 6.924298286437988,
      "learning_rate": 1.1363636363636365e-05,
      "loss": 0.2906,
      "step": 20
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 0.3597455322742462,
      "learning_rate": 1.7045454545454546e-05,
      "loss": 0.1046,
      "step": 30
    },
    {
      "epoch": 0.045454545454545456,
      "grad_norm": 20.039451599121094,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.3495,
      "step": 40
    },
    {
      "epoch": 0.056818181818181816,
      "grad_norm": 19.18282127380371,
      "learning_rate": 2.8409090909090912e-05,
      "loss": 0.3138,
      "step": 50
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 6.543797016143799,
      "learning_rate": 3.409090909090909e-05,
      "loss": 0.2203,
      "step": 60
    },
    {
      "epoch": 0.07954545454545454,
      "grad_norm": 9.147849082946777,
      "learning_rate": 3.9772727272727275e-05,
      "loss": 0.3068,
      "step": 70
    },
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 1.336655616760254,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.1824,
      "step": 80
    },
    {
      "epoch": 0.10113636363636364,
      "eval_f1": 0.6832431989511635,
      "eval_loss": 1.0067527294158936,
      "eval_runtime": 76.7216,
      "eval_samples_per_second": 2.503,
      "eval_steps_per_second": 0.313,
      "step": 89
    },
    {
      "epoch": 1.0011363636363637,
      "grad_norm": 8.309718132019043,
      "learning_rate": 4.9873737373737375e-05,
      "loss": 0.2712,
      "step": 90
    },
    {
      "epoch": 1.0125,
      "grad_norm": 1.4587355852127075,
      "learning_rate": 4.9242424242424245e-05,
      "loss": 0.1706,
      "step": 100
    },
    {
      "epoch": 1.0238636363636364,
      "grad_norm": 24.91333770751953,
      "learning_rate": 4.8611111111111115e-05,
      "loss": 0.2993,
      "step": 110
    },
    {
      "epoch": 1.0352272727272727,
      "grad_norm": 2.1657192707061768,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.1297,
      "step": 120
    },
    {
      "epoch": 1.0465909090909091,
      "grad_norm": 0.5277124643325806,
      "learning_rate": 4.7348484848484855e-05,
      "loss": 0.2617,
      "step": 130
    },
    {
      "epoch": 1.0579545454545454,
      "grad_norm": 1.7243237495422363,
      "learning_rate": 4.671717171717172e-05,
      "loss": 0.1424,
      "step": 140
    },
    {
      "epoch": 1.0693181818181818,
      "grad_norm": 1.7403205633163452,
      "learning_rate": 4.608585858585859e-05,
      "loss": 0.5873,
      "step": 150
    },
    {
      "epoch": 1.080681818181818,
      "grad_norm": 2.7504332065582275,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.2966,
      "step": 160
    },
    {
      "epoch": 1.0920454545454545,
      "grad_norm": 6.052179336547852,
      "learning_rate": 4.482323232323233e-05,
      "loss": 0.334,
      "step": 170
    },
    {
      "epoch": 1.1,
      "eval_f1": 0.6804519337964154,
      "eval_loss": 0.9260269999504089,
      "eval_runtime": 73.8445,
      "eval_samples_per_second": 2.6,
      "eval_steps_per_second": 0.325,
      "step": 177
    },
    {
      "epoch": 2.003409090909091,
      "grad_norm": 0.761626124382019,
      "learning_rate": 4.41919191919192e-05,
      "loss": 0.6221,
      "step": 180
    },
    {
      "epoch": 2.014772727272727,
      "grad_norm": 0.5840346813201904,
      "learning_rate": 4.356060606060606e-05,
      "loss": 0.1877,
      "step": 190
    },
    {
      "epoch": 2.026136363636364,
      "grad_norm": 1.2796881198883057,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.1515,
      "step": 200
    },
    {
      "epoch": 2.0375,
      "grad_norm": 14.390783309936523,
      "learning_rate": 4.2297979797979795e-05,
      "loss": 0.1208,
      "step": 210
    },
    {
      "epoch": 2.0488636363636363,
      "grad_norm": 8.565354347229004,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.1364,
      "step": 220
    },
    {
      "epoch": 2.0602272727272726,
      "grad_norm": 39.79494094848633,
      "learning_rate": 4.1035353535353535e-05,
      "loss": 0.2243,
      "step": 230
    },
    {
      "epoch": 2.0715909090909093,
      "grad_norm": 11.125761985778809,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.1289,
      "step": 240
    },
    {
      "epoch": 2.0829545454545455,
      "grad_norm": 0.10636977106332779,
      "learning_rate": 3.9772727272727275e-05,
      "loss": 0.2634,
      "step": 250
    },
    {
      "epoch": 2.0943181818181817,
      "grad_norm": 6.6989617347717285,
      "learning_rate": 3.9141414141414145e-05,
      "loss": 0.2202,
      "step": 260
    },
    {
      "epoch": 2.1,
      "eval_f1": 0.7139075032998273,
      "eval_loss": 0.9855768084526062,
      "eval_runtime": 80.7383,
      "eval_samples_per_second": 2.378,
      "eval_steps_per_second": 0.297,
      "step": 265
    },
    {
      "epoch": 3.0056818181818183,
      "grad_norm": 7.529367446899414,
      "learning_rate": 3.8510101010101015e-05,
      "loss": 0.1857,
      "step": 270
    },
    {
      "epoch": 3.0170454545454546,
      "grad_norm": 1.7394098043441772,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.1633,
      "step": 280
    },
    {
      "epoch": 3.028409090909091,
      "grad_norm": 35.92728805541992,
      "learning_rate": 3.724747474747475e-05,
      "loss": 0.147,
      "step": 290
    },
    {
      "epoch": 3.039772727272727,
      "grad_norm": 6.699644565582275,
      "learning_rate": 3.661616161616162e-05,
      "loss": 0.1791,
      "step": 300
    },
    {
      "epoch": 3.0511363636363638,
      "grad_norm": 7.0385894775390625,
      "learning_rate": 3.598484848484849e-05,
      "loss": 0.148,
      "step": 310
    },
    {
      "epoch": 3.0625,
      "grad_norm": 3.4319474697113037,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.1414,
      "step": 320
    },
    {
      "epoch": 3.0738636363636362,
      "grad_norm": 1.3963415622711182,
      "learning_rate": 3.472222222222222e-05,
      "loss": 0.0888,
      "step": 330
    },
    {
      "epoch": 3.085227272727273,
      "grad_norm": 0.17751815915107727,
      "learning_rate": 3.409090909090909e-05,
      "loss": 0.1007,
      "step": 340
    },
    {
      "epoch": 3.096590909090909,
      "grad_norm": 8.843611717224121,
      "learning_rate": 3.345959595959596e-05,
      "loss": 0.2074,
      "step": 350
    },
    {
      "epoch": 3.1,
      "eval_f1": 0.719188596491228,
      "eval_loss": 0.9494466781616211,
      "eval_runtime": 66.0371,
      "eval_samples_per_second": 2.907,
      "eval_steps_per_second": 0.363,
      "step": 353
    },
    {
      "epoch": 4.007954545454545,
      "grad_norm": 0.11681011319160461,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.1654,
      "step": 360
    },
    {
      "epoch": 4.019318181818182,
      "grad_norm": 0.03579840064048767,
      "learning_rate": 3.2196969696969696e-05,
      "loss": 0.0781,
      "step": 370
    },
    {
      "epoch": 4.030681818181818,
      "grad_norm": 0.28152555227279663,
      "learning_rate": 3.1565656565656566e-05,
      "loss": 0.0961,
      "step": 380
    },
    {
      "epoch": 4.0420454545454545,
      "grad_norm": 0.36765220761299133,
      "learning_rate": 3.0934343434343436e-05,
      "loss": 0.0591,
      "step": 390
    },
    {
      "epoch": 4.053409090909091,
      "grad_norm": 13.338018417358398,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.0945,
      "step": 400
    },
    {
      "epoch": 4.064772727272727,
      "grad_norm": 9.662626266479492,
      "learning_rate": 2.9671717171717172e-05,
      "loss": 0.1343,
      "step": 410
    },
    {
      "epoch": 4.076136363636364,
      "grad_norm": 1.916236400604248,
      "learning_rate": 2.904040404040404e-05,
      "loss": 0.3043,
      "step": 420
    },
    {
      "epoch": 4.0875,
      "grad_norm": 12.770139694213867,
      "learning_rate": 2.8409090909090912e-05,
      "loss": 0.2391,
      "step": 430
    },
    {
      "epoch": 4.098863636363636,
      "grad_norm": 7.032412052154541,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.0916,
      "step": 440
    },
    {
      "epoch": 4.1,
      "eval_f1": 0.671078431372549,
      "eval_loss": 1.3867233991622925,
      "eval_runtime": 69.8609,
      "eval_samples_per_second": 2.748,
      "eval_steps_per_second": 0.344,
      "step": 441
    },
    {
      "epoch": 5.010227272727272,
      "grad_norm": 9.611908912658691,
      "learning_rate": 2.714646464646465e-05,
      "loss": 0.1568,
      "step": 450
    },
    {
      "epoch": 5.021590909090909,
      "grad_norm": 3.7834885120391846,
      "learning_rate": 2.6515151515151516e-05,
      "loss": 0.3173,
      "step": 460
    },
    {
      "epoch": 5.032954545454546,
      "grad_norm": 0.057657867670059204,
      "learning_rate": 2.5883838383838382e-05,
      "loss": 0.1004,
      "step": 470
    },
    {
      "epoch": 5.0443181818181815,
      "grad_norm": 1.133195161819458,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.0869,
      "step": 480
    },
    {
      "epoch": 5.055681818181818,
      "grad_norm": 6.4852166175842285,
      "learning_rate": 2.4621212121212123e-05,
      "loss": 0.0964,
      "step": 490
    },
    {
      "epoch": 5.067045454545455,
      "grad_norm": 0.01637670025229454,
      "learning_rate": 2.398989898989899e-05,
      "loss": 0.0511,
      "step": 500
    },
    {
      "epoch": 5.078409090909091,
      "grad_norm": 20.669986724853516,
      "learning_rate": 2.335858585858586e-05,
      "loss": 0.2204,
      "step": 510
    },
    {
      "epoch": 5.089772727272727,
      "grad_norm": 0.07228785008192062,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.1092,
      "step": 520
    },
    {
      "epoch": 5.1,
      "eval_f1": 0.6919812909064125,
      "eval_loss": 1.3757562637329102,
      "eval_runtime": 68.2311,
      "eval_samples_per_second": 2.814,
      "eval_steps_per_second": 0.352,
      "step": 529
    },
    {
      "epoch": 6.0011363636363635,
      "grad_norm": 0.03453134745359421,
      "learning_rate": 2.20959595959596e-05,
      "loss": 0.1523,
      "step": 530
    },
    {
      "epoch": 6.0125,
      "grad_norm": 0.4955722689628601,
      "learning_rate": 2.1464646464646466e-05,
      "loss": 0.1371,
      "step": 540
    },
    {
      "epoch": 6.023863636363636,
      "grad_norm": 0.6915594339370728,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 0.0278,
      "step": 550
    },
    {
      "epoch": 6.035227272727273,
      "grad_norm": 0.030776426196098328,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.0441,
      "step": 560
    },
    {
      "epoch": 6.046590909090909,
      "grad_norm": 0.008703567087650299,
      "learning_rate": 1.9570707070707073e-05,
      "loss": 0.0117,
      "step": 570
    },
    {
      "epoch": 6.057954545454545,
      "grad_norm": 12.471619606018066,
      "learning_rate": 1.893939393939394e-05,
      "loss": 0.0576,
      "step": 580
    },
    {
      "epoch": 6.069318181818182,
      "grad_norm": 0.023786788806319237,
      "learning_rate": 1.830808080808081e-05,
      "loss": 0.0282,
      "step": 590
    },
    {
      "epoch": 6.0806818181818185,
      "grad_norm": 0.24424441158771515,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.1136,
      "step": 600
    },
    {
      "epoch": 6.092045454545454,
      "grad_norm": 0.014285405166447163,
      "learning_rate": 1.7045454545454546e-05,
      "loss": 0.0804,
      "step": 610
    },
    {
      "epoch": 6.1,
      "eval_f1": 0.6967864271457085,
      "eval_loss": 1.3787533044815063,
      "eval_runtime": 70.4409,
      "eval_samples_per_second": 2.726,
      "eval_steps_per_second": 0.341,
      "step": 617
    },
    {
      "epoch": 7.0034090909090905,
      "grad_norm": 0.14964497089385986,
      "learning_rate": 1.6414141414141416e-05,
      "loss": 0.0217,
      "step": 620
    },
    {
      "epoch": 7.014772727272727,
      "grad_norm": 22.6522274017334,
      "learning_rate": 1.5782828282828283e-05,
      "loss": 0.2281,
      "step": 630
    },
    {
      "epoch": 7.026136363636364,
      "grad_norm": 0.24736760556697845,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.0425,
      "step": 640
    },
    {
      "epoch": 7.0375,
      "grad_norm": 0.11569665372371674,
      "learning_rate": 1.452020202020202e-05,
      "loss": 0.004,
      "step": 650
    },
    {
      "epoch": 7.048863636363636,
      "grad_norm": 7.235473155975342,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.0268,
      "step": 660
    },
    {
      "epoch": 7.060227272727273,
      "grad_norm": 0.006639318075031042,
      "learning_rate": 1.3257575757575758e-05,
      "loss": 0.0387,
      "step": 670
    },
    {
      "epoch": 7.071590909090909,
      "grad_norm": 0.07593043893575668,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.0685,
      "step": 680
    },
    {
      "epoch": 7.0829545454545455,
      "grad_norm": 0.02903686836361885,
      "learning_rate": 1.1994949494949495e-05,
      "loss": 0.0889,
      "step": 690
    },
    {
      "epoch": 7.094318181818182,
      "grad_norm": 0.10437527298927307,
      "learning_rate": 1.1363636363636365e-05,
      "loss": 0.0654,
      "step": 700
    },
    {
      "epoch": 7.1,
      "eval_f1": 0.697334455667789,
      "eval_loss": 1.2969599962234497,
      "eval_runtime": 75.4359,
      "eval_samples_per_second": 2.545,
      "eval_steps_per_second": 0.318,
      "step": 705
    },
    {
      "epoch": 8.005681818181818,
      "grad_norm": 0.3526234030723572,
      "learning_rate": 1.0732323232323233e-05,
      "loss": 0.1004,
      "step": 710
    },
    {
      "epoch": 8.017045454545455,
      "grad_norm": 0.544172465801239,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.0323,
      "step": 720
    },
    {
      "epoch": 8.028409090909092,
      "grad_norm": 27.582374572753906,
      "learning_rate": 9.46969696969697e-06,
      "loss": 0.0817,
      "step": 730
    },
    {
      "epoch": 8.039772727272727,
      "grad_norm": 0.03466358035802841,
      "learning_rate": 8.838383838383838e-06,
      "loss": 0.033,
      "step": 740
    },
    {
      "epoch": 8.051136363636363,
      "grad_norm": 0.2677808105945587,
      "learning_rate": 8.207070707070708e-06,
      "loss": 0.0078,
      "step": 750
    },
    {
      "epoch": 8.0625,
      "grad_norm": 0.026267457753419876,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.1072,
      "step": 760
    },
    {
      "epoch": 8.073863636363637,
      "grad_norm": 0.036878038197755814,
      "learning_rate": 6.944444444444445e-06,
      "loss": 0.175,
      "step": 770
    },
    {
      "epoch": 8.085227272727273,
      "grad_norm": 3.7212748527526855,
      "learning_rate": 6.313131313131314e-06,
      "loss": 0.0646,
      "step": 780
    },
    {
      "epoch": 8.096590909090908,
      "grad_norm": 0.06381477415561676,
      "learning_rate": 5.681818181818182e-06,
      "loss": 0.0065,
      "step": 790
    },
    {
      "epoch": 8.1,
      "eval_f1": 0.7006284557943339,
      "eval_loss": 1.4780327081680298,
      "eval_runtime": 71.4359,
      "eval_samples_per_second": 2.688,
      "eval_steps_per_second": 0.336,
      "step": 793
    },
    {
      "epoch": 9.007954545454545,
      "grad_norm": 0.14042355120182037,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.0604,
      "step": 800
    },
    {
      "epoch": 9.019318181818182,
      "grad_norm": 0.007114021107554436,
      "learning_rate": 4.419191919191919e-06,
      "loss": 0.1103,
      "step": 810
    },
    {
      "epoch": 9.030681818181819,
      "grad_norm": 3.3601884841918945,
      "learning_rate": 3.7878787878787882e-06,
      "loss": 0.1139,
      "step": 820
    },
    {
      "epoch": 9.042045454545455,
      "grad_norm": 0.0043687219731509686,
      "learning_rate": 3.156565656565657e-06,
      "loss": 0.0034,
      "step": 830
    },
    {
      "epoch": 9.05340909090909,
      "grad_norm": 0.04162032902240753,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.0108,
      "step": 840
    },
    {
      "epoch": 9.064772727272727,
      "grad_norm": 1.4939607381820679,
      "learning_rate": 1.8939393939393941e-06,
      "loss": 0.0889,
      "step": 850
    },
    {
      "epoch": 9.076136363636364,
      "grad_norm": 0.6243687868118286,
      "learning_rate": 1.2626262626262627e-06,
      "loss": 0.0082,
      "step": 860
    },
    {
      "epoch": 9.0875,
      "grad_norm": 0.010640038177371025,
      "learning_rate": 6.313131313131313e-07,
      "loss": 0.0318,
      "step": 870
    },
    {
      "epoch": 9.098863636363637,
      "grad_norm": 0.6547775864601135,
      "learning_rate": 0.0,
      "loss": 0.0024,
      "step": 880
    },
    {
      "epoch": 9.098863636363637,
      "eval_f1": 0.7006284557943339,
      "eval_loss": 1.4463977813720703,
      "eval_runtime": 70.5842,
      "eval_samples_per_second": 2.72,
      "eval_steps_per_second": 0.34,
      "step": 880
    },
    {
      "epoch": 9.098863636363637,
      "step": 880,
      "total_flos": 8.763572257754776e+18,
      "train_loss": 0.14181382804474033,
      "train_runtime": 4562.3352,
      "train_samples_per_second": 1.543,
      "train_steps_per_second": 0.193
    },
    {
      "epoch": 9.098863636363637,
      "eval_f1": 0.719188596491228,
      "eval_loss": 0.9494466781616211,
      "eval_runtime": 71.753,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 0.334,
      "step": 880
    },
    {
      "epoch": 9.098863636363637,
      "eval_f1": 0.719188596491228,
      "eval_loss": 0.9494466781616211,
      "eval_runtime": 72.4119,
      "eval_samples_per_second": 2.651,
      "eval_steps_per_second": 0.331,
      "step": 880
    }
  ],
  "logging_steps": 10,
  "max_steps": 880,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.763572257754776e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}