{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.29740163905478323,
"eval_steps": 100000000,
"global_step": 32000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 5e-05,
"loss": 7.7296,
"step": 500
},
{
"epoch": 0.0,
"learning_rate": 4.9999535305620204e-05,
"loss": 6.5786,
"step": 1000
},
{
"epoch": 0.0,
"learning_rate": 4.99990706112404e-05,
"loss": 6.2317,
"step": 1500
},
{
"epoch": 0.0,
"learning_rate": 4.99986059168606e-05,
"loss": 6.0053,
"step": 2000
},
{
"epoch": 0.0,
"learning_rate": 4.99981412224808e-05,
"loss": 5.8615,
"step": 2500
},
{
"epoch": 0.0,
"learning_rate": 4.9997676528101e-05,
"loss": 5.6794,
"step": 3000
},
{
"epoch": 0.0,
"learning_rate": 4.99972118337212e-05,
"loss": 5.5481,
"step": 3500
},
{
"epoch": 0.0,
"learning_rate": 4.9996747139341396e-05,
"loss": 5.4375,
"step": 4000
},
{
"epoch": 0.0,
"learning_rate": 4.99962824449616e-05,
"loss": 5.3391,
"step": 4500
},
{
"epoch": 0.0,
"learning_rate": 4.99958177505818e-05,
"loss": 5.2598,
"step": 5000
},
{
"epoch": 0.01,
"learning_rate": 4.9995353056202e-05,
"loss": 5.17,
"step": 5500
},
{
"epoch": 0.01,
"learning_rate": 4.9994888361822204e-05,
"loss": 5.0886,
"step": 6000
},
{
"epoch": 0.01,
"learning_rate": 4.99944236674424e-05,
"loss": 5.0128,
"step": 6500
},
{
"epoch": 0.01,
"learning_rate": 4.99939589730626e-05,
"loss": 4.9504,
"step": 7000
},
{
"epoch": 0.01,
"learning_rate": 4.99934942786828e-05,
"loss": 4.8755,
"step": 7500
},
{
"epoch": 0.01,
"learning_rate": 4.9993029584303e-05,
"loss": 4.8402,
"step": 8000
},
{
"epoch": 0.01,
"learning_rate": 4.99925648899232e-05,
"loss": 4.7674,
"step": 8500
},
{
"epoch": 0.01,
"learning_rate": 4.9992100195543396e-05,
"loss": 4.7254,
"step": 9000
},
{
"epoch": 0.01,
"learning_rate": 4.99916355011636e-05,
"loss": 4.6749,
"step": 9500
},
{
"epoch": 0.01,
"learning_rate": 4.99911708067838e-05,
"loss": 4.6135,
"step": 10000
},
{
"epoch": 0.01,
"learning_rate": 4.9990706112403995e-05,
"loss": 4.5239,
"step": 10500
},
{
"epoch": 0.01,
"learning_rate": 4.99902414180242e-05,
"loss": 4.4729,
"step": 11000
},
{
"epoch": 0.01,
"learning_rate": 4.998977672364439e-05,
"loss": 4.4434,
"step": 11500
},
{
"epoch": 0.01,
"learning_rate": 4.9989312029264594e-05,
"loss": 4.3587,
"step": 12000
},
{
"epoch": 0.01,
"learning_rate": 4.9988847334884796e-05,
"loss": 4.3126,
"step": 12500
},
{
"epoch": 0.01,
"learning_rate": 4.998838264050499e-05,
"loss": 4.2658,
"step": 13000
},
{
"epoch": 0.01,
"learning_rate": 4.998791794612519e-05,
"loss": 4.2412,
"step": 13500
},
{
"epoch": 0.01,
"learning_rate": 4.9987453251745395e-05,
"loss": 4.2035,
"step": 14000
},
{
"epoch": 0.01,
"learning_rate": 4.998698855736559e-05,
"loss": 4.1695,
"step": 14500
},
{
"epoch": 0.01,
"learning_rate": 4.998652386298579e-05,
"loss": 4.1406,
"step": 15000
},
{
"epoch": 0.01,
"learning_rate": 4.9986059168605994e-05,
"loss": 4.1275,
"step": 15500
},
{
"epoch": 0.01,
"learning_rate": 4.9985594474226196e-05,
"loss": 4.1032,
"step": 16000
},
{
"epoch": 0.02,
"learning_rate": 4.99851297798464e-05,
"loss": 4.0588,
"step": 16500
},
{
"epoch": 0.02,
"learning_rate": 4.9984665085466594e-05,
"loss": 4.0536,
"step": 17000
},
{
"epoch": 0.02,
"learning_rate": 4.9984200391086796e-05,
"loss": 3.9979,
"step": 17500
},
{
"epoch": 0.02,
"learning_rate": 4.998373569670699e-05,
"loss": 3.9913,
"step": 18000
},
{
"epoch": 0.02,
"learning_rate": 4.998327100232719e-05,
"loss": 3.988,
"step": 18500
},
{
"epoch": 0.02,
"learning_rate": 4.9982806307947395e-05,
"loss": 3.9575,
"step": 19000
},
{
"epoch": 0.02,
"learning_rate": 4.998234161356759e-05,
"loss": 3.942,
"step": 19500
},
{
"epoch": 0.02,
"learning_rate": 4.998187691918779e-05,
"loss": 3.9258,
"step": 20000
},
{
"epoch": 0.02,
"learning_rate": 4.998141222480799e-05,
"loss": 3.8736,
"step": 20500
},
{
"epoch": 0.02,
"learning_rate": 4.998094753042819e-05,
"loss": 3.903,
"step": 21000
},
{
"epoch": 0.2,
"learning_rate": 4.9804810945458605e-05,
"loss": 3.7108,
"step": 21500
},
{
"epoch": 0.2,
"learning_rate": 4.980016358701714e-05,
"loss": 3.6276,
"step": 22000
},
{
"epoch": 0.21,
"learning_rate": 4.979551622857568e-05,
"loss": 3.568,
"step": 22500
},
{
"epoch": 0.21,
"learning_rate": 4.9790868870134216e-05,
"loss": 3.5307,
"step": 23000
},
{
"epoch": 0.22,
"learning_rate": 4.978622151169275e-05,
"loss": 3.4857,
"step": 23500
},
{
"epoch": 0.22,
"learning_rate": 4.978157415325129e-05,
"loss": 3.4463,
"step": 24000
},
{
"epoch": 0.23,
"learning_rate": 4.977692679480983e-05,
"loss": 3.4396,
"step": 24500
},
{
"epoch": 0.23,
"learning_rate": 4.977227943636837e-05,
"loss": 3.402,
"step": 25000
},
{
"epoch": 0.24,
"learning_rate": 4.9767632077926904e-05,
"loss": 3.3716,
"step": 25500
},
{
"epoch": 0.24,
"learning_rate": 4.9762984719485446e-05,
"loss": 3.3533,
"step": 26000
},
{
"epoch": 0.25,
"learning_rate": 4.975833736104398e-05,
"loss": 3.3084,
"step": 26500
},
{
"epoch": 0.25,
"learning_rate": 4.975369000260252e-05,
"loss": 3.293,
"step": 27000
},
{
"epoch": 0.26,
"learning_rate": 4.974904264416106e-05,
"loss": 3.2707,
"step": 27500
},
{
"epoch": 0.26,
"learning_rate": 4.97443952857196e-05,
"loss": 3.2561,
"step": 28000
},
{
"epoch": 0.26,
"learning_rate": 4.9739747927278134e-05,
"loss": 3.2219,
"step": 28500
},
{
"epoch": 0.27,
"learning_rate": 4.9735100568836676e-05,
"loss": 3.2014,
"step": 29000
},
{
"epoch": 0.27,
"learning_rate": 4.973045321039521e-05,
"loss": 3.1892,
"step": 29500
},
{
"epoch": 0.28,
"learning_rate": 4.972580585195375e-05,
"loss": 3.1734,
"step": 30000
},
{
"epoch": 0.28,
"learning_rate": 4.972115849351229e-05,
"loss": 3.1624,
"step": 30500
},
{
"epoch": 0.29,
"learning_rate": 4.971651113507082e-05,
"loss": 3.1416,
"step": 31000
},
{
"epoch": 0.29,
"learning_rate": 4.9711863776629364e-05,
"loss": 3.1255,
"step": 31500
},
{
"epoch": 0.3,
"learning_rate": 4.97072164181879e-05,
"loss": 3.1074,
"step": 32000
}
],
"logging_steps": 500,
"max_steps": 5379900,
"num_input_tokens_seen": 0,
"num_train_epochs": 50,
"save_steps": 1000,
"total_flos": 1.9955527581696e+18,
"train_batch_size": 7,
"trial_name": null,
"trial_params": null
}