Edelweisse/Sentiment-Bpjs: results/trainer_state.json (commit 2c726f2, verified)
{
"best_metric": 0.8897849462365591,
"best_model_checkpoint": "./results/checkpoint-465",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 465,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10752688172043011,
"grad_norm": 77.31194305419922,
"learning_rate": 1.5e-06,
"loss": 1.0774,
"step": 10
},
{
"epoch": 0.21505376344086022,
"grad_norm": 21.743606567382812,
"learning_rate": 3e-06,
"loss": 1.205,
"step": 20
},
{
"epoch": 0.3225806451612903,
"grad_norm": 46.53227233886719,
"learning_rate": 4.5e-06,
"loss": 0.9877,
"step": 30
},
{
"epoch": 0.43010752688172044,
"grad_norm": 60.25495147705078,
"learning_rate": 6e-06,
"loss": 0.7626,
"step": 40
},
{
"epoch": 0.5376344086021505,
"grad_norm": 42.291969299316406,
"learning_rate": 7.5e-06,
"loss": 0.6559,
"step": 50
},
{
"epoch": 0.6451612903225806,
"grad_norm": 27.09429931640625,
"learning_rate": 9e-06,
"loss": 0.3539,
"step": 60
},
{
"epoch": 0.7526881720430108,
"grad_norm": 29.117910385131836,
"learning_rate": 1.05e-05,
"loss": 0.516,
"step": 70
},
{
"epoch": 0.8602150537634409,
"grad_norm": 35.026817321777344,
"learning_rate": 1.2e-05,
"loss": 0.5457,
"step": 80
},
{
"epoch": 0.967741935483871,
"grad_norm": 17.851289749145508,
"learning_rate": 1.3500000000000001e-05,
"loss": 0.5821,
"step": 90
},
{
"epoch": 1.0,
"eval_accuracy": 0.8655913978494624,
"eval_f1": 0.8661634583786485,
"eval_loss": 0.3687117397785187,
"eval_runtime": 1.6159,
"eval_samples_per_second": 230.212,
"eval_steps_per_second": 3.713,
"step": 93
},
{
"epoch": 1.075268817204301,
"grad_norm": 13.748136520385742,
"learning_rate": 1.5e-05,
"loss": 0.307,
"step": 100
},
{
"epoch": 1.1827956989247312,
"grad_norm": 3.2135958671569824,
"learning_rate": 1.65e-05,
"loss": 0.4692,
"step": 110
},
{
"epoch": 1.2903225806451613,
"grad_norm": 11.485154151916504,
"learning_rate": 1.8e-05,
"loss": 0.3206,
"step": 120
},
{
"epoch": 1.3978494623655915,
"grad_norm": 7.797122478485107,
"learning_rate": 1.95e-05,
"loss": 0.3138,
"step": 130
},
{
"epoch": 1.5053763440860215,
"grad_norm": 6.313217639923096,
"learning_rate": 2.1e-05,
"loss": 0.3135,
"step": 140
},
{
"epoch": 1.6129032258064515,
"grad_norm": 18.319936752319336,
"learning_rate": 2.25e-05,
"loss": 0.2156,
"step": 150
},
{
"epoch": 1.7204301075268817,
"grad_norm": 21.939163208007812,
"learning_rate": 2.4e-05,
"loss": 0.2749,
"step": 160
},
{
"epoch": 1.827956989247312,
"grad_norm": 20.2148380279541,
"learning_rate": 2.55e-05,
"loss": 0.4502,
"step": 170
},
{
"epoch": 1.935483870967742,
"grad_norm": 22.044946670532227,
"learning_rate": 2.7000000000000002e-05,
"loss": 0.364,
"step": 180
},
{
"epoch": 2.0,
"eval_accuracy": 0.8763440860215054,
"eval_f1": 0.8742605847498066,
"eval_loss": 0.34187108278274536,
"eval_runtime": 1.6404,
"eval_samples_per_second": 226.774,
"eval_steps_per_second": 3.658,
"step": 186
},
{
"epoch": 2.043010752688172,
"grad_norm": 1.5786309242248535,
"learning_rate": 2.8499999999999998e-05,
"loss": 0.251,
"step": 190
},
{
"epoch": 2.150537634408602,
"grad_norm": 0.3100162446498871,
"learning_rate": 3e-05,
"loss": 0.0491,
"step": 200
},
{
"epoch": 2.258064516129032,
"grad_norm": 10.301804542541504,
"learning_rate": 2.8867924528301887e-05,
"loss": 0.0978,
"step": 210
},
{
"epoch": 2.3655913978494625,
"grad_norm": 0.5391497015953064,
"learning_rate": 2.7735849056603773e-05,
"loss": 0.2325,
"step": 220
},
{
"epoch": 2.4731182795698925,
"grad_norm": 33.04327392578125,
"learning_rate": 2.6603773584905663e-05,
"loss": 0.3053,
"step": 230
},
{
"epoch": 2.5806451612903225,
"grad_norm": 1.8449115753173828,
"learning_rate": 2.547169811320755e-05,
"loss": 0.1469,
"step": 240
},
{
"epoch": 2.688172043010753,
"grad_norm": 41.86624526977539,
"learning_rate": 2.4339622641509435e-05,
"loss": 0.0985,
"step": 250
},
{
"epoch": 2.795698924731183,
"grad_norm": 12.837655067443848,
"learning_rate": 2.320754716981132e-05,
"loss": 0.1707,
"step": 260
},
{
"epoch": 2.903225806451613,
"grad_norm": 7.691096305847168,
"learning_rate": 2.2075471698113208e-05,
"loss": 0.1889,
"step": 270
},
{
"epoch": 3.0,
"eval_accuracy": 0.8548387096774194,
"eval_f1": 0.8514191596326964,
"eval_loss": 0.5420617461204529,
"eval_runtime": 1.6403,
"eval_samples_per_second": 226.793,
"eval_steps_per_second": 3.658,
"step": 279
},
{
"epoch": 3.010752688172043,
"grad_norm": 24.820016860961914,
"learning_rate": 2.0943396226415094e-05,
"loss": 0.2798,
"step": 280
},
{
"epoch": 3.118279569892473,
"grad_norm": 0.1231377124786377,
"learning_rate": 1.981132075471698e-05,
"loss": 0.1421,
"step": 290
},
{
"epoch": 3.225806451612903,
"grad_norm": 22.27264404296875,
"learning_rate": 1.8679245283018867e-05,
"loss": 0.1455,
"step": 300
},
{
"epoch": 3.3333333333333335,
"grad_norm": 43.31319046020508,
"learning_rate": 1.7547169811320753e-05,
"loss": 0.1223,
"step": 310
},
{
"epoch": 3.4408602150537635,
"grad_norm": 0.16742512583732605,
"learning_rate": 1.6415094339622643e-05,
"loss": 0.0214,
"step": 320
},
{
"epoch": 3.5483870967741935,
"grad_norm": 44.84831619262695,
"learning_rate": 1.528301886792453e-05,
"loss": 0.0278,
"step": 330
},
{
"epoch": 3.6559139784946235,
"grad_norm": 16.63416862487793,
"learning_rate": 1.4150943396226415e-05,
"loss": 0.0238,
"step": 340
},
{
"epoch": 3.763440860215054,
"grad_norm": 0.019003387540578842,
"learning_rate": 1.3018867924528303e-05,
"loss": 0.0788,
"step": 350
},
{
"epoch": 3.870967741935484,
"grad_norm": 0.062045346945524216,
"learning_rate": 1.188679245283019e-05,
"loss": 0.0247,
"step": 360
},
{
"epoch": 3.978494623655914,
"grad_norm": 36.90345001220703,
"learning_rate": 1.0754716981132076e-05,
"loss": 0.1049,
"step": 370
},
{
"epoch": 4.0,
"eval_accuracy": 0.8844086021505376,
"eval_f1": 0.8851071788381587,
"eval_loss": 0.5284830927848816,
"eval_runtime": 1.6712,
"eval_samples_per_second": 222.589,
"eval_steps_per_second": 3.59,
"step": 372
},
{
"epoch": 4.086021505376344,
"grad_norm": 0.04606785252690315,
"learning_rate": 9.622641509433962e-06,
"loss": 0.0563,
"step": 380
},
{
"epoch": 4.193548387096774,
"grad_norm": 0.11379247903823853,
"learning_rate": 8.49056603773585e-06,
"loss": 0.0007,
"step": 390
},
{
"epoch": 4.301075268817204,
"grad_norm": 0.057167768478393555,
"learning_rate": 7.358490566037736e-06,
"loss": 0.0008,
"step": 400
},
{
"epoch": 4.408602150537634,
"grad_norm": 1.8930367231369019,
"learning_rate": 6.226415094339623e-06,
"loss": 0.0016,
"step": 410
},
{
"epoch": 4.516129032258064,
"grad_norm": 0.10506568104028702,
"learning_rate": 5.094339622641509e-06,
"loss": 0.0183,
"step": 420
},
{
"epoch": 4.623655913978495,
"grad_norm": 0.29419633746147156,
"learning_rate": 3.962264150943396e-06,
"loss": 0.0379,
"step": 430
},
{
"epoch": 4.731182795698925,
"grad_norm": 1.4433554410934448,
"learning_rate": 2.830188679245283e-06,
"loss": 0.0011,
"step": 440
},
{
"epoch": 4.838709677419355,
"grad_norm": 0.05199088156223297,
"learning_rate": 1.69811320754717e-06,
"loss": 0.0276,
"step": 450
},
{
"epoch": 4.946236559139785,
"grad_norm": 0.017505839467048645,
"learning_rate": 5.660377358490566e-07,
"loss": 0.0009,
"step": 460
},
{
"epoch": 5.0,
"eval_accuracy": 0.8897849462365591,
"eval_f1": 0.8902518525126154,
"eval_loss": 0.518719494342804,
"eval_runtime": 1.6655,
"eval_samples_per_second": 223.351,
"eval_steps_per_second": 3.602,
"step": 465
},
{
"epoch": 5.0,
"step": 465,
"total_flos": 312883992936900.0,
"train_loss": 0.26607618075144546,
"train_runtime": 161.9779,
"train_samples_per_second": 45.84,
"train_steps_per_second": 2.871
},
{
"epoch": 5.0,
"eval_accuracy": 0.8897849462365591,
"eval_f1": 0.8902518525126154,
"eval_loss": 0.518719494342804,
"eval_runtime": 1.6415,
"eval_samples_per_second": 226.619,
"eval_steps_per_second": 3.655,
"step": 465
}
],
"logging_steps": 10,
"max_steps": 465,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 312883992936900.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
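
The state above can be inspected programmatically. A minimal sketch, assuming the file has been downloaded locally as trainer_state.json and that only the Python standard library is available (no Hugging Face dependencies); the file name and print format are illustrative, not part of the original repo:

import json

# Load the trainer state written out by transformers.Trainer (the JSON above).
with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric (eval_accuracy):", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Pull the per-epoch evaluation entries out of log_history and summarize them.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f'epoch {e["epoch"]:.1f}  step {e["step"]:>3}  '
          f'acc {e["eval_accuracy"]:.4f}  f1 {e["eval_f1"]:.4f}  '
          f'loss {e["eval_loss"]:.4f}')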