{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 10,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 2.0,
"grad_norm": 2.5965750217437744,
"learning_rate": 0.00018,
"loss": 0.7517,
"step": 10
},
{
"epoch": 2.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.4700092077255249,
"eval_runtime": 0.0309,
"eval_samples_per_second": 582.425,
"eval_steps_per_second": 64.714,
"step": 10
},
{
"epoch": 4.0,
"grad_norm": 0.535392701625824,
"learning_rate": 0.00016,
"loss": 0.2834,
"step": 20
},
{
"epoch": 4.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.3312886953353882,
"eval_runtime": 0.0245,
"eval_samples_per_second": 735.112,
"eval_steps_per_second": 81.679,
"step": 20
},
{
"epoch": 6.0,
"grad_norm": 0.14222599565982819,
"learning_rate": 0.00014,
"loss": 0.0488,
"step": 30
},
{
"epoch": 6.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.35279521346092224,
"eval_runtime": 0.0265,
"eval_samples_per_second": 678.105,
"eval_steps_per_second": 75.345,
"step": 30
},
{
"epoch": 8.0,
"grad_norm": 0.09212491661310196,
"learning_rate": 0.00012,
"loss": 0.0181,
"step": 40
},
{
"epoch": 8.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.6354701519012451,
"eval_runtime": 0.0268,
"eval_samples_per_second": 672.847,
"eval_steps_per_second": 74.761,
"step": 40
},
{
"epoch": 10.0,
"grad_norm": 0.05534437298774719,
"learning_rate": 0.0001,
"loss": 0.0079,
"step": 50
},
{
"epoch": 10.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.6675615310668945,
"eval_runtime": 0.0382,
"eval_samples_per_second": 471.491,
"eval_steps_per_second": 52.388,
"step": 50
},
{
"epoch": 12.0,
"grad_norm": 0.04630253463983536,
"learning_rate": 8e-05,
"loss": 0.0437,
"step": 60
},
{
"epoch": 12.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.5816599130630493,
"eval_runtime": 0.0247,
"eval_samples_per_second": 728.213,
"eval_steps_per_second": 80.913,
"step": 60
},
{
"epoch": 14.0,
"grad_norm": 0.04637857526540756,
"learning_rate": 6e-05,
"loss": 0.0049,
"step": 70
},
{
"epoch": 14.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.4498683512210846,
"eval_runtime": 0.0252,
"eval_samples_per_second": 713.378,
"eval_steps_per_second": 79.264,
"step": 70
},
{
"epoch": 16.0,
"grad_norm": 0.1237090453505516,
"learning_rate": 4e-05,
"loss": 0.0192,
"step": 80
},
{
"epoch": 16.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.516182541847229,
"eval_runtime": 0.0258,
"eval_samples_per_second": 698.146,
"eval_steps_per_second": 77.572,
"step": 80
},
{
"epoch": 18.0,
"grad_norm": 0.03347891941666603,
"learning_rate": 2e-05,
"loss": 0.0045,
"step": 90
},
{
"epoch": 18.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.5420387983322144,
"eval_runtime": 0.0267,
"eval_samples_per_second": 673.549,
"eval_steps_per_second": 74.839,
"step": 90
},
{
"epoch": 20.0,
"grad_norm": 0.03203904256224632,
"learning_rate": 0.0,
"loss": 0.0042,
"step": 100
},
{
"epoch": 20.0,
"eval_accuracy": 0.8888888888888888,
"eval_loss": 0.5463488101959229,
"eval_runtime": 0.0257,
"eval_samples_per_second": 699.731,
"eval_steps_per_second": 77.748,
"step": 100
},
{
"epoch": 20.0,
"step": 100,
"total_flos": 16662479407056.0,
"train_loss": 0.11865015212446452,
"train_runtime": 23.7587,
"train_samples_per_second": 67.344,
"train_steps_per_second": 4.209
}
],
"logging_steps": 10,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 20,
"total_flos": 16662479407056.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}