RoyJoy committed (verified)
Commit 7c994e3 · 1 Parent(s): 6a90752

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:127b1b7fcb9b574ff4282ba4d59294ba071d97a3909df7c979eb010fcd9b452c
+oid sha256:344672b76a6eee910620b479fe729ea819e037970416a2da9c1f480b695cc16c
 size 97307544
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c260969f6009d482216eacd547de9c0949176b00ff45ca00f6087c08134401c
+oid sha256:e2f7b880d09d0c3e663dd277d43931934dd5cae2c72a1d71171a7d9c4cdb79e1
 size 194840426
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8fd7e7e2b391fa17b4b3f03a5df7af4b83f05a92f4159d8c450dccd8a2bebdf7
+oid sha256:0f040d6a8c201abcf3917e0a17887621ab7f00285dd9628429c57382ef120ecc
 size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:812fff631316bd9fe72a9f6ce9e49bd30575ca32f35699b84a2d6bc9385b5a2d
+oid sha256:d96455b0fad3098637e80e0d67d418f96909245c6b651f9a052ab7d0d6a61b37
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa16ed0829b2165a3e10691c48e4da082bf5bffb3ea8bc98a99ab7a25467149d
+oid sha256:f8634313385285fd8196210c6d605ebf00c471ef08042ed77baf52e7f3ee383f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aaebebb482b02cbfec23585c713fba36dfd6d9523e09040459f6e3f1dd1667f6
+oid sha256:45d012378b6142add5604e3eaceff1e6b4f62ab3610e3a42446351ba5865dcab
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83a4f28ad595f3787795cfa4023a55d79a0c7173d55330463a0fe673589854e9
+oid sha256:9638e97525c935b52387c043e18f68d18c825b65e2c95cc7a3dc46bbe55a1e3b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.1133440732955933,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.04612971676353907,
+  "best_metric": 1.0208970308303833,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.09225943352707815,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -381,6 +381,372 @@
381
  "eval_samples_per_second": 82.801,
382
  "eval_steps_per_second": 8.28,
383
  "step": 50
384
+ },
385
+ {
386
+ "epoch": 0.047052311098809856,
387
+ "grad_norm": 0.5303361415863037,
388
+ "learning_rate": 0.00019380934412829232,
389
+ "loss": 1.1873,
390
+ "step": 51
391
+ },
392
+ {
393
+ "epoch": 0.04797490543408064,
394
+ "grad_norm": 0.717869222164154,
395
+ "learning_rate": 0.00019348327417301517,
396
+ "loss": 1.2218,
397
+ "step": 52
398
+ },
399
+ {
400
+ "epoch": 0.04889749976935141,
401
+ "grad_norm": 0.6033234000205994,
402
+ "learning_rate": 0.00019314915792601581,
403
+ "loss": 1.2588,
404
+ "step": 53
405
+ },
406
+ {
407
+ "epoch": 0.049820094104622195,
408
+ "grad_norm": 0.5174582004547119,
409
+ "learning_rate": 0.00019280702759011354,
410
+ "loss": 1.2331,
411
+ "step": 54
412
+ },
413
+ {
414
+ "epoch": 0.05074268843989298,
415
+ "grad_norm": 0.4686112105846405,
416
+ "learning_rate": 0.00019245691614054226,
417
+ "loss": 1.1302,
418
+ "step": 55
419
+ },
420
+ {
421
+ "epoch": 0.05166528277516376,
422
+ "grad_norm": 0.4974168539047241,
423
+ "learning_rate": 0.0001920988573217721,
424
+ "loss": 1.1626,
425
+ "step": 56
426
+ },
427
+ {
428
+ "epoch": 0.05258787711043454,
429
+ "grad_norm": 0.5215023756027222,
430
+ "learning_rate": 0.00019173288564425733,
431
+ "loss": 1.1399,
432
+ "step": 57
433
+ },
434
+ {
435
+ "epoch": 0.053510471445705324,
436
+ "grad_norm": 0.49643176794052124,
437
+ "learning_rate": 0.00019135903638110993,
438
+ "loss": 1.1402,
439
+ "step": 58
440
+ },
441
+ {
442
+ "epoch": 0.054433065780976106,
443
+ "grad_norm": 0.5159599184989929,
444
+ "learning_rate": 0.00019097734556469995,
445
+ "loss": 1.0672,
446
+ "step": 59
447
+ },
448
+ {
449
+ "epoch": 0.05535566011624689,
450
+ "grad_norm": 0.5096753239631653,
451
+ "learning_rate": 0.00019058784998318273,
452
+ "loss": 1.0373,
453
+ "step": 60
454
+ },
455
+ {
456
+ "epoch": 0.05627825445151767,
457
+ "grad_norm": 0.5594645142555237,
458
+ "learning_rate": 0.0001901905871769531,
459
+ "loss": 1.0682,
460
+ "step": 61
461
+ },
462
+ {
463
+ "epoch": 0.05720084878678845,
464
+ "grad_norm": 0.8526697158813477,
465
+ "learning_rate": 0.0001897855954350272,
466
+ "loss": 0.8273,
467
+ "step": 62
468
+ },
469
+ {
470
+ "epoch": 0.05812344312205923,
471
+ "grad_norm": 0.5586766004562378,
472
+ "learning_rate": 0.00018937291379135196,
473
+ "loss": 1.1242,
474
+ "step": 63
475
+ },
476
+ {
477
+ "epoch": 0.05904603745733001,
478
+ "grad_norm": 0.5625215172767639,
479
+ "learning_rate": 0.0001889525820210432,
480
+ "loss": 1.3812,
481
+ "step": 64
482
+ },
483
+ {
484
+ "epoch": 0.05996863179260079,
485
+ "grad_norm": 0.46945062279701233,
486
+ "learning_rate": 0.00018852464063655176,
487
+ "loss": 1.2655,
488
+ "step": 65
489
+ },
490
+ {
491
+ "epoch": 0.060891226127871574,
492
+ "grad_norm": 0.49760475754737854,
493
+ "learning_rate": 0.000188089130883759,
494
+ "loss": 1.1365,
495
+ "step": 66
496
+ },
497
+ {
498
+ "epoch": 0.061813820463142356,
499
+ "grad_norm": 0.47834065556526184,
500
+ "learning_rate": 0.00018764609473800127,
501
+ "loss": 1.1662,
502
+ "step": 67
503
+ },
504
+ {
505
+ "epoch": 0.06273641479841313,
506
+ "grad_norm": 0.508231520652771,
507
+ "learning_rate": 0.0001871955749000245,
508
+ "loss": 1.1352,
509
+ "step": 68
510
+ },
511
+ {
512
+ "epoch": 0.06365900913368391,
513
+ "grad_norm": 0.5424789786338806,
514
+ "learning_rate": 0.00018673761479186832,
515
+ "loss": 1.1534,
516
+ "step": 69
517
+ },
518
+ {
519
+ "epoch": 0.0645816034689547,
520
+ "grad_norm": 0.546637773513794,
521
+ "learning_rate": 0.0001862722585526811,
522
+ "loss": 1.1505,
523
+ "step": 70
524
+ },
525
+ {
526
+ "epoch": 0.06550419780422548,
527
+ "grad_norm": 0.5321767330169678,
528
+ "learning_rate": 0.0001857995510344658,
529
+ "loss": 1.0851,
530
+ "step": 71
531
+ },
532
+ {
533
+ "epoch": 0.06642679213949626,
534
+ "grad_norm": 0.5386099219322205,
535
+ "learning_rate": 0.00018531953779775683,
536
+ "loss": 1.0585,
537
+ "step": 72
538
+ },
539
+ {
540
+ "epoch": 0.06734938647476704,
541
+ "grad_norm": 0.5138699412345886,
542
+ "learning_rate": 0.0001848322651072291,
543
+ "loss": 1.0444,
544
+ "step": 73
545
+ },
546
+ {
547
+ "epoch": 0.06827198081003782,
548
+ "grad_norm": 0.6210483312606812,
549
+ "learning_rate": 0.0001843377799272386,
550
+ "loss": 0.9183,
551
+ "step": 74
552
+ },
553
+ {
554
+ "epoch": 0.0691945751453086,
555
+ "grad_norm": 0.6338362097740173,
556
+ "learning_rate": 0.00018383612991729623,
557
+ "loss": 0.2692,
558
+ "step": 75
559
+ },
560
+ {
561
+ "epoch": 0.0691945751453086,
562
+ "eval_loss": 1.0709586143493652,
563
+ "eval_runtime": 0.6044,
564
+ "eval_samples_per_second": 82.723,
565
+ "eval_steps_per_second": 8.272,
566
+ "step": 75
567
+ },
568
+ {
569
+ "epoch": 0.07011716948057939,
570
+ "grad_norm": 0.4989747405052185,
571
+ "learning_rate": 0.00018332736342747404,
572
+ "loss": 1.2276,
573
+ "step": 76
574
+ },
575
+ {
576
+ "epoch": 0.07103976381585017,
577
+ "grad_norm": 0.5539114475250244,
578
+ "learning_rate": 0.00018281152949374527,
579
+ "loss": 1.2626,
580
+ "step": 77
581
+ },
582
+ {
583
+ "epoch": 0.07196235815112095,
584
+ "grad_norm": 0.45691347122192383,
585
+ "learning_rate": 0.00018228867783325804,
586
+ "loss": 1.2259,
587
+ "step": 78
588
+ },
589
+ {
590
+ "epoch": 0.07288495248639174,
591
+ "grad_norm": 0.48130282759666443,
592
+ "learning_rate": 0.00018175885883954364,
593
+ "loss": 1.1878,
594
+ "step": 79
595
+ },
596
+ {
597
+ "epoch": 0.07380754682166252,
598
+ "grad_norm": 0.4967883229255676,
599
+ "learning_rate": 0.00018122212357765945,
600
+ "loss": 1.1519,
601
+ "step": 80
602
+ },
603
+ {
604
+ "epoch": 0.0747301411569333,
605
+ "grad_norm": 0.49831900000572205,
606
+ "learning_rate": 0.00018067852377926704,
607
+ "loss": 1.1397,
608
+ "step": 81
609
+ },
610
+ {
611
+ "epoch": 0.07565273549220408,
612
+ "grad_norm": 0.5220170021057129,
613
+ "learning_rate": 0.0001801281118376465,
614
+ "loss": 1.0622,
615
+ "step": 82
616
+ },
617
+ {
618
+ "epoch": 0.07657532982747486,
619
+ "grad_norm": 0.5449658632278442,
620
+ "learning_rate": 0.00017957094080264634,
621
+ "loss": 1.0355,
622
+ "step": 83
623
+ },
624
+ {
625
+ "epoch": 0.07749792416274565,
626
+ "grad_norm": 0.6329752206802368,
627
+ "learning_rate": 0.00017900706437557054,
628
+ "loss": 1.0584,
629
+ "step": 84
630
+ },
631
+ {
632
+ "epoch": 0.07842051849801643,
633
+ "grad_norm": 0.6001924276351929,
634
+ "learning_rate": 0.00017843653690400278,
635
+ "loss": 1.022,
636
+ "step": 85
637
+ },
638
+ {
639
+ "epoch": 0.07934311283328721,
640
+ "grad_norm": 0.5846312046051025,
641
+ "learning_rate": 0.00017785941337656827,
642
+ "loss": 0.9959,
643
+ "step": 86
644
+ },
645
+ {
646
+ "epoch": 0.08026570716855798,
647
+ "grad_norm": 0.6020424365997314,
648
+ "learning_rate": 0.00017727574941763373,
649
+ "loss": 0.4521,
650
+ "step": 87
651
+ },
652
+ {
653
+ "epoch": 0.08118830150382876,
654
+ "grad_norm": 0.43142932653427124,
655
+ "learning_rate": 0.00017668560128194635,
656
+ "loss": 1.1681,
657
+ "step": 88
658
+ },
659
+ {
660
+ "epoch": 0.08211089583909954,
661
+ "grad_norm": 0.660566508769989,
662
+ "learning_rate": 0.0001760890258492117,
663
+ "loss": 1.3461,
664
+ "step": 89
665
+ },
666
+ {
667
+ "epoch": 0.08303349017437033,
668
+ "grad_norm": 0.5984129309654236,
669
+ "learning_rate": 0.00017548608061861176,
670
+ "loss": 1.2538,
671
+ "step": 90
672
+ },
673
+ {
674
+ "epoch": 0.08395608450964111,
675
+ "grad_norm": 0.5732012391090393,
676
+ "learning_rate": 0.0001748768237032627,
677
+ "loss": 1.2725,
678
+ "step": 91
679
+ },
680
+ {
681
+ "epoch": 0.08487867884491189,
682
+ "grad_norm": 0.5434936881065369,
683
+ "learning_rate": 0.00017426131382461415,
684
+ "loss": 1.0838,
685
+ "step": 92
686
+ },
687
+ {
688
+ "epoch": 0.08580127318018267,
689
+ "grad_norm": 0.4646851718425751,
690
+ "learning_rate": 0.00017363961030678927,
691
+ "loss": 1.0811,
692
+ "step": 93
693
+ },
694
+ {
695
+ "epoch": 0.08672386751545345,
696
+ "grad_norm": 0.48649483919143677,
697
+ "learning_rate": 0.00017301177307086713,
698
+ "loss": 1.0298,
699
+ "step": 94
700
+ },
701
+ {
702
+ "epoch": 0.08764646185072424,
703
+ "grad_norm": 0.5579357147216797,
704
+ "learning_rate": 0.00017237786262910726,
705
+ "loss": 1.0374,
706
+ "step": 95
707
+ },
708
+ {
709
+ "epoch": 0.08856905618599502,
710
+ "grad_norm": 0.612695574760437,
711
+ "learning_rate": 0.00017173794007911735,
712
+ "loss": 1.0592,
713
+ "step": 96
714
+ },
715
+ {
716
+ "epoch": 0.0894916505212658,
717
+ "grad_norm": 0.5733603239059448,
718
+ "learning_rate": 0.00017109206709796477,
719
+ "loss": 1.0191,
720
+ "step": 97
721
+ },
722
+ {
723
+ "epoch": 0.09041424485653658,
724
+ "grad_norm": 0.6126962304115295,
725
+ "learning_rate": 0.00017044030593623167,
726
+ "loss": 1.0036,
727
+ "step": 98
728
+ },
729
+ {
730
+ "epoch": 0.09133683919180736,
731
+ "grad_norm": 0.6726813912391663,
732
+ "learning_rate": 0.00016978271941201536,
733
+ "loss": 0.8152,
734
+ "step": 99
735
+ },
736
+ {
737
+ "epoch": 0.09225943352707815,
738
+ "grad_norm": 0.40060344338417053,
739
+ "learning_rate": 0.00016911937090487373,
740
+ "loss": 0.0465,
741
+ "step": 100
742
+ },
743
+ {
744
+ "epoch": 0.09225943352707815,
745
+ "eval_loss": 1.0208970308303833,
746
+ "eval_runtime": 0.6034,
747
+ "eval_samples_per_second": 82.859,
748
+ "eval_steps_per_second": 8.286,
749
+ "step": 100
750
  }
751
  ],
752
  "logging_steps": 1,
 
@@ -409,7 +775,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.0992764116205568e+17,
+  "total_flos": 4.1985528232411136e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null