AmberYifan committed
Commit b35151f · verified · 1 parent: 8d1315e

Training in progress, epoch 3, checkpoint

last-checkpoint/global_step939/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a3030aa2ca00f9c85781616cda70781d489297f5cdd296ea718c3e27e6487b8
+ size 15231238316
last-checkpoint/global_step939/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db553f5bc98d86e1242587970f72aa4784af7c9801c87ca81cc1ce05b655e58c
+ size 15231238316
last-checkpoint/global_step939/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8abdd733236b2c4ca349bbd09f2b0d1f554e7553af46355b8f7490c306fc301
+ size 15231238316
last-checkpoint/global_step939/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7607e35b2fcb9200e3ec640d8d90f150f1b46629a061968a2e8524da88f79d89
+ size 15231238316
last-checkpoint/global_step939/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99b1a3922c793b0c24d306251b0c1441e9f7a9cce14ecb23e7f75bc3c63f9af2
+ size 167957
last-checkpoint/global_step939/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f71281b19ec81ffaa3d5f8398270ac4941b2ac635fe676ef886a45e5c46042cf
+ size 167957
last-checkpoint/global_step939/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cbc34819d4db93c1a1169bbdce31406e3960e5dbe9738e8f1fc22a9e244180d
+ size 167957
last-checkpoint/global_step939/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38e1230e3716503c60f04ce18fba282c382f9e189ddab60f41472b12e0e7d9c7
+ size 167957
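The files added under global_step939/ are the DeepSpeed ZeRO shards for this epoch (per-rank bf16 optimizer states plus the small per-rank model_states files), stored as Git LFS pointers: the diff bodies above contain only the pointer text (spec version, sha256 oid, byte size), not the tensors. A minimal sketch, assuming the blobs have already been pulled locally (e.g. with git lfs pull) into last-checkpoint/; the verify_lfs_blob helper is illustrative, not part of this repository:

import hashlib
import os

def verify_lfs_blob(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare a locally pulled file against the oid/size recorded in its LFS pointer.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example with the rank-0 optimizer shard added in this commit:
ok = verify_lfs_blob(
    "last-checkpoint/global_step939/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt",
    expected_oid="3a3030aa2ca00f9c85781616cda70781d489297f5cdd296ea718c3e27e6487b8",
    expected_size=15231238316,
)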
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step626
+ global_step939
last-checkpoint/model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf0755602096c91fe2b4d381c50fdb77abfc190fe513a5831423ea6e84429558
+ oid sha256:184ff6ae2420eec7f5524f54d118353b9f6a6c3a459b2d101e5547cef9ca9483
  size 4877660776
last-checkpoint/model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:49ee7632cd01e477f147ac9451490851626527180de86e59dea5d2ca13c71496
+ oid sha256:9df66356ef4f00804993ec125ddcc4f4d4813cede56f803c14e5cf74b889ef10
  size 4932751008
last-checkpoint/model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8786338b529aa84877a6cd10b21308adff95260cbad8768e2c333525fe0819ab
+ oid sha256:51aea595ce0da91fb422d4789e5b8672a5e22c43c7832b1436fade9edd7fe70a
  size 4330865200
last-checkpoint/model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d900f8e740b4ee7074c6b81cb2f66112767340d204eba6637442bb533cfe5e84
+ oid sha256:bdeb01658dc309d495b6b3c7c0bee75c4166a79b8666a6bdba1a9e0bdf651ee3
  size 1089994880
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2307c03867cef25b5028feb9a23f80e784b9af9a615de13ddca560a6a90fb593
+ oid sha256:ab7cf0d34d60dfda516cf9661904550e2e294e723edd07c25c738f05e8ba92d1
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50218cfaecdd818354e567b7167c13899e3b42297e7d8f58bd7e732cfa547800
+ oid sha256:f7b183324e8227a51a9556d86b2ad893a8c4c52205ed4a737356c6611dac7353
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9814a66b49861f5495b06dae3be12ddf7185b88e2cae1fb808ca9efd99d5807f
+ oid sha256:ac69e994090f4818cb1fa6f6cefa363178552c3c731c6507ff195bcb07fd5bef
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a7be93040a65e0a29975f6c70b94418e1fdf88423a50c58aa572141d3c92fbfc
+ oid sha256:f68e61b63402f8afb1f69c960f7944965655dac11e3ccf29919c282f23931f86
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f7725b44c69725321786f1e58dccd7ea4d3ae5794ea47bd49c0d4a139aec266d
+ oid sha256:fe7d17fbcfa5bee1bf6cb5aedab3ff5a70436912c200d7301d173be443809d63
  size 1064
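For the CHANGED pointers above, only the oid changes while the size stays identical: the four safetensors shards, the per-rank RNG states, and the scheduler were rewritten in place with the epoch-3 values. A minimal sketch for loading the updated weights from the sharded safetensors, assuming transformers and torch are installed and that last-checkpoint/ also carries the usual config/tokenizer and shard-index files (not shown in this diff); the bfloat16 dtype is an assumption based on the bf16 optimizer shards:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# from_pretrained reassembles the model-0000x-of-00004.safetensors shards listed above.
checkpoint_dir = "last-checkpoint"
model = AutoModelForCausalLM.from_pretrained(checkpoint_dir, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)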
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.0,
+ "epoch": 3.0,
  "eval_steps": 500,
- "global_step": 626,
+ "global_step": 939,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -984,6 +984,487 @@
  "eval_samples_per_second": 14.57,
  "eval_steps_per_second": 0.51,
  "step": 626
+ },
+ {
+ "epoch": 2.012779552715655,
+ "grad_norm": 0.0005327773266414625,
+ "learning_rate": 1.8284023668639053e-07,
+ "logits/chosen": 0.13671875,
+ "logits/rejected": -0.169921875,
+ "logps/chosen": -105.0,
+ "logps/rejected": -402.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.78125,
+ "rewards/margins": 15.5,
+ "rewards/rejected": -13.6875,
+ "step": 630
+ },
+ {
+ "epoch": 2.0447284345047922,
+ "grad_norm": 0.00022759241145421004,
+ "learning_rate": 1.7692307692307693e-07,
+ "logits/chosen": 0.048095703125,
+ "logits/rejected": -0.12060546875,
+ "logps/chosen": -99.5,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.84375,
+ "rewards/margins": 15.625,
+ "rewards/rejected": -13.75,
+ "step": 640
+ },
+ {
+ "epoch": 2.07667731629393,
+ "grad_norm": 0.001652160843436512,
+ "learning_rate": 1.710059171597633e-07,
+ "logits/chosen": -0.00069427490234375,
+ "logits/rejected": -0.162109375,
+ "logps/chosen": -101.5,
+ "logps/rejected": -398.0,
+ "loss": 0.0001,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.78125,
+ "rewards/margins": 15.625,
+ "rewards/rejected": -13.875,
+ "step": 650
+ },
+ {
+ "epoch": 2.108626198083067,
+ "grad_norm": 0.0016687636602714445,
+ "learning_rate": 1.650887573964497e-07,
+ "logits/chosen": 0.04638671875,
+ "logits/rejected": -0.1396484375,
+ "logps/chosen": -95.5,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.796875,
+ "rewards/margins": 15.4375,
+ "rewards/rejected": -13.625,
+ "step": 660
+ },
+ {
+ "epoch": 2.1405750798722045,
+ "grad_norm": 0.00042481779313726915,
+ "learning_rate": 1.591715976331361e-07,
+ "logits/chosen": -0.0157470703125,
+ "logits/rejected": -0.146484375,
+ "logps/chosen": -96.5,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.796875,
+ "rewards/margins": 15.875,
+ "rewards/rejected": -14.0625,
+ "step": 670
+ },
+ {
+ "epoch": 2.1725239616613417,
+ "grad_norm": 0.0003239142196186926,
+ "learning_rate": 1.5325443786982248e-07,
+ "logits/chosen": 0.1669921875,
+ "logits/rejected": -0.11376953125,
+ "logps/chosen": -96.5,
+ "logps/rejected": -404.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.7109375,
+ "rewards/margins": 15.8125,
+ "rewards/rejected": -14.0625,
+ "step": 680
+ },
+ {
+ "epoch": 2.2044728434504792,
+ "grad_norm": 0.00015234963642246293,
+ "learning_rate": 1.4733727810650885e-07,
+ "logits/chosen": -0.0703125,
+ "logits/rejected": -0.185546875,
+ "logps/chosen": -96.5,
+ "logps/rejected": -398.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8828125,
+ "rewards/margins": 15.5,
+ "rewards/rejected": -13.625,
+ "step": 690
+ },
+ {
+ "epoch": 2.236421725239617,
+ "grad_norm": 0.0008997814309118761,
+ "learning_rate": 1.4142011834319526e-07,
+ "logits/chosen": -0.0380859375,
+ "logits/rejected": -0.12890625,
+ "logps/chosen": -105.5,
+ "logps/rejected": -402.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.7734375,
+ "rewards/margins": 15.375,
+ "rewards/rejected": -13.5625,
+ "step": 700
+ },
+ {
+ "epoch": 2.268370607028754,
+ "grad_norm": 0.0003177375115825633,
+ "learning_rate": 1.3550295857988164e-07,
+ "logits/chosen": -0.10498046875,
+ "logits/rejected": -0.130859375,
+ "logps/chosen": -101.0,
+ "logps/rejected": -398.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.875,
+ "rewards/margins": 15.75,
+ "rewards/rejected": -13.875,
+ "step": 710
+ },
+ {
+ "epoch": 2.3003194888178915,
+ "grad_norm": 0.0007482430969870445,
+ "learning_rate": 1.2958579881656802e-07,
+ "logits/chosen": 0.0927734375,
+ "logits/rejected": -0.1328125,
+ "logps/chosen": -106.5,
+ "logps/rejected": -406.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.765625,
+ "rewards/margins": 15.6875,
+ "rewards/rejected": -13.9375,
+ "step": 720
+ },
+ {
+ "epoch": 2.3322683706070286,
+ "grad_norm": 0.00010184283390916899,
+ "learning_rate": 1.2366863905325443e-07,
+ "logits/chosen": 0.1552734375,
+ "logits/rejected": -0.05908203125,
+ "logps/chosen": -95.5,
+ "logps/rejected": -412.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.921875,
+ "rewards/margins": 16.5,
+ "rewards/rejected": -14.625,
+ "step": 730
+ },
+ {
+ "epoch": 2.364217252396166,
+ "grad_norm": 0.0008696203319851818,
+ "learning_rate": 1.1775147928994082e-07,
+ "logits/chosen": -0.0260009765625,
+ "logits/rejected": -0.23828125,
+ "logps/chosen": -104.5,
+ "logps/rejected": -398.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8359375,
+ "rewards/margins": 15.6875,
+ "rewards/rejected": -13.8125,
+ "step": 740
+ },
+ {
+ "epoch": 2.3961661341853033,
+ "grad_norm": 0.0005906389744401823,
+ "learning_rate": 1.1183431952662721e-07,
+ "logits/chosen": -0.019287109375,
+ "logits/rejected": -0.1416015625,
+ "logps/chosen": -108.0,
+ "logps/rejected": -404.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.7890625,
+ "rewards/margins": 15.8125,
+ "rewards/rejected": -14.0625,
+ "step": 750
+ },
+ {
+ "epoch": 2.428115015974441,
+ "grad_norm": 0.0009197680517853092,
+ "learning_rate": 1.059171597633136e-07,
+ "logits/chosen": 0.029052734375,
+ "logits/rejected": -0.193359375,
+ "logps/chosen": -97.0,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8046875,
+ "rewards/margins": 15.8125,
+ "rewards/rejected": -14.0,
+ "step": 760
+ },
+ {
+ "epoch": 2.460063897763578,
+ "grad_norm": 0.01155545960955193,
+ "learning_rate": 1e-07,
+ "logits/chosen": 0.1630859375,
+ "logits/rejected": -0.1318359375,
+ "logps/chosen": -82.0,
+ "logps/rejected": -412.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.859375,
+ "rewards/margins": 16.25,
+ "rewards/rejected": -14.4375,
+ "step": 770
+ },
+ {
+ "epoch": 2.4920127795527156,
+ "grad_norm": 0.0001793365966728007,
+ "learning_rate": 9.408284023668639e-08,
+ "logits/chosen": 0.140625,
+ "logits/rejected": -0.1416015625,
+ "logps/chosen": -99.0,
+ "logps/rejected": -408.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.765625,
+ "rewards/margins": 16.375,
+ "rewards/rejected": -14.6875,
+ "step": 780
+ },
+ {
+ "epoch": 2.523961661341853,
+ "grad_norm": 0.0035154170380901225,
+ "learning_rate": 8.816568047337278e-08,
+ "logits/chosen": 0.02880859375,
+ "logits/rejected": -0.076171875,
+ "logps/chosen": -95.0,
+ "logps/rejected": -402.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 2.015625,
+ "rewards/margins": 15.6875,
+ "rewards/rejected": -13.6875,
+ "step": 790
+ },
+ {
+ "epoch": 2.5559105431309903,
+ "grad_norm": 0.0006999365033696467,
+ "learning_rate": 8.224852071005916e-08,
+ "logits/chosen": -0.287109375,
+ "logits/rejected": -0.1630859375,
+ "logps/chosen": -111.0,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8828125,
+ "rewards/margins": 15.875,
+ "rewards/rejected": -14.0,
+ "step": 800
+ },
+ {
+ "epoch": 2.587859424920128,
+ "grad_norm": 0.00043385978100053777,
+ "learning_rate": 7.633136094674555e-08,
+ "logits/chosen": 0.0234375,
+ "logits/rejected": -0.1416015625,
+ "logps/chosen": -94.0,
+ "logps/rejected": -408.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.828125,
+ "rewards/margins": 16.5,
+ "rewards/rejected": -14.6875,
+ "step": 810
+ },
+ {
+ "epoch": 2.619808306709265,
+ "grad_norm": 0.00028654137725038975,
+ "learning_rate": 7.041420118343195e-08,
+ "logits/chosen": -0.0263671875,
+ "logits/rejected": -0.171875,
+ "logps/chosen": -102.0,
+ "logps/rejected": -408.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.984375,
+ "rewards/margins": 16.375,
+ "rewards/rejected": -14.4375,
+ "step": 820
+ },
+ {
+ "epoch": 2.6517571884984026,
+ "grad_norm": 0.010220233121677299,
+ "learning_rate": 6.449704142011835e-08,
+ "logits/chosen": -0.038330078125,
+ "logits/rejected": -0.04638671875,
+ "logps/chosen": -103.5,
+ "logps/rejected": -410.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.9140625,
+ "rewards/margins": 16.5,
+ "rewards/rejected": -14.625,
+ "step": 830
+ },
+ {
+ "epoch": 2.68370607028754,
+ "grad_norm": 0.012838197327440185,
+ "learning_rate": 5.857988165680473e-08,
+ "logits/chosen": 0.0185546875,
+ "logits/rejected": -0.130859375,
+ "logps/chosen": -95.0,
+ "logps/rejected": -412.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 2.03125,
+ "rewards/margins": 16.625,
+ "rewards/rejected": -14.625,
+ "step": 840
+ },
+ {
+ "epoch": 2.7156549520766773,
+ "grad_norm": 0.00029967980239553816,
+ "learning_rate": 5.266272189349112e-08,
+ "logits/chosen": 0.08447265625,
+ "logits/rejected": -0.1015625,
+ "logps/chosen": -102.0,
+ "logps/rejected": -404.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.9296875,
+ "rewards/margins": 15.9375,
+ "rewards/rejected": -14.0,
+ "step": 850
+ },
+ {
+ "epoch": 2.747603833865815,
+ "grad_norm": 0.0002762678724235396,
+ "learning_rate": 4.674556213017751e-08,
+ "logits/chosen": 0.048828125,
+ "logits/rejected": -0.1591796875,
+ "logps/chosen": -100.0,
+ "logps/rejected": -404.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.890625,
+ "rewards/margins": 16.0,
+ "rewards/rejected": -14.0625,
+ "step": 860
+ },
+ {
+ "epoch": 2.779552715654952,
+ "grad_norm": 0.00012800387502098561,
+ "learning_rate": 4.082840236686391e-08,
+ "logits/chosen": 0.16796875,
+ "logits/rejected": -0.0634765625,
+ "logps/chosen": -108.5,
+ "logps/rejected": -418.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8984375,
+ "rewards/margins": 16.625,
+ "rewards/rejected": -14.75,
+ "step": 870
+ },
+ {
+ "epoch": 2.8115015974440896,
+ "grad_norm": 0.005831428455641523,
+ "learning_rate": 3.4911242603550294e-08,
+ "logits/chosen": 0.0576171875,
+ "logits/rejected": -0.08837890625,
+ "logps/chosen": -87.5,
+ "logps/rejected": -410.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.9375,
+ "rewards/margins": 16.375,
+ "rewards/rejected": -14.375,
+ "step": 880
+ },
+ {
+ "epoch": 2.8434504792332267,
+ "grad_norm": 0.00012405404139800464,
+ "learning_rate": 2.8994082840236687e-08,
+ "logits/chosen": -0.045166015625,
+ "logits/rejected": -0.041259765625,
+ "logps/chosen": -108.0,
+ "logps/rejected": -404.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.9765625,
+ "rewards/margins": 16.125,
+ "rewards/rejected": -14.125,
+ "step": 890
+ },
+ {
+ "epoch": 2.8753993610223643,
+ "grad_norm": 0.00014955962263616494,
+ "learning_rate": 2.3076923076923076e-08,
+ "logits/chosen": -0.05517578125,
+ "logits/rejected": -0.171875,
+ "logps/chosen": -101.5,
+ "logps/rejected": -400.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.9296875,
+ "rewards/margins": 15.875,
+ "rewards/rejected": -13.9375,
+ "step": 900
+ },
+ {
+ "epoch": 2.9073482428115014,
+ "grad_norm": 9.318676138250297e-05,
+ "learning_rate": 1.7159763313609465e-08,
+ "logits/chosen": 0.00396728515625,
+ "logits/rejected": -0.0693359375,
+ "logps/chosen": -114.5,
+ "logps/rejected": -412.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8125,
+ "rewards/margins": 16.375,
+ "rewards/rejected": -14.5625,
+ "step": 910
+ },
+ {
+ "epoch": 2.939297124600639,
+ "grad_norm": 0.009008026329567341,
+ "learning_rate": 1.1242603550295858e-08,
+ "logits/chosen": 0.1337890625,
+ "logits/rejected": -0.123046875,
+ "logps/chosen": -97.0,
+ "logps/rejected": -406.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.890625,
+ "rewards/margins": 16.125,
+ "rewards/rejected": -14.25,
+ "step": 920
+ },
+ {
+ "epoch": 2.9712460063897765,
+ "grad_norm": 0.00016004830074262195,
+ "learning_rate": 5.325443786982248e-09,
+ "logits/chosen": -0.08251953125,
+ "logits/rejected": -0.16015625,
+ "logps/chosen": -99.0,
+ "logps/rejected": -406.0,
+ "loss": 0.0,
+ "rewards/accuracies": 1.0,
+ "rewards/chosen": 1.8828125,
+ "rewards/margins": 16.25,
+ "rewards/rejected": -14.3125,
+ "step": 930
+ },
+ {
+ "epoch": 3.0,
+ "eval_logits/chosen": 0.193359375,
+ "eval_logits/rejected": 0.004974365234375,
+ "eval_logps/chosen": -82.0,
+ "eval_logps/rejected": -406.0,
+ "eval_loss": 1.4230236047296785e-06,
+ "eval_rewards/accuracies": 1.0,
+ "eval_rewards/chosen": 1.9609375,
+ "eval_rewards/margins": 16.375,
+ "eval_rewards/rejected": -14.375,
+ "eval_runtime": 16.4141,
+ "eval_samples_per_second": 12.185,
+ "eval_steps_per_second": 0.426,
+ "step": 939
  }
  ],
  "logging_steps": 10,
@@ -998,7 +1479,7 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
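trainer_state.json now ends at epoch 3.0 and global_step 939 with should_training_stop flipped to true, so this checkpoint closes out the run. A small sketch, using only the standard json module, for confirming that from a local copy; the "log_history" key is the usual Trainer field holding the per-step entries shown above, an assumption since the diff does not show the key itself:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["epoch"], state["global_step"])   # expected: 3.0 939
last = state["log_history"][-1]               # the epoch-3.0 eval entry added in this commit
print(last["step"], last["eval_rewards/margins"])  # expected: 939 16.375

If a consolidated fp32 state_dict is ever needed from the global_step939 ZeRO shards, DeepSpeed's zero_to_fp32 utility (usually copied into the checkpoint directory when saving) can merge the per-rank optimizer state files.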