zzhang1987 committed on
Commit 11cb349 · verified · 1 Parent(s): 397c8c6

Model save

Files changed (4)
  1. README.md +1 -1
  2. all_results.json +4 -4
  3. train_results.json +4 -4
  4. trainer_state.json +255 -255
README.md CHANGED
@@ -27,7 +27,7 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/causalai/huggingface/runs/shfyq9d4)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/causalai/huggingface/runs/mth6kytm)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
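Note: GRPO runs of this kind are typically launched with the `GRPOTrainer` from the `trl` library. The sketch below is a minimal, hypothetical setup — the base model, dataset, and reward-function bodies are placeholders, not the actual recipe behind this commit — but it illustrates how reward functions are wired into training and why the trainer logs columns named `rewards/accuracy_reward` and `rewards/format_reward` in `trainer_state.json`.

```python
# Hypothetical GRPO training sketch with trl; model, dataset, and reward logic are placeholders.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer


def accuracy_reward(completions, **kwargs):
    # Placeholder scoring: a real recipe would compare each completion against the
    # reference answer column of its dataset; here we just reward non-empty output.
    return [1.0 if completion.strip() else 0.0 for completion in completions]


def format_reward(completions, **kwargs):
    # Placeholder format check: reward completions wrapped in <answer> ... </answer> tags.
    return [1.0 if "<answer>" in completion and "</answer>" in completion else 0.0
            for completion in completions]


dataset = load_dataset("trl-lib/tldr", split="train")  # stand-in dataset

training_args = GRPOConfig(output_dir="grpo-run", logging_steps=5)
trainer = GRPOTrainer(
    model="Qwen/Qwen2-0.5B-Instruct",  # stand-in base model
    reward_funcs=[accuracy_reward, format_reward],
    args=training_args,
    train_dataset=dataset,
)
trainer.train()
```

Each reward function is logged under `rewards/<function_name>`, which is where the `rewards/...` keys in the `trainer_state.json` diff below come from.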
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.010119893048938951,
-    "train_runtime": 11252.7773,
+    "train_loss": 0.11666255951964093,
+    "train_runtime": 146747.14,
     "train_samples": 17056,
-    "train_samples_per_second": 1.516,
-    "train_steps_per_second": 0.012
+    "train_samples_per_second": 0.116,
+    "train_steps_per_second": 0.001
 }
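As a quick consistency check, using only numbers already present in this commit, the reported throughput follows directly from the sample count, the step count, and the runtime:

```python
# Sanity-check the throughput figures in all_results.json / train_results.json.
train_samples = 17056
new_runtime = 146747.14    # seconds, new run
old_runtime = 11252.7773   # seconds, previous run
total_steps = 133          # last step recorded in trainer_state.json

print(round(train_samples / new_runtime, 3))  # 0.116 -> train_samples_per_second (new)
print(round(total_steps / new_runtime, 3))    # 0.001 -> train_steps_per_second (new)
print(round(train_samples / old_runtime, 3))  # 1.516 -> train_samples_per_second (old)
print(round(total_steps / old_runtime, 3))    # 0.012 -> train_steps_per_second (old)
```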
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "total_flos": 0.0,
-    "train_loss": 0.010119893048938951,
-    "train_runtime": 11252.7773,
+    "train_loss": 0.11666255951964093,
+    "train_runtime": 146747.14,
     "train_samples": 17056,
-    "train_samples_per_second": 1.516,
-    "train_steps_per_second": 0.012
+    "train_samples_per_second": 0.116,
+    "train_steps_per_second": 0.001
 }
trainer_state.json CHANGED
@@ -9,399 +9,399 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "completion_length": 35.1837890625,
+      "completion_length": 33.946875,
       "epoch": 0.0375234521575985,
-      "grad_norm": 2.402863025665283,
-      "kl": 0.011089268326759338,
+      "grad_norm": 1.5592833757400513,
+      "kl": 0.013537660241127014,
       "learning_rate": 7.1428571428571436e-06,
-      "loss": 0.0032,
-      "reward": 0.037890625,
-      "reward_std": 0.08385129463858902,
+      "loss": 0.0031,
+      "reward": 0.03671875,
+      "reward_std": 0.08486471865326166,
       "rewards/accuracy_reward": 0.0158203125,
-      "rewards/format_reward": 0.0046875,
-      "rewards/relaxed_accuracy_reward": 0.0173828125,
+      "rewards/format_reward": 0.0033203125,
+      "rewards/relaxed_accuracy_reward": 0.017578125,
       "step": 5
     },
     {
-      "completion_length": 8.055859375,
+      "completion_length": 7.9525390625,
       "epoch": 0.075046904315197,
-      "grad_norm": 107.49523162841797,
-      "kl": 2.6065808296203614,
+      "grad_norm": 26.73110008239746,
+      "kl": 17.372037601470947,
       "learning_rate": 1.4285714285714287e-05,
-      "loss": 0.0987,
-      "reward": 0.37265625,
-      "reward_std": 0.21633352083154023,
-      "rewards/accuracy_reward": 0.173046875,
-      "rewards/format_reward": 0.006640625,
-      "rewards/relaxed_accuracy_reward": 0.19296875,
+      "loss": 0.7047,
+      "reward": 0.413671875,
+      "reward_std": 0.19759062808007002,
+      "rewards/accuracy_reward": 0.1939453125,
+      "rewards/format_reward": 0.0005859375,
+      "rewards/relaxed_accuracy_reward": 0.219140625,
       "step": 10
     },
     {
-      "completion_length": 13.7859375,
+      "completion_length": 29.3978515625,
       "epoch": 0.1125703564727955,
-      "grad_norm": 28.482147216796875,
-      "kl": 4.27113037109375,
+      "grad_norm": 12.636091232299805,
+      "kl": 2.432843017578125,
       "learning_rate": 1.9996515418688493e-05,
-      "loss": 0.1535,
-      "reward": 0.7833984375,
-      "reward_std": 0.3650039840955287,
-      "rewards/accuracy_reward": 0.230078125,
-      "rewards/format_reward": 0.285546875,
-      "rewards/relaxed_accuracy_reward": 0.2677734375,
+      "loss": 0.0908,
+      "reward": 0.30703125,
+      "reward_std": 0.24456602307036518,
+      "rewards/accuracy_reward": 0.140625,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.16640625,
       "step": 15
     },
     {
-      "completion_length": 39.316015625,
+      "completion_length": 27.5798828125,
       "epoch": 0.150093808630394,
-      "grad_norm": 3.342979669570923,
-      "kl": 1.038037109375,
+      "grad_norm": 21.91870880126953,
+      "kl": 1.253253173828125,
       "learning_rate": 1.9874809871741877e-05,
-      "loss": 0.0344,
-      "reward": 1.452734375,
-      "reward_std": 0.4121739151421934,
-      "rewards/accuracy_reward": 0.2072265625,
-      "rewards/format_reward": 0.966015625,
-      "rewards/relaxed_accuracy_reward": 0.2794921875,
+      "loss": 0.0532,
+      "reward": 0.5099609375,
+      "reward_std": 0.2849292915314436,
+      "rewards/accuracy_reward": 0.22578125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2841796875,
       "step": 20
     },
     {
-      "completion_length": 86.3279296875,
+      "completion_length": 9.8216796875,
       "epoch": 0.18761726078799248,
-      "grad_norm": 1.2073032855987549,
-      "kl": 0.60341796875,
+      "grad_norm": 21.709964752197266,
+      "kl": 1.0984130859375,
       "learning_rate": 1.9581296124106682e-05,
-      "loss": 0.0451,
-      "reward": 1.4625,
-      "reward_std": 0.5122467412147671,
-      "rewards/accuracy_reward": 0.225390625,
-      "rewards/format_reward": 0.939453125,
-      "rewards/relaxed_accuracy_reward": 0.29765625,
+      "loss": 0.0578,
+      "reward": 0.5490234375,
+      "reward_std": 0.32321499213576316,
+      "rewards/accuracy_reward": 0.2373046875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.31171875,
       "step": 25
     },
     {
-      "completion_length": 275.5158203125,
+      "completion_length": 22.2578125,
       "epoch": 0.225140712945591,
-      "grad_norm": 5.414246082305908,
-      "kl": 1.6671630859375,
+      "grad_norm": 165.1543426513672,
+      "kl": 3.56005859375,
       "learning_rate": 1.912108091398988e-05,
-      "loss": 0.0761,
-      "reward": 1.3607421875,
-      "reward_std": 0.5011306325905025,
-      "rewards/accuracy_reward": 0.24375,
-      "rewards/format_reward": 0.8123046875,
-      "rewards/relaxed_accuracy_reward": 0.3046875,
+      "loss": 0.1402,
+      "reward": 0.4880859375,
+      "reward_std": 0.3410706129856408,
+      "rewards/accuracy_reward": 0.222265625,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2658203125,
       "step": 30
     },
     {
-      "completion_length": 50.355078125,
+      "completion_length": 20.027734375,
       "epoch": 0.2626641651031895,
-      "grad_norm": 57.9657096862793,
-      "kl": 0.691357421875,
+      "grad_norm": 1127.5472412109375,
+      "kl": 6.7017822265625,
       "learning_rate": 1.8502171357296144e-05,
-      "loss": 0.0052,
-      "reward": 1.4498046875,
-      "reward_std": 0.4204754514619708,
-      "rewards/accuracy_reward": 0.2087890625,
-      "rewards/format_reward": 0.946875,
-      "rewards/relaxed_accuracy_reward": 0.294140625,
+      "loss": 0.2507,
+      "reward": 0.387890625,
+      "reward_std": 0.3576823682524264,
+      "rewards/accuracy_reward": 0.1626953125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2251953125,
       "step": 35
     },
     {
-      "completion_length": 49.6513671875,
+      "completion_length": 29.49453125,
       "epoch": 0.300187617260788,
-      "grad_norm": 1.8818649053573608,
-      "kl": 0.776123046875,
+      "grad_norm": 25.603010177612305,
+      "kl": 1.09813232421875,
       "learning_rate": 1.773533563475053e-05,
-      "loss": 0.0342,
-      "reward": 1.570703125,
-      "reward_std": 0.33258107244037094,
-      "rewards/accuracy_reward": 0.2494140625,
-      "rewards/format_reward": 0.991796875,
-      "rewards/relaxed_accuracy_reward": 0.3294921875,
+      "loss": 0.0428,
+      "reward": 0.5158203125,
+      "reward_std": 0.36268206988461316,
+      "rewards/accuracy_reward": 0.22578125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2900390625,
       "step": 40
     },
     {
-      "completion_length": 41.2986328125,
+      "completion_length": 17.1138671875,
       "epoch": 0.33771106941838647,
-      "grad_norm": 1.5757495164871216,
-      "kl": 104.27255859375,
+      "grad_norm": 6.592025279998779,
+      "kl": 1.22408447265625,
       "learning_rate": 1.6833915640265485e-05,
-      "loss": 4.1831,
-      "reward": 1.3380859375,
-      "reward_std": 0.32749726744368673,
-      "rewards/accuracy_reward": 0.2193359375,
-      "rewards/format_reward": 0.8197265625,
-      "rewards/relaxed_accuracy_reward": 0.2990234375,
+      "loss": 0.0536,
+      "reward": 0.4765625,
+      "reward_std": 0.2620124928187579,
+      "rewards/accuracy_reward": 0.2068359375,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2697265625,
       "step": 45
     },
     {
-      "completion_length": 46.721484375,
+      "completion_length": 35.35703125,
       "epoch": 0.37523452157598497,
-      "grad_norm": 7.2106099128723145,
-      "kl": 2.3932861328125,
+      "grad_norm": 3.753960132598877,
+      "kl": 1.41783447265625,
       "learning_rate": 1.58135948502146e-05,
-      "loss": 0.0875,
-      "reward": 1.24453125,
-      "reward_std": 0.5402243793476373,
-      "rewards/accuracy_reward": 0.2103515625,
-      "rewards/format_reward": 0.762890625,
-      "rewards/relaxed_accuracy_reward": 0.2712890625,
+      "loss": 0.0564,
+      "reward": 0.5171875,
+      "reward_std": 0.3441387979779392,
+      "rewards/accuracy_reward": 0.232421875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.284765625,
       "step": 50
     },
     {
-      "completion_length": 47.58125,
+      "completion_length": 19.3244140625,
       "epoch": 0.41275797373358347,
-      "grad_norm": 1.5229461193084717,
-      "kl": 1.08670654296875,
+      "grad_norm": 8.08942985534668,
+      "kl": 1.07467041015625,
       "learning_rate": 1.4692125452370664e-05,
-      "loss": 0.0485,
-      "reward": 1.3140625,
-      "reward_std": 0.3769304122310132,
-      "rewards/accuracy_reward": 0.1578125,
-      "rewards/format_reward": 0.9650390625,
-      "rewards/relaxed_accuracy_reward": 0.1912109375,
+      "loss": 0.038,
+      "reward": 0.5447265625,
+      "reward_std": 0.3162791552487761,
+      "rewards/accuracy_reward": 0.2421875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3025390625,
       "step": 55
     },
     {
-      "completion_length": 26.315234375,
+      "completion_length": 13.803515625,
       "epoch": 0.450281425891182,
-      "grad_norm": 1.6529346704483032,
-      "kl": 1.0970458984375,
+      "grad_norm": 6688.68994140625,
+      "kl": 11.382958984375,
       "learning_rate": 1.348901948209167e-05,
-      "loss": 0.0523,
-      "reward": 1.2923828125,
-      "reward_std": 0.28480961951427164,
-      "rewards/accuracy_reward": 0.13125,
-      "rewards/format_reward": 0.993359375,
-      "rewards/relaxed_accuracy_reward": 0.1677734375,
+      "loss": 0.4542,
+      "reward": 0.5509765625,
+      "reward_std": 0.2770004874095321,
+      "rewards/accuracy_reward": 0.24453125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3064453125,
       "step": 60
     },
     {
-      "completion_length": 34.857421875,
+      "completion_length": 10.150390625,
       "epoch": 0.4878048780487805,
-      "grad_norm": 2.595728635787964,
-      "kl": 0.999951171875,
+      "grad_norm": 22.93476104736328,
+      "kl": 2.70247802734375,
       "learning_rate": 1.2225209339563144e-05,
-      "loss": 0.0447,
-      "reward": 1.24140625,
-      "reward_std": 0.2651514788158238,
-      "rewards/accuracy_reward": 0.1244140625,
-      "rewards/format_reward": 0.97265625,
-      "rewards/relaxed_accuracy_reward": 0.1443359375,
+      "loss": 0.1131,
+      "reward": 0.58359375,
+      "reward_std": 0.277045093011111,
+      "rewards/accuracy_reward": 0.2546875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.32890625,
       "step": 65
     },
     {
-      "completion_length": 58.794140625,
+      "completion_length": 9.9724609375,
       "epoch": 0.525328330206379,
-      "grad_norm": 1.4318927526474,
-      "kl": 0.9171142578125,
+      "grad_norm": 4.137301445007324,
+      "kl": 1.7162109375,
       "learning_rate": 1.092268359463302e-05,
-      "loss": 0.044,
-      "reward": 1.2154296875,
-      "reward_std": 0.25459547294303775,
-      "rewards/accuracy_reward": 0.10625,
-      "rewards/format_reward": 0.982421875,
-      "rewards/relaxed_accuracy_reward": 0.1267578125,
+      "loss": 0.0629,
+      "reward": 0.524609375,
+      "reward_std": 0.2948215680196881,
+      "rewards/accuracy_reward": 0.2169921875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3076171875,
       "step": 70
     },
     {
-      "completion_length": 65.1509765625,
+      "completion_length": 8.46484375,
       "epoch": 0.5628517823639775,
-      "grad_norm": 1.1972270011901855,
-      "kl": 0.73720703125,
+      "grad_norm": 5.097509860992432,
+      "kl": 2.0114501953125,
       "learning_rate": 9.604104415737309e-06,
-      "loss": 0.0289,
-      "reward": 1.2626953125,
-      "reward_std": 0.29383882097899916,
-      "rewards/accuracy_reward": 0.1248046875,
-      "rewards/format_reward": 0.9748046875,
-      "rewards/relaxed_accuracy_reward": 0.1630859375,
+      "loss": 0.0958,
+      "reward": 0.5404296875,
+      "reward_std": 0.3218729373533279,
+      "rewards/accuracy_reward": 0.2349609375,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.30546875,
       "step": 75
     },
     {
-      "completion_length": 69.7634765625,
+      "completion_length": 8.421875,
       "epoch": 0.600375234521576,
-      "grad_norm": 1.1366685628890991,
-      "kl": 1.04488525390625,
+      "grad_norm": 6.521908283233643,
+      "kl": 2.06651611328125,
       "learning_rate": 8.292413279130625e-06,
-      "loss": 0.0299,
-      "reward": 1.242578125,
-      "reward_std": 0.4017621369101107,
-      "rewards/accuracy_reward": 0.1384765625,
-      "rewards/format_reward": 0.9298828125,
-      "rewards/relaxed_accuracy_reward": 0.17421875,
+      "loss": 0.0886,
+      "reward": 0.528125,
+      "reward_std": 0.32329283356666566,
+      "rewards/accuracy_reward": 0.222265625,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.305859375,
       "step": 80
     },
     {
-      "completion_length": 47.1427734375,
+      "completion_length": 8.27578125,
       "epoch": 0.6378986866791745,
-      "grad_norm": 2.2813758850097656,
-      "kl": 1.25390625,
+      "grad_norm": 3.548377513885498,
+      "kl": 1.679052734375,
       "learning_rate": 7.010431818542298e-06,
-      "loss": 0.0495,
-      "reward": 1.2376953125,
-      "reward_std": 0.3655336996074766,
-      "rewards/accuracy_reward": 0.13125,
-      "rewards/format_reward": 0.9458984375,
-      "rewards/relaxed_accuracy_reward": 0.160546875,
+      "loss": 0.0482,
+      "reward": 0.4205078125,
+      "reward_std": 0.3025111163035035,
+      "rewards/accuracy_reward": 0.1734375,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.2470703125,
       "step": 85
     },
     {
-      "completion_length": 31.600390625,
+      "completion_length": 8.621484375,
       "epoch": 0.6754221388367729,
-      "grad_norm": 1.4212714433670044,
-      "kl": 1.8242919921875,
+      "grad_norm": 11.26955509185791,
+      "kl": 2.084521484375,
       "learning_rate": 5.780464759928623e-06,
-      "loss": 0.066,
-      "reward": 1.2365234375,
-      "reward_std": 0.353168362705037,
-      "rewards/accuracy_reward": 0.137109375,
-      "rewards/format_reward": 0.9404296875,
-      "rewards/relaxed_accuracy_reward": 0.158984375,
+      "loss": 0.097,
+      "reward": 0.566015625,
+      "reward_std": 0.31781149725429714,
+      "rewards/accuracy_reward": 0.239453125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3265625,
       "step": 90
     },
     {
-      "completion_length": 28.031640625,
+      "completion_length": 8.6640625,
       "epoch": 0.7129455909943715,
-      "grad_norm": 12.062272071838379,
-      "kl": 1.24541015625,
+      "grad_norm": 15.46524715423584,
+      "kl": 2.5925048828125,
       "learning_rate": 4.623911849714226e-06,
-      "loss": 0.0525,
-      "reward": 1.287890625,
-      "reward_std": 0.23205105541273952,
-      "rewards/accuracy_reward": 0.140625,
-      "rewards/format_reward": 0.9787109375,
-      "rewards/relaxed_accuracy_reward": 0.1685546875,
+      "loss": 0.0919,
+      "reward": 0.546484375,
+      "reward_std": 0.26226845681667327,
+      "rewards/accuracy_reward": 0.22421875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.322265625,
       "step": 95
     },
     {
-      "completion_length": 28.1373046875,
+      "completion_length": 8.1521484375,
       "epoch": 0.7504690431519699,
-      "grad_norm": 3.8444881439208984,
-      "kl": 1.2223388671875,
+      "grad_norm": 8.370095252990723,
+      "kl": 2.80860595703125,
       "learning_rate": 3.560895528440844e-06,
-      "loss": 0.0449,
-      "reward": 1.3689453125,
-      "reward_std": 0.24813596652820707,
-      "rewards/accuracy_reward": 0.1681640625,
-      "rewards/format_reward": 0.9888671875,
-      "rewards/relaxed_accuracy_reward": 0.2119140625,
+      "loss": 0.0876,
+      "reward": 0.6302734375,
+      "reward_std": 0.27545339791104195,
+      "rewards/accuracy_reward": 0.275,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3552734375,
       "step": 100
     },
     {
       "epoch": 0.7504690431519699,
-      "eval_completion_length": 29.434995644599304,
-      "eval_kl": 0.7486117160278746,
-      "eval_loss": 0.0375058688223362,
-      "eval_reward": 1.4985844947735192,
-      "eval_reward_std": 0.3546458259744096,
-      "eval_rewards/accuracy_reward": 0.2264808362369338,
-      "eval_rewards/format_reward": 0.9954268292682927,
-      "eval_rewards/relaxed_accuracy_reward": 0.2766768292682927,
-      "eval_runtime": 783.8882,
-      "eval_samples_per_second": 1.461,
-      "eval_steps_per_second": 0.366,
+      "eval_completion_length": 8.064242160278745,
+      "eval_kl": 1.6110899390243902,
+      "eval_loss": 0.05133385583758354,
+      "eval_reward": 0.5677264808362369,
+      "eval_reward_std": 0.37672981305716347,
+      "eval_rewards/accuracy_reward": 0.24401132404181183,
+      "eval_rewards/format_reward": 0.0,
+      "eval_rewards/relaxed_accuracy_reward": 0.3237151567944251,
+      "eval_runtime": 419.9905,
+      "eval_samples_per_second": 2.726,
+      "eval_steps_per_second": 0.683,
       "step": 100
     },
     {
-      "completion_length": 28.3857421875,
+      "completion_length": 8.2787109375,
       "epoch": 0.7879924953095685,
-      "grad_norm": 2.9354612827301025,
-      "kl": 1.0091796875,
+      "grad_norm": 5.566633224487305,
+      "kl": 1.6791015625,
       "learning_rate": 2.6099108277934105e-06,
-      "loss": 0.0337,
-      "reward": 1.3822265625,
-      "reward_std": 0.27833853210322557,
-      "rewards/accuracy_reward": 0.1744140625,
-      "rewards/format_reward": 0.991015625,
-      "rewards/relaxed_accuracy_reward": 0.216796875,
+      "loss": 0.0743,
+      "reward": 0.580859375,
+      "reward_std": 0.3424753251951188,
+      "rewards/accuracy_reward": 0.253125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.327734375,
       "step": 105
     },
     {
-      "completion_length": 29.3595703125,
+      "completion_length": 8.3333984375,
       "epoch": 0.8255159474671669,
-      "grad_norm": 1.311169147491455,
-      "kl": 0.9508544921875,
+      "grad_norm": 4.05443000793457,
+      "kl": 2.09364013671875,
       "learning_rate": 1.7875035823168641e-06,
-      "loss": 0.0285,
-      "reward": 1.246484375,
-      "reward_std": 0.28176811882294717,
-      "rewards/accuracy_reward": 0.12265625,
-      "rewards/format_reward": 0.9828125,
-      "rewards/relaxed_accuracy_reward": 0.141015625,
+      "loss": 0.0748,
+      "reward": 0.5283203125,
+      "reward_std": 0.3331008433829993,
+      "rewards/accuracy_reward": 0.2318359375,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.296484375,
       "step": 110
     },
     {
-      "completion_length": 29.8638671875,
+      "completion_length": 8.5595703125,
       "epoch": 0.8630393996247655,
-      "grad_norm": 1.3971428871154785,
-      "kl": 0.9172119140625,
+      "grad_norm": 20.012693405151367,
+      "kl": 2.143017578125,
       "learning_rate": 1.1079825545001887e-06,
-      "loss": 0.0346,
-      "reward": 1.23984375,
-      "reward_std": 0.2760173589922488,
-      "rewards/accuracy_reward": 0.1142578125,
-      "rewards/format_reward": 0.98359375,
-      "rewards/relaxed_accuracy_reward": 0.1419921875,
+      "loss": 0.0681,
+      "reward": 0.5373046875,
+      "reward_std": 0.3182933186646551,
+      "rewards/accuracy_reward": 0.23046875,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3068359375,
       "step": 115
     },
     {
-      "completion_length": 32.0333984375,
+      "completion_length": 8.526171875,
       "epoch": 0.900562851782364,
-      "grad_norm": 2.8106608390808105,
-      "kl": 1.179638671875,
+      "grad_norm": 12.22225570678711,
+      "kl": 1.8458984375,
       "learning_rate": 5.831704818578842e-07,
-      "loss": 0.057,
-      "reward": 1.2111328125,
-      "reward_std": 0.3261663016863167,
-      "rewards/accuracy_reward": 0.1119140625,
-      "rewards/format_reward": 0.9591796875,
-      "rewards/relaxed_accuracy_reward": 0.1400390625,
+      "loss": 0.0716,
+      "reward": 0.5498046875,
+      "reward_std": 0.31468736389651897,
+      "rewards/accuracy_reward": 0.237890625,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3119140625,
       "step": 120
     },
     {
-      "completion_length": 33.3609375,
+      "completion_length": 8.767578125,
       "epoch": 0.9380863039399625,
-      "grad_norm": 2.104233503341675,
-      "kl": 1.3634521484375,
+      "grad_norm": 4.848618507385254,
+      "kl": 1.8501708984375,
       "learning_rate": 2.2219837744959284e-07,
-      "loss": 0.0553,
-      "reward": 1.223046875,
-      "reward_std": 0.3774998709093779,
-      "rewards/accuracy_reward": 0.121484375,
-      "rewards/format_reward": 0.94140625,
-      "rewards/relaxed_accuracy_reward": 0.16015625,
+      "loss": 0.0649,
+      "reward": 0.5712890625,
+      "reward_std": 0.33388876002281903,
+      "rewards/accuracy_reward": 0.242578125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3287109375,
       "step": 125
     },
     {
-      "completion_length": 32.5916015625,
+      "completion_length": 8.5228515625,
       "epoch": 0.975609756097561,
-      "grad_norm": 2.3259518146514893,
-      "kl": 1.3031005859375,
+      "grad_norm": 5.429755210876465,
+      "kl": 1.84400634765625,
       "learning_rate": 3.134666272774034e-08,
-      "loss": 0.0566,
-      "reward": 1.218359375,
-      "reward_std": 0.35580105874687434,
-      "rewards/accuracy_reward": 0.1244140625,
-      "rewards/format_reward": 0.9486328125,
-      "rewards/relaxed_accuracy_reward": 0.1453125,
+      "loss": 0.0876,
+      "reward": 0.564453125,
+      "reward_std": 0.31874394970946013,
+      "rewards/accuracy_reward": 0.2439453125,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3205078125,
       "step": 130
     },
     {
-      "completion_length": 32.7490234375,
+      "completion_length": 8.5595703125,
       "epoch": 0.99812382739212,
-      "kl": 1.3529052734375,
-      "reward": 1.2366536458333333,
-      "reward_std": 0.3705777660167466,
-      "rewards/accuracy_reward": 0.13151041666666666,
-      "rewards/format_reward": 0.9427083333333334,
-      "rewards/relaxed_accuracy_reward": 0.16243489583333334,
+      "kl": 1.7513020833333333,
+      "reward": 0.55078125,
+      "reward_std": 0.3167417396325618,
+      "rewards/accuracy_reward": 0.2275390625,
+      "rewards/format_reward": 0.0,
+      "rewards/relaxed_accuracy_reward": 0.3232421875,
       "step": 133,
       "total_flos": 0.0,
-      "train_loss": 0.010119893048938951,
-      "train_runtime": 11252.7773,
-      "train_samples_per_second": 1.516,
-      "train_steps_per_second": 0.012
+      "train_loss": 0.11666255951964093,
+      "train_runtime": 146747.14,
+      "train_samples_per_second": 0.116,
+      "train_steps_per_second": 0.001
     }
   ],
   "logging_steps": 5,