ChiefTheLord committed
Commit 433d8a9 (verified)
1 Parent(s): 805516f

Upload folder using huggingface_hub

coco_checkpoints/checkpoint-1445/adapter.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72c4d6f5d9a4fa1041f97cad75da6f005dddef6561ef9a6f5ee1588d4baa58ef
+ size 17064856
coco_checkpoints/checkpoint-1445/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7deb029965a5f3aa5628ab7bf742d8f256b2428ddf7503f6827f9734b76cf91
+ size 8714492
coco_checkpoints/checkpoint-1445/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb028321b15740df4ceb8773af2e92f09aeb64177e0d03281800bfd793bbc2a7
+ size 14244
coco_checkpoints/checkpoint-1445/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac3188b75baca9bbfe53e3e08b7e090c4f46683e7933bbae216c3ba16f204ab2
+ size 1064
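
The four files above are Git LFS pointer stubs: only the spec version, SHA-256 object id, and byte size are stored in git, while the actual binaries (the ~17 MB adapter weights plus optimizer, RNG, and scheduler state) live in LFS storage and are fetched on download. Below is a minimal sketch, assuming the checkpoint lives in a Hub repository, of pulling and inspecting the adapter weights; the repo id is a hypothetical placeholder, since the repository name is not shown in this commit view.

# Minimal sketch: fetch checkpoint-1445's adapter weights and list a few tensors.
# "user/repo" is a hypothetical placeholder -- the actual repository id is not
# part of this diff and must be substituted.
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

adapter_path = hf_hub_download(
    repo_id="user/repo",  # placeholder repo id
    filename="coco_checkpoints/checkpoint-1445/adapter.safetensors",
)

state_dict = load_file(adapter_path)  # the LFS pointer resolves to the real ~17 MB file
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape), tensor.dtype)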
coco_checkpoints/checkpoint-1445/trainer_state.json ADDED
@@ -0,0 +1,758 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 250,
+ "global_step": 1445,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.011072664359861591,
+ "grad_norm": 7.3027024269104,
+ "learning_rate": 0.00036363636363636367,
+ "loss": 7.4688,
+ "step": 16
+ },
+ {
+ "epoch": 0.022145328719723183,
+ "grad_norm": 3.441852331161499,
+ "learning_rate": 0.0006363636363636364,
+ "loss": 6.1486,
+ "step": 32
+ },
+ {
+ "epoch": 0.03321799307958478,
+ "grad_norm": 0.9797470569610596,
+ "learning_rate": 0.001,
+ "loss": 5.1898,
+ "step": 48
+ },
+ {
+ "epoch": 0.044290657439446365,
+ "grad_norm": 0.4113083481788635,
+ "learning_rate": 0.0009996782216198338,
+ "loss": 5.0792,
+ "step": 64
+ },
+ {
+ "epoch": 0.05536332179930796,
+ "grad_norm": 0.23998336493968964,
+ "learning_rate": 0.0009987133006446386,
+ "loss": 4.9765,
+ "step": 80
+ },
+ {
+ "epoch": 0.06643598615916955,
+ "grad_norm": 0.34719282388687134,
+ "learning_rate": 0.0009971064790372482,
+ "loss": 5.0589,
+ "step": 96
+ },
+ {
+ "epoch": 0.07750865051903114,
+ "grad_norm": 0.28703269362449646,
+ "learning_rate": 0.0009948598249594788,
+ "loss": 5.0273,
+ "step": 112
+ },
+ {
+ "epoch": 0.08858131487889273,
+ "grad_norm": 0.3285124599933624,
+ "learning_rate": 0.00099197623011017,
+ "loss": 4.9965,
+ "step": 128
+ },
+ {
+ "epoch": 0.09965397923875433,
+ "grad_norm": 0.21146929264068604,
+ "learning_rate": 0.0009884594060032405,
+ "loss": 5.0386,
+ "step": 144
+ },
+ {
+ "epoch": 0.11072664359861592,
+ "grad_norm": 0.2574313282966614,
+ "learning_rate": 0.0009843138791905482,
+ "loss": 4.9744,
+ "step": 160
+ },
+ {
+ "epoch": 0.12179930795847752,
+ "grad_norm": 0.2936093211174011,
+ "learning_rate": 0.000979544985435704,
+ "loss": 5.0448,
+ "step": 176
+ },
+ {
+ "epoch": 0.1328719723183391,
+ "grad_norm": 0.2813468277454376,
+ "learning_rate": 0.0009741588628463382,
+ "loss": 5.0399,
+ "step": 192
+ },
+ {
+ "epoch": 0.1439446366782007,
+ "grad_norm": 0.27917245030403137,
+ "learning_rate": 0.0009681624439736599,
+ "loss": 5.0001,
+ "step": 208
+ },
+ {
+ "epoch": 0.15501730103806227,
+ "grad_norm": 0.23994863033294678,
+ "learning_rate": 0.0009615634468894752,
+ "loss": 4.9829,
+ "step": 224
+ },
+ {
+ "epoch": 0.16608996539792387,
+ "grad_norm": 0.18816818296909332,
+ "learning_rate": 0.0009543703652521542,
+ "loss": 4.9348,
+ "step": 240
+ },
+ {
+ "epoch": 0.17301038062283736,
+ "eval_bleu": 0.07082991922728418,
+ "eval_cap_loss": 1.862730330052497,
+ "eval_con_loss": 2.0789193881875687,
+ "eval_loss": 3.941649715827423,
+ "step": 250
+ },
+ {
+ "epoch": 0.17301038062283736,
+ "eval_bleu": 0.07082991922728418,
+ "eval_cap_loss": 1.862730330052497,
+ "eval_con_loss": 2.0789193881875687,
+ "eval_loss": 3.941649715827423,
+ "eval_runtime": 809.5614,
+ "eval_samples_per_second": 19.036,
+ "eval_steps_per_second": 2.38,
+ "step": 250
+ },
+ {
+ "epoch": 0.17716262975778546,
+ "grad_norm": 0.24259746074676514,
+ "learning_rate": 0.0009465924573743279,
+ "loss": 4.9856,
+ "step": 256
+ },
+ {
+ "epoch": 0.18823529411764706,
+ "grad_norm": 0.2658279836177826,
+ "learning_rate": 0.0009382397343063877,
+ "loss": 4.9553,
+ "step": 272
+ },
+ {
+ "epoch": 0.19930795847750865,
+ "grad_norm": 0.18191872537136078,
+ "learning_rate": 0.0009293229469511293,
+ "loss": 4.9995,
+ "step": 288
+ },
+ {
+ "epoch": 0.21038062283737025,
+ "grad_norm": 0.29174473881721497,
+ "learning_rate": 0.000919853572226118,
+ "loss": 5.0197,
+ "step": 304
+ },
+ {
+ "epoch": 0.22145328719723184,
+ "grad_norm": 0.21258646249771118,
+ "learning_rate": 0.0009098437982915953,
+ "loss": 4.9636,
+ "step": 320
+ },
+ {
+ "epoch": 0.23252595155709344,
+ "grad_norm": 0.18101181089878082,
+ "learning_rate": 0.0008993065088629304,
+ "loss": 4.9274,
+ "step": 336
+ },
+ {
+ "epoch": 0.24359861591695503,
+ "grad_norm": 0.2041468620300293,
+ "learning_rate": 0.0008882552666278186,
+ "loss": 5.0077,
+ "step": 352
+ },
+ {
+ "epoch": 0.2546712802768166,
+ "grad_norm": 0.3073960244655609,
+ "learning_rate": 0.0008767042957895606,
+ "loss": 4.9781,
+ "step": 368
+ },
+ {
+ "epoch": 0.2657439446366782,
+ "grad_norm": 0.17274291813373566,
+ "learning_rate": 0.0008646684637588991,
+ "loss": 4.911,
+ "step": 384
+ },
+ {
+ "epoch": 0.2768166089965398,
+ "grad_norm": 0.20907478034496307,
+ "learning_rate": 0.0008521632620179735,
+ "loss": 5.0089,
+ "step": 400
+ },
+ {
+ "epoch": 0.2878892733564014,
+ "grad_norm": 0.2937226891517639,
+ "learning_rate": 0.0008392047861810229,
+ "loss": 4.9306,
+ "step": 416
+ },
+ {
+ "epoch": 0.29896193771626295,
+ "grad_norm": 0.35890915989875793,
+ "learning_rate": 0.0008258097152775044,
+ "loss": 4.9363,
+ "step": 432
+ },
+ {
+ "epoch": 0.31003460207612454,
+ "grad_norm": 0.19902297854423523,
+ "learning_rate": 0.0008119952902842882,
+ "loss": 4.9872,
+ "step": 448
+ },
+ {
+ "epoch": 0.32110726643598614,
+ "grad_norm": 0.2420748621225357,
+ "learning_rate": 0.0007977792919345632,
+ "loss": 5.0178,
+ "step": 464
+ },
+ {
+ "epoch": 0.33217993079584773,
+ "grad_norm": 0.21633224189281464,
+ "learning_rate": 0.0007831800178320152,
+ "loss": 4.9588,
+ "step": 480
+ },
+ {
+ "epoch": 0.34325259515570933,
+ "grad_norm": 0.2581658363342285,
+ "learning_rate": 0.0007682162588997332,
+ "loss": 4.9214,
+ "step": 496
+ },
+ {
+ "epoch": 0.3460207612456747,
+ "eval_bleu": 0.0743834461108624,
+ "eval_cap_loss": 1.8318203130285544,
+ "eval_con_loss": 2.0786952485553205,
+ "eval_loss": 3.9105155644914187,
+ "step": 500
+ },
+ {
+ "epoch": 0.3460207612456747,
+ "eval_bleu": 0.0743834461108624,
+ "eval_cap_loss": 1.8318203130285544,
+ "eval_con_loss": 2.0786952485553205,
+ "eval_loss": 3.9105155644914187,
+ "eval_runtime": 820.0211,
+ "eval_samples_per_second": 18.793,
+ "eval_steps_per_second": 2.35,
+ "step": 500
+ },
+ {
+ "epoch": 0.3543252595155709,
+ "grad_norm": 0.24933111667633057,
+ "learning_rate": 0.0007529072751941595,
+ "loss": 4.9951,
+ "step": 512
+ },
+ {
+ "epoch": 0.3653979238754325,
+ "grad_norm": 0.1792280375957489,
+ "learning_rate": 0.0007372727711152087,
+ "loss": 4.8813,
+ "step": 528
+ },
+ {
+ "epoch": 0.3764705882352941,
+ "grad_norm": 0.13637550175189972,
+ "learning_rate": 0.0007213328700444696,
+ "loss": 4.9125,
+ "step": 544
+ },
+ {
+ "epoch": 0.3875432525951557,
+ "grad_norm": 0.3409987688064575,
+ "learning_rate": 0.0007051080884441287,
+ "loss": 4.9073,
+ "step": 560
+ },
+ {
+ "epoch": 0.3986159169550173,
+ "grad_norm": 0.2467418760061264,
+ "learning_rate": 0.0006886193094499536,
+ "loss": 4.9227,
+ "step": 576
+ },
+ {
+ "epoch": 0.4096885813148789,
+ "grad_norm": 0.2751147747039795,
+ "learning_rate": 0.000671887755992327,
+ "loss": 4.9871,
+ "step": 592
+ },
+ {
+ "epoch": 0.4207612456747405,
+ "grad_norm": 0.21447473764419556,
+ "learning_rate": 0.000654934963479926,
+ "loss": 4.9484,
+ "step": 608
+ },
+ {
+ "epoch": 0.4318339100346021,
+ "grad_norm": 0.26142337918281555,
+ "learning_rate": 0.0006377827520812061,
+ "loss": 4.9204,
+ "step": 624
+ },
+ {
+ "epoch": 0.4429065743944637,
+ "grad_norm": 0.3584180176258087,
+ "learning_rate": 0.0006204531986393678,
+ "loss": 5.0,
+ "step": 640
+ },
+ {
+ "epoch": 0.4539792387543253,
+ "grad_norm": 0.2877948582172394,
+ "learning_rate": 0.0006029686082569537,
+ "loss": 5.0003,
+ "step": 656
+ },
+ {
+ "epoch": 0.46505190311418687,
+ "grad_norm": 0.2049325406551361,
+ "learning_rate": 0.000585351485586648,
+ "loss": 4.9349,
+ "step": 672
+ },
+ {
+ "epoch": 0.47612456747404847,
+ "grad_norm": 0.200492262840271,
+ "learning_rate": 0.0005676245058652349,
+ "loss": 4.9385,
+ "step": 688
+ },
+ {
+ "epoch": 0.48719723183391006,
+ "grad_norm": 0.24712397158145905,
+ "learning_rate": 0.000549810485727994,
+ "loss": 4.9516,
+ "step": 704
+ },
+ {
+ "epoch": 0.4982698961937716,
+ "grad_norm": 0.380741149187088,
+ "learning_rate": 0.0005319323538411021,
+ "loss": 4.9121,
+ "step": 720
+ },
+ {
+ "epoch": 0.5093425605536333,
+ "grad_norm": 0.2339814305305481,
+ "learning_rate": 0.0005140131213898345,
+ "loss": 4.9058,
+ "step": 736
+ },
+ {
+ "epoch": 0.5190311418685121,
+ "eval_bleu": 0.0755992902414665,
+ "eval_cap_loss": 1.8195727369484656,
+ "eval_con_loss": 2.0776209530585406,
+ "eval_loss": 3.897193689945144,
+ "step": 750
+ },
+ {
+ "epoch": 0.5190311418685121,
+ "eval_bleu": 0.0755992902414665,
+ "eval_cap_loss": 1.8195727369484656,
+ "eval_con_loss": 2.0776209530585406,
+ "eval_loss": 3.897193689945144,
+ "eval_runtime": 819.7019,
+ "eval_samples_per_second": 18.801,
+ "eval_steps_per_second": 2.351,
+ "step": 750
+ },
+ {
+ "epoch": 0.5204152249134948,
+ "grad_norm": 0.23871256411075592,
+ "learning_rate": 0.0004960758524605593,
+ "loss": 4.9217,
+ "step": 752
+ },
+ {
+ "epoch": 0.5314878892733564,
+ "grad_norm": 0.19021867215633392,
+ "learning_rate": 0.0004781436343546391,
+ "loss": 4.9383,
+ "step": 768
+ },
+ {
+ "epoch": 0.542560553633218,
+ "grad_norm": 0.39012765884399414,
+ "learning_rate": 0.0004602395478724539,
+ "loss": 4.8852,
+ "step": 784
+ },
+ {
+ "epoch": 0.5536332179930796,
+ "grad_norm": 0.2299482375383377,
+ "learning_rate": 0.00044238663760578963,
+ "loss": 4.9461,
+ "step": 800
+ },
+ {
+ "epoch": 0.5647058823529412,
+ "grad_norm": 0.20332716405391693,
+ "learning_rate": 0.0004246078822768339,
+ "loss": 4.8672,
+ "step": 816
+ },
+ {
+ "epoch": 0.5757785467128028,
+ "grad_norm": 0.2742558717727661,
+ "learning_rate": 0.00040692616516195134,
+ "loss": 5.0016,
+ "step": 832
+ },
+ {
+ "epoch": 0.5868512110726644,
+ "grad_norm": 0.18639664351940155,
+ "learning_rate": 0.0003893642446383089,
+ "loss": 4.8718,
+ "step": 848
+ },
+ {
+ "epoch": 0.5979238754325259,
+ "grad_norm": 0.14423900842666626,
+ "learning_rate": 0.00037194472489126174,
+ "loss": 4.9254,
+ "step": 864
+ },
+ {
+ "epoch": 0.6089965397923875,
+ "grad_norm": 0.25753453373908997,
+ "learning_rate": 0.00035469002682019933,
+ "loss": 5.0226,
+ "step": 880
+ },
+ {
+ "epoch": 0.6200692041522491,
+ "grad_norm": 0.29433926939964294,
+ "learning_rate": 0.00033762235918030425,
+ "loss": 4.9075,
+ "step": 896
+ },
+ {
+ "epoch": 0.6311418685121107,
+ "grad_norm": 0.16973747313022614,
+ "learning_rate": 0.0003207636899973617,
+ "loss": 4.9367,
+ "step": 912
+ },
+ {
+ "epoch": 0.6422145328719723,
+ "grad_norm": 0.23007385432720184,
+ "learning_rate": 0.0003041357182924178,
+ "loss": 4.8899,
+ "step": 928
+ },
+ {
+ "epoch": 0.6532871972318339,
+ "grad_norm": 0.23643143475055695,
+ "learning_rate": 0.000287759846152675,
+ "loss": 4.8813,
+ "step": 944
+ },
+ {
+ "epoch": 0.6643598615916955,
+ "grad_norm": 0.17182067036628723,
+ "learning_rate": 0.00027165715118457735,
+ "loss": 4.9201,
+ "step": 960
+ },
+ {
+ "epoch": 0.6754325259515571,
+ "grad_norm": 0.29925912618637085,
+ "learning_rate": 0.0002558483593845372,
+ "loss": 4.898,
+ "step": 976
+ },
+ {
+ "epoch": 0.6865051903114187,
+ "grad_norm": 0.17474640905857086,
+ "learning_rate": 0.00024035381846222555,
+ "loss": 4.8839,
+ "step": 992
+ },
+ {
+ "epoch": 0.6920415224913494,
+ "eval_bleu": 0.07638727198557162,
+ "eval_cap_loss": 1.797022891217832,
+ "eval_con_loss": 2.0767276909416923,
+ "eval_loss": 3.87375058110786,
+ "step": 1000
+ },
+ {
+ "epoch": 0.6920415224913494,
+ "eval_bleu": 0.07638727198557162,
+ "eval_cap_loss": 1.797022891217832,
+ "eval_con_loss": 2.0767276909416923,
+ "eval_loss": 3.87375058110786,
+ "eval_runtime": 815.983,
+ "eval_samples_per_second": 18.886,
+ "eval_steps_per_second": 2.362,
+ "step": 1000
+ },
+ {
+ "epoch": 0.6975778546712803,
+ "grad_norm": 0.2200097143650055,
+ "learning_rate": 0.00022519347165076065,
+ "loss": 4.9202,
+ "step": 1008
+ },
+ {
+ "epoch": 0.7086505190311418,
+ "grad_norm": 0.2346457690000534,
+ "learning_rate": 0.00021038683203750092,
+ "loss": 4.8698,
+ "step": 1024
+ },
+ {
+ "epoch": 0.7197231833910035,
+ "grad_norm": 0.24073997139930725,
+ "learning_rate": 0.00019595295744848825,
+ "loss": 5.01,
+ "step": 1040
+ },
+ {
+ "epoch": 0.730795847750865,
+ "grad_norm": 0.2730049192905426,
+ "learning_rate": 0.00018191042591886197,
+ "loss": 4.8847,
+ "step": 1056
+ },
+ {
+ "epoch": 0.7418685121107267,
+ "grad_norm": 0.21411365270614624,
+ "learning_rate": 0.00016827731178081822,
+ "loss": 4.9802,
+ "step": 1072
+ },
+ {
+ "epoch": 0.7529411764705882,
+ "grad_norm": 0.2260352373123169,
+ "learning_rate": 0.0001550711623998926,
+ "loss": 4.9593,
+ "step": 1088
+ },
+ {
+ "epoch": 0.7640138408304499,
+ "grad_norm": 0.2655484974384308,
+ "learning_rate": 0.0001423089755895095,
+ "loss": 4.8097,
+ "step": 1104
+ },
+ {
+ "epoch": 0.7750865051903114,
+ "grad_norm": 0.2067459672689438,
+ "learning_rate": 0.0001300071777328658,
+ "loss": 4.947,
+ "step": 1120
+ },
+ {
+ "epoch": 0.7861591695501731,
+ "grad_norm": 0.21391142904758453,
+ "learning_rate": 0.00011818160264031097,
+ "loss": 4.8673,
+ "step": 1136
+ },
+ {
+ "epoch": 0.7972318339100346,
+ "grad_norm": 0.3288532793521881,
+ "learning_rate": 0.00010684747116943683,
+ "loss": 4.9652,
+ "step": 1152
+ },
+ {
+ "epoch": 0.8083044982698961,
+ "grad_norm": 0.1693842113018036,
+ "learning_rate": 9.60193716341039e-05,
+ "loss": 4.9003,
+ "step": 1168
+ },
+ {
+ "epoch": 0.8193771626297578,
+ "grad_norm": 0.3056645691394806,
+ "learning_rate": 8.571124102762767e-05,
+ "loss": 4.9436,
+ "step": 1184
+ },
+ {
+ "epoch": 0.8304498269896193,
+ "grad_norm": 0.3365015387535095,
+ "learning_rate": 7.593634708428437e-05,
+ "loss": 5.0349,
+ "step": 1200
+ },
+ {
+ "epoch": 0.841522491349481,
+ "grad_norm": 0.2918214201927185,
+ "learning_rate": 6.670727120223142e-05,
+ "loss": 4.8812,
+ "step": 1216
+ },
+ {
+ "epoch": 0.8525951557093425,
+ "grad_norm": 0.2237936407327652,
+ "learning_rate": 5.8035892249820085e-05,
+ "loss": 4.8842,
+ "step": 1232
+ },
+ {
+ "epoch": 0.8636678200692042,
+ "grad_norm": 0.23957201838493347,
+ "learning_rate": 4.993337127614273e-05,
+ "loss": 4.9281,
+ "step": 1248
+ },
+ {
+ "epoch": 0.8650519031141869,
+ "eval_bleu": 0.07725825664704841,
+ "eval_cap_loss": 1.781457468055355,
+ "eval_con_loss": 2.0761843878924693,
+ "eval_loss": 3.8576418577727716,
+ "step": 1250
+ },
+ {
+ "epoch": 0.8650519031141869,
+ "eval_bleu": 0.07725825664704841,
+ "eval_cap_loss": 1.781457468055355,
+ "eval_con_loss": 2.0761843878924693,
+ "eval_loss": 3.8576418577727716,
+ "eval_runtime": 819.4117,
+ "eval_samples_per_second": 18.807,
+ "eval_steps_per_second": 2.352,
+ "step": 1250
+ },
+ {
+ "epoch": 0.8747404844290657,
+ "grad_norm": 0.09429222345352173,
+ "learning_rate": 4.2410137145495964e-05,
+ "loss": 4.8982,
+ "step": 1264
+ },
+ {
+ "epoch": 0.8858131487889274,
+ "grad_norm": 0.23660314083099365,
+ "learning_rate": 3.54758731142486e-05,
+ "loss": 4.9127,
+ "step": 1280
+ },
+ {
+ "epoch": 0.8968858131487889,
+ "grad_norm": 0.2092629075050354,
+ "learning_rate": 2.9139504367391158e-05,
+ "loss": 5.0183,
+ "step": 1296
+ },
+ {
+ "epoch": 0.9079584775086506,
+ "grad_norm": 0.2870592772960663,
+ "learning_rate": 2.3409186530809423e-05,
+ "loss": 4.9319,
+ "step": 1312
+ },
+ {
+ "epoch": 0.9190311418685121,
+ "grad_norm": 0.23483090102672577,
+ "learning_rate": 1.8292295174068717e-05,
+ "loss": 4.836,
+ "step": 1328
+ },
+ {
+ "epoch": 0.9301038062283737,
+ "grad_norm": 0.24700886011123657,
+ "learning_rate": 1.3795416317218035e-05,
+ "loss": 4.9113,
+ "step": 1344
+ },
+ {
+ "epoch": 0.9411764705882353,
+ "grad_norm": 0.20898666977882385,
+ "learning_rate": 9.924337953834795e-06,
+ "loss": 4.9519,
+ "step": 1360
+ },
+ {
+ "epoch": 0.9522491349480969,
+ "grad_norm": 0.14755718410015106,
+ "learning_rate": 6.684042601220186e-06,
+ "loss": 4.8827,
+ "step": 1376
+ },
+ {
+ "epoch": 0.9633217993079585,
+ "grad_norm": 0.2121778130531311,
+ "learning_rate": 4.078700887333364e-06,
+ "loss": 4.861,
+ "step": 1392
+ },
+ {
+ "epoch": 0.9743944636678201,
+ "grad_norm": 0.2525366544723511,
+ "learning_rate": 2.1116661827202956e-06,
+ "loss": 4.8674,
+ "step": 1408
+ },
+ {
+ "epoch": 0.9854671280276817,
+ "grad_norm": 0.278390109539032,
+ "learning_rate": 7.854702843449468e-07,
+ "loss": 4.9493,
+ "step": 1424
+ },
+ {
+ "epoch": 0.9965397923875432,
+ "grad_norm": 0.22038041055202484,
+ "learning_rate": 1.0182015687909552e-07,
+ "loss": 4.9065,
+ "step": 1440
+ }
+ ],
+ "logging_steps": 16,
+ "max_steps": 1445,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 250,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0.0,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
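
trainer_state.json above is the standard Hugging Face Trainer state for this run: one epoch of 1445 steps, a training log entry every 16 steps, and evaluation/checkpointing every 250 steps. A small sketch, assuming the file has been downloaded locally to the path shown, of pulling the evaluation records (eval_loss, eval_bleu) out of log_history:

import json

# Assumes trainer_state.json is available locally at this (illustrative) path.
with open("coco_checkpoints/checkpoint-1445/trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log_history entries carrying an "eval_loss" key;
# they appear every 250 steps (see "eval_steps" above).
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]:>4}: '
              f'eval_loss={entry["eval_loss"]:.4f}  '
              f'eval_bleu={entry["eval_bleu"]:.4f}')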