|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.1307953260459858, |
|
"eval_steps": 500, |
|
"global_step": 1500, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.003769317753486619, |
|
"grad_norm": 2.665037155151367, |
|
"learning_rate": 4.999956146783009e-05, |
|
"loss": 1.564, |
|
"num_input_tokens_seen": 23856, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.007538635506973238, |
|
"grad_norm": 1.4514005184173584, |
|
"learning_rate": 4.9998245886705174e-05, |
|
"loss": 0.968, |
|
"num_input_tokens_seen": 47584, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.011307953260459858, |
|
"grad_norm": 2.0009350776672363, |
|
"learning_rate": 4.999605330277923e-05, |
|
"loss": 0.9879, |
|
"num_input_tokens_seen": 70864, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.015077271013946476, |
|
"grad_norm": 1.2164318561553955, |
|
"learning_rate": 4.999298379297376e-05, |
|
"loss": 1.073, |
|
"num_input_tokens_seen": 94192, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.018846588767433094, |
|
"grad_norm": 2.2099952697753906, |
|
"learning_rate": 4.998903746497505e-05, |
|
"loss": 0.8549, |
|
"num_input_tokens_seen": 117472, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.022615906520919715, |
|
"grad_norm": 1.6259618997573853, |
|
"learning_rate": 4.998421445723046e-05, |
|
"loss": 0.8704, |
|
"num_input_tokens_seen": 140704, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.026385224274406333, |
|
"grad_norm": 1.7849559783935547, |
|
"learning_rate": 4.997851493894349e-05, |
|
"loss": 0.9786, |
|
"num_input_tokens_seen": 163680, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.03015454202789295, |
|
"grad_norm": 1.8650152683258057, |
|
"learning_rate": 4.997193911006793e-05, |
|
"loss": 0.8356, |
|
"num_input_tokens_seen": 187072, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.03392385978137957, |
|
"grad_norm": 1.681622862815857, |
|
"learning_rate": 4.996448720130077e-05, |
|
"loss": 0.778, |
|
"num_input_tokens_seen": 211040, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.03769317753486619, |
|
"grad_norm": 1.6398649215698242, |
|
"learning_rate": 4.995615947407415e-05, |
|
"loss": 0.939, |
|
"num_input_tokens_seen": 234560, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.04146249528835281, |
|
"grad_norm": 2.327622890472412, |
|
"learning_rate": 4.994695622054618e-05, |
|
"loss": 1.0274, |
|
"num_input_tokens_seen": 257632, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.04523181304183943, |
|
"grad_norm": 2.6391327381134033, |
|
"learning_rate": 4.9936877763590664e-05, |
|
"loss": 0.8822, |
|
"num_input_tokens_seen": 281024, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.049001130795326045, |
|
"grad_norm": 1.8776222467422485, |
|
"learning_rate": 4.992592445678582e-05, |
|
"loss": 0.8146, |
|
"num_input_tokens_seen": 304688, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.052770448548812667, |
|
"grad_norm": 1.6592698097229004, |
|
"learning_rate": 4.991409668440185e-05, |
|
"loss": 0.9989, |
|
"num_input_tokens_seen": 327424, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.05653976630229928, |
|
"grad_norm": 1.709376335144043, |
|
"learning_rate": 4.990139486138743e-05, |
|
"loss": 0.9344, |
|
"num_input_tokens_seen": 350528, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.0603090840557859, |
|
"grad_norm": 1.3547413349151611, |
|
"learning_rate": 4.988781943335521e-05, |
|
"loss": 0.7932, |
|
"num_input_tokens_seen": 373280, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.06407840180927252, |
|
"grad_norm": 1.3162572383880615, |
|
"learning_rate": 4.987337087656614e-05, |
|
"loss": 0.9445, |
|
"num_input_tokens_seen": 395856, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.06784771956275915, |
|
"grad_norm": 1.8211767673492432, |
|
"learning_rate": 4.985804969791279e-05, |
|
"loss": 0.7369, |
|
"num_input_tokens_seen": 418704, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.07161703731624576, |
|
"grad_norm": 1.552996039390564, |
|
"learning_rate": 4.984185643490151e-05, |
|
"loss": 1.0226, |
|
"num_input_tokens_seen": 442432, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.07538635506973237, |
|
"grad_norm": 1.816767930984497, |
|
"learning_rate": 4.9824791655633676e-05, |
|
"loss": 0.7753, |
|
"num_input_tokens_seen": 466128, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.079155672823219, |
|
"grad_norm": 1.6724802255630493, |
|
"learning_rate": 4.9806855958785625e-05, |
|
"loss": 0.8278, |
|
"num_input_tokens_seen": 489536, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.08292499057670562, |
|
"grad_norm": 2.187622308731079, |
|
"learning_rate": 4.978804997358779e-05, |
|
"loss": 0.8432, |
|
"num_input_tokens_seen": 513200, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.08669430833019223, |
|
"grad_norm": 1.500815987586975, |
|
"learning_rate": 4.9768374359802525e-05, |
|
"loss": 0.9649, |
|
"num_input_tokens_seen": 536432, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.09046362608367886, |
|
"grad_norm": 1.6637320518493652, |
|
"learning_rate": 4.9747829807701e-05, |
|
"loss": 0.8249, |
|
"num_input_tokens_seen": 559776, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.09423294383716548, |
|
"grad_norm": 1.5978686809539795, |
|
"learning_rate": 4.972641703803896e-05, |
|
"loss": 0.9157, |
|
"num_input_tokens_seen": 583248, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.09800226159065209, |
|
"grad_norm": 1.4269095659255981, |
|
"learning_rate": 4.9704136802031485e-05, |
|
"loss": 0.885, |
|
"num_input_tokens_seen": 606768, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.1017715793441387, |
|
"grad_norm": 1.6915644407272339, |
|
"learning_rate": 4.96809898813266e-05, |
|
"loss": 0.8736, |
|
"num_input_tokens_seen": 630896, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.10554089709762533, |
|
"grad_norm": 1.6915837526321411, |
|
"learning_rate": 4.965697708797784e-05, |
|
"loss": 0.7312, |
|
"num_input_tokens_seen": 654320, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.10931021485111195, |
|
"grad_norm": 1.921988606452942, |
|
"learning_rate": 4.963209926441581e-05, |
|
"loss": 0.9478, |
|
"num_input_tokens_seen": 677248, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.11307953260459856, |
|
"grad_norm": 2.241665840148926, |
|
"learning_rate": 4.9606357283418575e-05, |
|
"loss": 0.9174, |
|
"num_input_tokens_seen": 700672, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.11684885035808519, |
|
"grad_norm": 2.076327323913574, |
|
"learning_rate": 4.957975204808108e-05, |
|
"loss": 0.8453, |
|
"num_input_tokens_seen": 724480, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.1206181681115718, |
|
"grad_norm": 1.5655834674835205, |
|
"learning_rate": 4.955228449178345e-05, |
|
"loss": 0.701, |
|
"num_input_tokens_seen": 748144, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.12438748586505842, |
|
"grad_norm": 1.8062623739242554, |
|
"learning_rate": 4.952395557815826e-05, |
|
"loss": 0.7981, |
|
"num_input_tokens_seen": 771584, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.12815680361854503, |
|
"grad_norm": 2.2940807342529297, |
|
"learning_rate": 4.949476630105669e-05, |
|
"loss": 0.8824, |
|
"num_input_tokens_seen": 795248, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.13192612137203166, |
|
"grad_norm": 2.108461856842041, |
|
"learning_rate": 4.9464717684513726e-05, |
|
"loss": 0.8368, |
|
"num_input_tokens_seen": 818272, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.1356954391255183, |
|
"grad_norm": 1.5639266967773438, |
|
"learning_rate": 4.943381078271214e-05, |
|
"loss": 0.951, |
|
"num_input_tokens_seen": 841440, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.1394647568790049, |
|
"grad_norm": 1.4479436874389648, |
|
"learning_rate": 4.9402046679945613e-05, |
|
"loss": 0.8697, |
|
"num_input_tokens_seen": 864640, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.14323407463249152, |
|
"grad_norm": 1.8331745862960815, |
|
"learning_rate": 4.936942649058061e-05, |
|
"loss": 0.7765, |
|
"num_input_tokens_seen": 888032, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.14700339238597815, |
|
"grad_norm": 1.6916519403457642, |
|
"learning_rate": 4.933595135901732e-05, |
|
"loss": 0.778, |
|
"num_input_tokens_seen": 911008, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.15077271013946475, |
|
"grad_norm": 2.0202736854553223, |
|
"learning_rate": 4.930162245964952e-05, |
|
"loss": 0.8926, |
|
"num_input_tokens_seen": 934432, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.15454202789295138, |
|
"grad_norm": 1.8288179636001587, |
|
"learning_rate": 4.926644099682334e-05, |
|
"loss": 0.805, |
|
"num_input_tokens_seen": 958064, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.158311345646438, |
|
"grad_norm": 1.9957374334335327, |
|
"learning_rate": 4.9230408204795034e-05, |
|
"loss": 0.8433, |
|
"num_input_tokens_seen": 980992, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.1620806633999246, |
|
"grad_norm": 1.5921601057052612, |
|
"learning_rate": 4.9193525347687696e-05, |
|
"loss": 0.8483, |
|
"num_input_tokens_seen": 1004736, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.16584998115341124, |
|
"grad_norm": 2.1509461402893066, |
|
"learning_rate": 4.9155793719446863e-05, |
|
"loss": 0.9689, |
|
"num_input_tokens_seen": 1028320, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.16961929890689786, |
|
"grad_norm": 1.9183369874954224, |
|
"learning_rate": 4.911721464379516e-05, |
|
"loss": 0.9025, |
|
"num_input_tokens_seen": 1051696, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.17338861666038446, |
|
"grad_norm": 1.5253652334213257, |
|
"learning_rate": 4.907778947418585e-05, |
|
"loss": 0.8423, |
|
"num_input_tokens_seen": 1074640, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.1771579344138711, |
|
"grad_norm": 1.917833924293518, |
|
"learning_rate": 4.9037519593755356e-05, |
|
"loss": 0.968, |
|
"num_input_tokens_seen": 1098096, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.18092725216735772, |
|
"grad_norm": 1.8350000381469727, |
|
"learning_rate": 4.89964064152747e-05, |
|
"loss": 0.8118, |
|
"num_input_tokens_seen": 1121344, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.18469656992084432, |
|
"grad_norm": 1.4250982999801636, |
|
"learning_rate": 4.895445138110001e-05, |
|
"loss": 0.8695, |
|
"num_input_tokens_seen": 1144608, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.18846588767433095, |
|
"grad_norm": 3.1919894218444824, |
|
"learning_rate": 4.891165596312186e-05, |
|
"loss": 0.9135, |
|
"num_input_tokens_seen": 1168112, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.19223520542781755, |
|
"grad_norm": 2.4486422538757324, |
|
"learning_rate": 4.886802166271364e-05, |
|
"loss": 0.798, |
|
"num_input_tokens_seen": 1190992, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.19600452318130418, |
|
"grad_norm": 1.6712846755981445, |
|
"learning_rate": 4.882355001067892e-05, |
|
"loss": 0.8068, |
|
"num_input_tokens_seen": 1214832, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.1997738409347908, |
|
"grad_norm": 2.27498197555542, |
|
"learning_rate": 4.8778242567197685e-05, |
|
"loss": 0.7621, |
|
"num_input_tokens_seen": 1238080, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.2035431586882774, |
|
"grad_norm": 1.8954802751541138, |
|
"learning_rate": 4.873210092177167e-05, |
|
"loss": 0.7604, |
|
"num_input_tokens_seen": 1261056, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.20731247644176404, |
|
"grad_norm": 2.805199146270752, |
|
"learning_rate": 4.868512669316855e-05, |
|
"loss": 0.9166, |
|
"num_input_tokens_seen": 1283968, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.21108179419525067, |
|
"grad_norm": 1.6355040073394775, |
|
"learning_rate": 4.863732152936514e-05, |
|
"loss": 0.8487, |
|
"num_input_tokens_seen": 1308192, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.21485111194873727, |
|
"grad_norm": 1.5382211208343506, |
|
"learning_rate": 4.858868710748963e-05, |
|
"loss": 0.9388, |
|
"num_input_tokens_seen": 1331648, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.2186204297022239, |
|
"grad_norm": 4.8234052658081055, |
|
"learning_rate": 4.85392251337627e-05, |
|
"loss": 0.8762, |
|
"num_input_tokens_seen": 1354880, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.22238974745571052, |
|
"grad_norm": 1.7781318426132202, |
|
"learning_rate": 4.848893734343769e-05, |
|
"loss": 0.8672, |
|
"num_input_tokens_seen": 1378064, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.22615906520919712, |
|
"grad_norm": 1.4167485237121582, |
|
"learning_rate": 4.8437825500739696e-05, |
|
"loss": 0.9335, |
|
"num_input_tokens_seen": 1401568, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.22992838296268375, |
|
"grad_norm": 1.112923502922058, |
|
"learning_rate": 4.838589139880371e-05, |
|
"loss": 0.7408, |
|
"num_input_tokens_seen": 1424960, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.23369770071617038, |
|
"grad_norm": 1.789054274559021, |
|
"learning_rate": 4.833313685961167e-05, |
|
"loss": 0.7255, |
|
"num_input_tokens_seen": 1448560, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.23746701846965698, |
|
"grad_norm": 2.056309223175049, |
|
"learning_rate": 4.82795637339286e-05, |
|
"loss": 0.8214, |
|
"num_input_tokens_seen": 1472224, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.2412363362231436, |
|
"grad_norm": 2.14561128616333, |
|
"learning_rate": 4.822517390123761e-05, |
|
"loss": 0.7577, |
|
"num_input_tokens_seen": 1495232, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.24500565397663024, |
|
"grad_norm": 2.092505931854248, |
|
"learning_rate": 4.8169969269674016e-05, |
|
"loss": 0.7087, |
|
"num_input_tokens_seen": 1518608, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.24877497173011684, |
|
"grad_norm": 3.2833504676818848, |
|
"learning_rate": 4.811395177595836e-05, |
|
"loss": 0.8798, |
|
"num_input_tokens_seen": 1541552, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.25254428948360347, |
|
"grad_norm": 3.2298083305358887, |
|
"learning_rate": 4.8057123385328495e-05, |
|
"loss": 0.8511, |
|
"num_input_tokens_seen": 1565136, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.25631360723709007, |
|
"grad_norm": 3.187859296798706, |
|
"learning_rate": 4.799948609147061e-05, |
|
"loss": 0.8655, |
|
"num_input_tokens_seen": 1588624, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.2600829249905767, |
|
"grad_norm": 1.7054897546768188, |
|
"learning_rate": 4.7941041916449316e-05, |
|
"loss": 0.7643, |
|
"num_input_tokens_seen": 1612016, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.2638522427440633, |
|
"grad_norm": 2.040390729904175, |
|
"learning_rate": 4.788179291063667e-05, |
|
"loss": 0.8325, |
|
"num_input_tokens_seen": 1635856, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.2676215604975499, |
|
"grad_norm": 1.6121920347213745, |
|
"learning_rate": 4.78217411526403e-05, |
|
"loss": 0.6917, |
|
"num_input_tokens_seen": 1659168, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.2713908782510366, |
|
"grad_norm": 1.7065237760543823, |
|
"learning_rate": 4.7760888749230416e-05, |
|
"loss": 0.9086, |
|
"num_input_tokens_seen": 1682480, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.2751601960045232, |
|
"grad_norm": 2.437305212020874, |
|
"learning_rate": 4.769923783526593e-05, |
|
"loss": 0.9648, |
|
"num_input_tokens_seen": 1705952, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.2789295137580098, |
|
"grad_norm": 1.7989013195037842, |
|
"learning_rate": 4.7636790573619586e-05, |
|
"loss": 0.8712, |
|
"num_input_tokens_seen": 1729504, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.28269883151149644, |
|
"grad_norm": 1.2867515087127686, |
|
"learning_rate": 4.7573549155102014e-05, |
|
"loss": 0.7044, |
|
"num_input_tokens_seen": 1752784, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.28646814926498304, |
|
"grad_norm": 1.7485004663467407, |
|
"learning_rate": 4.7509515798384956e-05, |
|
"loss": 0.92, |
|
"num_input_tokens_seen": 1775840, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.29023746701846964, |
|
"grad_norm": 1.8417341709136963, |
|
"learning_rate": 4.7444692749923345e-05, |
|
"loss": 0.7396, |
|
"num_input_tokens_seen": 1798944, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.2940067847719563, |
|
"grad_norm": 1.755854606628418, |
|
"learning_rate": 4.7379082283876566e-05, |
|
"loss": 0.8619, |
|
"num_input_tokens_seen": 1822016, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.2977761025254429, |
|
"grad_norm": 1.924985408782959, |
|
"learning_rate": 4.73126867020286e-05, |
|
"loss": 0.7399, |
|
"num_input_tokens_seen": 1845584, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.3015454202789295, |
|
"grad_norm": 2.078490972518921, |
|
"learning_rate": 4.724550833370735e-05, |
|
"loss": 0.7882, |
|
"num_input_tokens_seen": 1868976, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.30531473803241616, |
|
"grad_norm": 1.7066706418991089, |
|
"learning_rate": 4.717754953570286e-05, |
|
"loss": 0.7579, |
|
"num_input_tokens_seen": 1892096, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.30908405578590276, |
|
"grad_norm": 1.5587912797927856, |
|
"learning_rate": 4.710881269218467e-05, |
|
"loss": 0.8531, |
|
"num_input_tokens_seen": 1915136, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.31285337353938936, |
|
"grad_norm": 2.13066029548645, |
|
"learning_rate": 4.7039300214618134e-05, |
|
"loss": 0.8279, |
|
"num_input_tokens_seen": 1938464, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.316622691292876, |
|
"grad_norm": 1.6272748708724976, |
|
"learning_rate": 4.696901454167988e-05, |
|
"loss": 0.9496, |
|
"num_input_tokens_seen": 1961696, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.3203920090463626, |
|
"grad_norm": 1.9481947422027588, |
|
"learning_rate": 4.68979581391722e-05, |
|
"loss": 0.7804, |
|
"num_input_tokens_seen": 1985072, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.3241613267998492, |
|
"grad_norm": 2.0169599056243896, |
|
"learning_rate": 4.682613349993655e-05, |
|
"loss": 0.8848, |
|
"num_input_tokens_seen": 2007936, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.32793064455333587, |
|
"grad_norm": 2.285831928253174, |
|
"learning_rate": 4.675354314376614e-05, |
|
"loss": 0.7155, |
|
"num_input_tokens_seen": 2031280, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.33169996230682247, |
|
"grad_norm": 1.7333121299743652, |
|
"learning_rate": 4.6680189617317474e-05, |
|
"loss": 0.7408, |
|
"num_input_tokens_seen": 2054176, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.33546928006030907, |
|
"grad_norm": 1.5312005281448364, |
|
"learning_rate": 4.660607549402103e-05, |
|
"loss": 0.6917, |
|
"num_input_tokens_seen": 2077392, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.33923859781379573, |
|
"grad_norm": 1.2847707271575928, |
|
"learning_rate": 4.6531203373991014e-05, |
|
"loss": 0.8376, |
|
"num_input_tokens_seen": 2100464, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.34300791556728233, |
|
"grad_norm": 2.3927323818206787, |
|
"learning_rate": 4.645557588393407e-05, |
|
"loss": 0.7672, |
|
"num_input_tokens_seen": 2124240, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.34677723332076893, |
|
"grad_norm": 1.467666506767273, |
|
"learning_rate": 4.63791956770572e-05, |
|
"loss": 0.7285, |
|
"num_input_tokens_seen": 2147584, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.3505465510742556, |
|
"grad_norm": 2.08063006401062, |
|
"learning_rate": 4.6302065432974616e-05, |
|
"loss": 0.8577, |
|
"num_input_tokens_seen": 2170976, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.3543158688277422, |
|
"grad_norm": 2.0516254901885986, |
|
"learning_rate": 4.6224187857613786e-05, |
|
"loss": 0.8195, |
|
"num_input_tokens_seen": 2194128, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.3580851865812288, |
|
"grad_norm": 1.4173938035964966, |
|
"learning_rate": 4.6145565683120496e-05, |
|
"loss": 0.878, |
|
"num_input_tokens_seen": 2217056, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.36185450433471544, |
|
"grad_norm": 2.968780517578125, |
|
"learning_rate": 4.606620166776294e-05, |
|
"loss": 0.8187, |
|
"num_input_tokens_seen": 2240800, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.36562382208820204, |
|
"grad_norm": 2.426954507827759, |
|
"learning_rate": 4.598609859583506e-05, |
|
"loss": 0.7713, |
|
"num_input_tokens_seen": 2263984, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.36939313984168864, |
|
"grad_norm": 1.7469669580459595, |
|
"learning_rate": 4.590525927755874e-05, |
|
"loss": 0.8583, |
|
"num_input_tokens_seen": 2287776, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.3731624575951753, |
|
"grad_norm": 4.131247043609619, |
|
"learning_rate": 4.582368654898533e-05, |
|
"loss": 0.8106, |
|
"num_input_tokens_seen": 2311072, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.3769317753486619, |
|
"grad_norm": 2.013920545578003, |
|
"learning_rate": 4.5741383271896094e-05, |
|
"loss": 0.8578, |
|
"num_input_tokens_seen": 2334768, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.3807010931021485, |
|
"grad_norm": 2.0032143592834473, |
|
"learning_rate": 4.565835233370178e-05, |
|
"loss": 0.7192, |
|
"num_input_tokens_seen": 2358272, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.3844704108556351, |
|
"grad_norm": 1.9632434844970703, |
|
"learning_rate": 4.557459664734141e-05, |
|
"loss": 0.8681, |
|
"num_input_tokens_seen": 2381440, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.38823972860912176, |
|
"grad_norm": 3.1098926067352295, |
|
"learning_rate": 4.549011915118001e-05, |
|
"loss": 0.6713, |
|
"num_input_tokens_seen": 2403984, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.39200904636260836, |
|
"grad_norm": 2.1910107135772705, |
|
"learning_rate": 4.540492280890555e-05, |
|
"loss": 0.7997, |
|
"num_input_tokens_seen": 2427392, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.39577836411609496, |
|
"grad_norm": 2.5994174480438232, |
|
"learning_rate": 4.531901060942497e-05, |
|
"loss": 0.8715, |
|
"num_input_tokens_seen": 2450384, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.3995476818695816, |
|
"grad_norm": 2.206372022628784, |
|
"learning_rate": 4.523238556675935e-05, |
|
"loss": 0.6608, |
|
"num_input_tokens_seen": 2473424, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.4033169996230682, |
|
"grad_norm": 2.1349329948425293, |
|
"learning_rate": 4.514505071993812e-05, |
|
"loss": 0.7568, |
|
"num_input_tokens_seen": 2497232, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.4070863173765548, |
|
"grad_norm": 2.0707926750183105, |
|
"learning_rate": 4.505700913289246e-05, |
|
"loss": 0.8325, |
|
"num_input_tokens_seen": 2520432, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.4108556351300415, |
|
"grad_norm": 1.6440961360931396, |
|
"learning_rate": 4.496826389434784e-05, |
|
"loss": 0.8083, |
|
"num_input_tokens_seen": 2543616, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.4146249528835281, |
|
"grad_norm": 2.5610768795013428, |
|
"learning_rate": 4.48788181177156e-05, |
|
"loss": 0.7702, |
|
"num_input_tokens_seen": 2566992, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.4183942706370147, |
|
"grad_norm": 2.882587432861328, |
|
"learning_rate": 4.478867494098381e-05, |
|
"loss": 0.6993, |
|
"num_input_tokens_seen": 2590144, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.42216358839050133, |
|
"grad_norm": 2.3161754608154297, |
|
"learning_rate": 4.469783752660709e-05, |
|
"loss": 0.8109, |
|
"num_input_tokens_seen": 2614240, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.42593290614398793, |
|
"grad_norm": 2.578278064727783, |
|
"learning_rate": 4.460630906139571e-05, |
|
"loss": 0.7901, |
|
"num_input_tokens_seen": 2637696, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.42970222389747453, |
|
"grad_norm": 2.362873077392578, |
|
"learning_rate": 4.451409275640379e-05, |
|
"loss": 0.9066, |
|
"num_input_tokens_seen": 2660768, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.4334715416509612, |
|
"grad_norm": 1.8829095363616943, |
|
"learning_rate": 4.442119184681664e-05, |
|
"loss": 0.703, |
|
"num_input_tokens_seen": 2683792, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.4372408594044478, |
|
"grad_norm": 1.8470242023468018, |
|
"learning_rate": 4.432760959183725e-05, |
|
"loss": 0.9203, |
|
"num_input_tokens_seen": 2707136, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.4410101771579344, |
|
"grad_norm": 2.575732707977295, |
|
"learning_rate": 4.423334927457198e-05, |
|
"loss": 0.9314, |
|
"num_input_tokens_seen": 2729808, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.44477949491142105, |
|
"grad_norm": 1.765871524810791, |
|
"learning_rate": 4.413841420191532e-05, |
|
"loss": 0.7105, |
|
"num_input_tokens_seen": 2752992, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.44854881266490765, |
|
"grad_norm": 1.8366035223007202, |
|
"learning_rate": 4.404280770443398e-05, |
|
"loss": 0.894, |
|
"num_input_tokens_seen": 2776480, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.45231813041839425, |
|
"grad_norm": 2.326671838760376, |
|
"learning_rate": 4.3946533136249926e-05, |
|
"loss": 0.8868, |
|
"num_input_tokens_seen": 2799728, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.4560874481718809, |
|
"grad_norm": 1.8004709482192993, |
|
"learning_rate": 4.384959387492277e-05, |
|
"loss": 0.7679, |
|
"num_input_tokens_seen": 2823008, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.4598567659253675, |
|
"grad_norm": 1.893351435661316, |
|
"learning_rate": 4.37519933213313e-05, |
|
"loss": 0.7895, |
|
"num_input_tokens_seen": 2845984, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.4636260836788541, |
|
"grad_norm": 2.1309449672698975, |
|
"learning_rate": 4.365373489955411e-05, |
|
"loss": 0.8527, |
|
"num_input_tokens_seen": 2869024, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.46739540143234076, |
|
"grad_norm": 2.4292616844177246, |
|
"learning_rate": 4.355482205674951e-05, |
|
"loss": 0.7533, |
|
"num_input_tokens_seen": 2892240, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.47116471918582736, |
|
"grad_norm": 4.796603679656982, |
|
"learning_rate": 4.3455258263034605e-05, |
|
"loss": 0.8048, |
|
"num_input_tokens_seen": 2915792, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.47493403693931396, |
|
"grad_norm": 2.2890474796295166, |
|
"learning_rate": 4.33550470113635e-05, |
|
"loss": 0.7072, |
|
"num_input_tokens_seen": 2939488, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.4787033546928006, |
|
"grad_norm": 1.8012391328811646, |
|
"learning_rate": 4.3254191817404804e-05, |
|
"loss": 0.7911, |
|
"num_input_tokens_seen": 2962992, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.4824726724462872, |
|
"grad_norm": 1.8014194965362549, |
|
"learning_rate": 4.3152696219418295e-05, |
|
"loss": 0.8293, |
|
"num_input_tokens_seen": 2986544, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.4862419901997738, |
|
"grad_norm": 2.239366292953491, |
|
"learning_rate": 4.305056377813075e-05, |
|
"loss": 0.8835, |
|
"num_input_tokens_seen": 3009984, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.4900113079532605, |
|
"grad_norm": 1.5583738088607788, |
|
"learning_rate": 4.294779807661105e-05, |
|
"loss": 0.7262, |
|
"num_input_tokens_seen": 3033520, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.4937806257067471, |
|
"grad_norm": 1.7024521827697754, |
|
"learning_rate": 4.2844402720144496e-05, |
|
"loss": 0.8231, |
|
"num_input_tokens_seen": 3057056, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.4975499434602337, |
|
"grad_norm": 3.108896493911743, |
|
"learning_rate": 4.274038133610628e-05, |
|
"loss": 0.8302, |
|
"num_input_tokens_seen": 3080656, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.5013192612137203, |
|
"grad_norm": 3.166442394256592, |
|
"learning_rate": 4.263573757383427e-05, |
|
"loss": 0.8095, |
|
"num_input_tokens_seen": 3103792, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.5050885789672069, |
|
"grad_norm": 2.218747854232788, |
|
"learning_rate": 4.2530475104500956e-05, |
|
"loss": 0.8756, |
|
"num_input_tokens_seen": 3126976, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.5088578967206936, |
|
"grad_norm": 2.5289299488067627, |
|
"learning_rate": 4.242459762098466e-05, |
|
"loss": 0.7733, |
|
"num_input_tokens_seen": 3150224, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.5126272144741801, |
|
"grad_norm": 1.674834132194519, |
|
"learning_rate": 4.231810883773999e-05, |
|
"loss": 0.7715, |
|
"num_input_tokens_seen": 3173296, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.5163965322276668, |
|
"grad_norm": 1.7702105045318604, |
|
"learning_rate": 4.2211012490667524e-05, |
|
"loss": 0.6996, |
|
"num_input_tokens_seen": 3196560, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.5201658499811534, |
|
"grad_norm": 2.7741544246673584, |
|
"learning_rate": 4.2103312336982734e-05, |
|
"loss": 0.8889, |
|
"num_input_tokens_seen": 3220432, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.52393516773464, |
|
"grad_norm": 2.749328136444092, |
|
"learning_rate": 4.19950121550842e-05, |
|
"loss": 0.8717, |
|
"num_input_tokens_seen": 3243712, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.5277044854881267, |
|
"grad_norm": 2.4472267627716064, |
|
"learning_rate": 4.188611574442101e-05, |
|
"loss": 0.6314, |
|
"num_input_tokens_seen": 3266976, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.5314738032416133, |
|
"grad_norm": 2.1869001388549805, |
|
"learning_rate": 4.177662692535952e-05, |
|
"loss": 0.7517, |
|
"num_input_tokens_seen": 3290656, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.5352431209950999, |
|
"grad_norm": 1.7907201051712036, |
|
"learning_rate": 4.166654953904926e-05, |
|
"loss": 0.8373, |
|
"num_input_tokens_seen": 3313712, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.5390124387485865, |
|
"grad_norm": 1.8779516220092773, |
|
"learning_rate": 4.155588744728826e-05, |
|
"loss": 0.7025, |
|
"num_input_tokens_seen": 3336880, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.5427817565020732, |
|
"grad_norm": 2.0460379123687744, |
|
"learning_rate": 4.144464453238748e-05, |
|
"loss": 0.6709, |
|
"num_input_tokens_seen": 3360416, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.5465510742555597, |
|
"grad_norm": 2.096501350402832, |
|
"learning_rate": 4.133282469703469e-05, |
|
"loss": 0.6451, |
|
"num_input_tokens_seen": 3383408, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.5503203920090464, |
|
"grad_norm": 2.441951036453247, |
|
"learning_rate": 4.122043186415746e-05, |
|
"loss": 0.754, |
|
"num_input_tokens_seen": 3406752, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.554089709762533, |
|
"grad_norm": 2.059370517730713, |
|
"learning_rate": 4.110746997678565e-05, |
|
"loss": 0.8784, |
|
"num_input_tokens_seen": 3429968, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.5578590275160196, |
|
"grad_norm": 1.969912052154541, |
|
"learning_rate": 4.0993942997912984e-05, |
|
"loss": 0.9384, |
|
"num_input_tokens_seen": 3453136, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.5616283452695062, |
|
"grad_norm": 2.3595261573791504, |
|
"learning_rate": 4.087985491035804e-05, |
|
"loss": 0.7915, |
|
"num_input_tokens_seen": 3476384, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.5653976630229929, |
|
"grad_norm": 2.187009572982788, |
|
"learning_rate": 4.076520971662455e-05, |
|
"loss": 0.8374, |
|
"num_input_tokens_seen": 3500176, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.5691669807764794, |
|
"grad_norm": 1.7618275880813599, |
|
"learning_rate": 4.065001143876097e-05, |
|
"loss": 0.7302, |
|
"num_input_tokens_seen": 3524048, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.5729362985299661, |
|
"grad_norm": 2.1269426345825195, |
|
"learning_rate": 4.053426411821934e-05, |
|
"loss": 0.754, |
|
"num_input_tokens_seen": 3547056, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.5767056162834527, |
|
"grad_norm": 2.3369758129119873, |
|
"learning_rate": 4.0417971815713584e-05, |
|
"loss": 0.8208, |
|
"num_input_tokens_seen": 3570608, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.5804749340369393, |
|
"grad_norm": 2.156280279159546, |
|
"learning_rate": 4.030113861107693e-05, |
|
"loss": 0.9635, |
|
"num_input_tokens_seen": 3594192, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.5842442517904259, |
|
"grad_norm": 1.7685117721557617, |
|
"learning_rate": 4.0183768603118886e-05, |
|
"loss": 0.7606, |
|
"num_input_tokens_seen": 3617600, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.5880135695439126, |
|
"grad_norm": 2.0940916538238525, |
|
"learning_rate": 4.0065865909481417e-05, |
|
"loss": 0.7283, |
|
"num_input_tokens_seen": 3640416, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.5917828872973991, |
|
"grad_norm": 2.5771267414093018, |
|
"learning_rate": 3.994743466649442e-05, |
|
"loss": 0.6878, |
|
"num_input_tokens_seen": 3663696, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.5955522050508858, |
|
"grad_norm": 2.336862087249756, |
|
"learning_rate": 3.982847902903071e-05, |
|
"loss": 0.8009, |
|
"num_input_tokens_seen": 3686880, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.5993215228043725, |
|
"grad_norm": 2.91589093208313, |
|
"learning_rate": 3.9709003170360176e-05, |
|
"loss": 0.794, |
|
"num_input_tokens_seen": 3710224, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.603090840557859, |
|
"grad_norm": 3.051378011703491, |
|
"learning_rate": 3.958901128200344e-05, |
|
"loss": 0.7532, |
|
"num_input_tokens_seen": 3733536, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.6068601583113457, |
|
"grad_norm": 3.457322835922241, |
|
"learning_rate": 3.946850757358475e-05, |
|
"loss": 0.8581, |
|
"num_input_tokens_seen": 3756688, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.6106294760648323, |
|
"grad_norm": 1.4198774099349976, |
|
"learning_rate": 3.9347496272684325e-05, |
|
"loss": 0.7889, |
|
"num_input_tokens_seen": 3779504, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.6143987938183189, |
|
"grad_norm": 2.5762572288513184, |
|
"learning_rate": 3.922598162469003e-05, |
|
"loss": 0.7401, |
|
"num_input_tokens_seen": 3802544, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.6181681115718055, |
|
"grad_norm": 2.5552995204925537, |
|
"learning_rate": 3.910396789264845e-05, |
|
"loss": 0.799, |
|
"num_input_tokens_seen": 3825680, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.6219374293252922, |
|
"grad_norm": 1.8057173490524292, |
|
"learning_rate": 3.8981459357115325e-05, |
|
"loss": 0.8015, |
|
"num_input_tokens_seen": 3849040, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.6257067470787787, |
|
"grad_norm": 1.8398315906524658, |
|
"learning_rate": 3.885846031600536e-05, |
|
"loss": 0.6905, |
|
"num_input_tokens_seen": 3872320, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.6294760648322654, |
|
"grad_norm": 2.7629125118255615, |
|
"learning_rate": 3.8734975084441466e-05, |
|
"loss": 0.845, |
|
"num_input_tokens_seen": 3895920, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.633245382585752, |
|
"grad_norm": 1.9969942569732666, |
|
"learning_rate": 3.8611007994603365e-05, |
|
"loss": 0.7317, |
|
"num_input_tokens_seen": 3919360, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.6370147003392386, |
|
"grad_norm": 3.591285467147827, |
|
"learning_rate": 3.8486563395575625e-05, |
|
"loss": 0.724, |
|
"num_input_tokens_seen": 3942736, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.6407840180927252, |
|
"grad_norm": 2.4714577198028564, |
|
"learning_rate": 3.8361645653195026e-05, |
|
"loss": 0.664, |
|
"num_input_tokens_seen": 3966144, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.6445533358462119, |
|
"grad_norm": 3.081908702850342, |
|
"learning_rate": 3.823625914989748e-05, |
|
"loss": 0.5922, |
|
"num_input_tokens_seen": 3989472, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.6483226535996984, |
|
"grad_norm": 3.5514636039733887, |
|
"learning_rate": 3.811040828456421e-05, |
|
"loss": 0.9465, |
|
"num_input_tokens_seen": 4012656, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.6520919713531851, |
|
"grad_norm": 3.4660677909851074, |
|
"learning_rate": 3.798409747236745e-05, |
|
"loss": 0.7781, |
|
"num_input_tokens_seen": 4035888, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.6558612891066717, |
|
"grad_norm": 3.08896541595459, |
|
"learning_rate": 3.7857331144615574e-05, |
|
"loss": 0.8309, |
|
"num_input_tokens_seen": 4059504, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.6596306068601583, |
|
"grad_norm": 2.4300408363342285, |
|
"learning_rate": 3.773011374859761e-05, |
|
"loss": 0.7812, |
|
"num_input_tokens_seen": 4083440, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.6633999246136449, |
|
"grad_norm": 3.5635619163513184, |
|
"learning_rate": 3.7602449747427204e-05, |
|
"loss": 0.7888, |
|
"num_input_tokens_seen": 4106928, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.6671692423671316, |
|
"grad_norm": 1.9514297246932983, |
|
"learning_rate": 3.747434361988608e-05, |
|
"loss": 0.7584, |
|
"num_input_tokens_seen": 4130336, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.6709385601206181, |
|
"grad_norm": 1.6948657035827637, |
|
"learning_rate": 3.734579986026688e-05, |
|
"loss": 0.6848, |
|
"num_input_tokens_seen": 4153872, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.6747078778741048, |
|
"grad_norm": 1.9811207056045532, |
|
"learning_rate": 3.7216822978215514e-05, |
|
"loss": 0.8068, |
|
"num_input_tokens_seen": 4177072, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.6784771956275915, |
|
"grad_norm": 3.7668049335479736, |
|
"learning_rate": 3.7087417498572944e-05, |
|
"loss": 0.8122, |
|
"num_input_tokens_seen": 4200256, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.682246513381078, |
|
"grad_norm": 2.3684747219085693, |
|
"learning_rate": 3.695758796121642e-05, |
|
"loss": 0.5764, |
|
"num_input_tokens_seen": 4223264, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.6860158311345647, |
|
"grad_norm": 2.597867250442505, |
|
"learning_rate": 3.6827338920900254e-05, |
|
"loss": 0.8898, |
|
"num_input_tokens_seen": 4247024, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.6897851488880513, |
|
"grad_norm": 2.0299720764160156, |
|
"learning_rate": 3.6696674947095984e-05, |
|
"loss": 0.7741, |
|
"num_input_tokens_seen": 4270800, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.6935544666415379, |
|
"grad_norm": 2.4363317489624023, |
|
"learning_rate": 3.656560062383208e-05, |
|
"loss": 0.8219, |
|
"num_input_tokens_seen": 4294352, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.6973237843950245, |
|
"grad_norm": 1.752125859260559, |
|
"learning_rate": 3.6434120549533135e-05, |
|
"loss": 0.6813, |
|
"num_input_tokens_seen": 4318208, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.7010931021485112, |
|
"grad_norm": 3.325110912322998, |
|
"learning_rate": 3.6302239336858545e-05, |
|
"loss": 0.6866, |
|
"num_input_tokens_seen": 4341728, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.7048624199019977, |
|
"grad_norm": 1.9037253856658936, |
|
"learning_rate": 3.6169961612540645e-05, |
|
"loss": 0.8628, |
|
"num_input_tokens_seen": 4365392, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.7086317376554844, |
|
"grad_norm": 1.9913270473480225, |
|
"learning_rate": 3.603729201722244e-05, |
|
"loss": 0.8978, |
|
"num_input_tokens_seen": 4389184, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.712401055408971, |
|
"grad_norm": 2.668295383453369, |
|
"learning_rate": 3.5904235205294776e-05, |
|
"loss": 0.7572, |
|
"num_input_tokens_seen": 4412272, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.7161703731624576, |
|
"grad_norm": 2.363468885421753, |
|
"learning_rate": 3.5770795844733035e-05, |
|
"loss": 0.7205, |
|
"num_input_tokens_seen": 4435072, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.7199396909159442, |
|
"grad_norm": 2.3162343502044678, |
|
"learning_rate": 3.5636978616933416e-05, |
|
"loss": 0.7521, |
|
"num_input_tokens_seen": 4458272, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.7237090086694309, |
|
"grad_norm": 1.9907945394515991, |
|
"learning_rate": 3.550278821654866e-05, |
|
"loss": 0.7205, |
|
"num_input_tokens_seen": 4481056, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.7274783264229174, |
|
"grad_norm": 2.0076990127563477, |
|
"learning_rate": 3.536822935132336e-05, |
|
"loss": 0.6597, |
|
"num_input_tokens_seen": 4504288, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.7312476441764041, |
|
"grad_norm": 1.6346359252929688, |
|
"learning_rate": 3.5233306741928806e-05, |
|
"loss": 0.7293, |
|
"num_input_tokens_seen": 4527552, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.7350169619298907, |
|
"grad_norm": 2.227130651473999, |
|
"learning_rate": 3.509802512179737e-05, |
|
"loss": 0.7055, |
|
"num_input_tokens_seen": 4551024, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.7387862796833773, |
|
"grad_norm": 3.003014087677002, |
|
"learning_rate": 3.496238923695646e-05, |
|
"loss": 0.763, |
|
"num_input_tokens_seen": 4574464, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.742555597436864, |
|
"grad_norm": 2.196281671524048, |
|
"learning_rate": 3.4826403845861986e-05, |
|
"loss": 0.6831, |
|
"num_input_tokens_seen": 4597600, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.7463249151903506, |
|
"grad_norm": 2.487607717514038, |
|
"learning_rate": 3.4690073719231425e-05, |
|
"loss": 0.6586, |
|
"num_input_tokens_seen": 4621424, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.7500942329438371, |
|
"grad_norm": 2.686861753463745, |
|
"learning_rate": 3.455340363987648e-05, |
|
"loss": 0.7481, |
|
"num_input_tokens_seen": 4644400, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.7538635506973238, |
|
"grad_norm": 2.511491060256958, |
|
"learning_rate": 3.4416398402535284e-05, |
|
"loss": 0.8084, |
|
"num_input_tokens_seen": 4667568, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.7576328684508103, |
|
"grad_norm": 3.698335886001587, |
|
"learning_rate": 3.427906281370414e-05, |
|
"loss": 0.8876, |
|
"num_input_tokens_seen": 4690624, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.761402186204297, |
|
"grad_norm": 4.032379150390625, |
|
"learning_rate": 3.414140169146896e-05, |
|
"loss": 0.8391, |
|
"num_input_tokens_seen": 4714032, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.7651715039577837, |
|
"grad_norm": 1.8396016359329224, |
|
"learning_rate": 3.400341986533618e-05, |
|
"loss": 0.6847, |
|
"num_input_tokens_seen": 4737280, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.7689408217112702, |
|
"grad_norm": 2.474912166595459, |
|
"learning_rate": 3.386512217606339e-05, |
|
"loss": 0.6742, |
|
"num_input_tokens_seen": 4760848, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.7727101394647569, |
|
"grad_norm": 3.3939056396484375, |
|
"learning_rate": 3.3726513475489445e-05, |
|
"loss": 0.8607, |
|
"num_input_tokens_seen": 4783888, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.7764794572182435, |
|
"grad_norm": 2.451918363571167, |
|
"learning_rate": 3.3587598626364294e-05, |
|
"loss": 0.6614, |
|
"num_input_tokens_seen": 4807312, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.7802487749717301, |
|
"grad_norm": 2.324479341506958, |
|
"learning_rate": 3.344838250217833e-05, |
|
"loss": 0.7078, |
|
"num_input_tokens_seen": 4831088, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.7840180927252167, |
|
"grad_norm": 2.647658586502075, |
|
"learning_rate": 3.330886998699149e-05, |
|
"loss": 0.7944, |
|
"num_input_tokens_seen": 4854608, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.7877874104787034, |
|
"grad_norm": 2.21478533744812, |
|
"learning_rate": 3.316906597526186e-05, |
|
"loss": 0.7436, |
|
"num_input_tokens_seen": 4877648, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.7915567282321899, |
|
"grad_norm": 2.0611255168914795, |
|
"learning_rate": 3.302897537167397e-05, |
|
"loss": 0.8234, |
|
"num_input_tokens_seen": 4900672, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.7953260459856766, |
|
"grad_norm": 2.0772039890289307, |
|
"learning_rate": 3.288860309096671e-05, |
|
"loss": 0.6463, |
|
"num_input_tokens_seen": 4924192, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.7990953637391632, |
|
"grad_norm": 3.051643133163452, |
|
"learning_rate": 3.2747954057760965e-05, |
|
"loss": 0.6872, |
|
"num_input_tokens_seen": 4947600, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.8028646814926498, |
|
"grad_norm": 2.400989532470703, |
|
"learning_rate": 3.260703320638679e-05, |
|
"loss": 0.6143, |
|
"num_input_tokens_seen": 4971888, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.8066339992461364, |
|
"grad_norm": 2.88338303565979, |
|
"learning_rate": 3.246584548071034e-05, |
|
"loss": 0.8022, |
|
"num_input_tokens_seen": 4995056, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.8104033169996231, |
|
"grad_norm": 3.0658576488494873, |
|
"learning_rate": 3.232439583396036e-05, |
|
"loss": 0.7144, |
|
"num_input_tokens_seen": 5018208, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.8141726347531096, |
|
"grad_norm": 2.397193193435669, |
|
"learning_rate": 3.2182689228554517e-05, |
|
"loss": 0.757, |
|
"num_input_tokens_seen": 5041584, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.8179419525065963, |
|
"grad_norm": 2.7260894775390625, |
|
"learning_rate": 3.204073063592522e-05, |
|
"loss": 0.729, |
|
"num_input_tokens_seen": 5064352, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.821711270260083, |
|
"grad_norm": 2.864222764968872, |
|
"learning_rate": 3.189852503634523e-05, |
|
"loss": 0.7441, |
|
"num_input_tokens_seen": 5087712, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.8254805880135695, |
|
"grad_norm": 2.9511120319366455, |
|
"learning_rate": 3.1756077418752967e-05, |
|
"loss": 0.8861, |
|
"num_input_tokens_seen": 5111104, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.8292499057670562, |
|
"grad_norm": 2.240609645843506, |
|
"learning_rate": 3.1613392780577455e-05, |
|
"loss": 0.7098, |
|
"num_input_tokens_seen": 5134720, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.8330192235205428, |
|
"grad_norm": 2.762382984161377, |
|
"learning_rate": 3.147047612756302e-05, |
|
"loss": 0.7099, |
|
"num_input_tokens_seen": 5157744, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.8367885412740294, |
|
"grad_norm": 2.2460415363311768, |
|
"learning_rate": 3.132733247359366e-05, |
|
"loss": 0.7648, |
|
"num_input_tokens_seen": 5180976, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.840557859027516, |
|
"grad_norm": 2.3306996822357178, |
|
"learning_rate": 3.118396684051714e-05, |
|
"loss": 0.7906, |
|
"num_input_tokens_seen": 5204976, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.8443271767810027, |
|
"grad_norm": 2.5235891342163086, |
|
"learning_rate": 3.104038425796884e-05, |
|
"loss": 0.6022, |
|
"num_input_tokens_seen": 5228048, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.8480964945344892, |
|
"grad_norm": 1.9855157136917114, |
|
"learning_rate": 3.089658976319528e-05, |
|
"loss": 0.8142, |
|
"num_input_tokens_seen": 5251664, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.8518658122879759, |
|
"grad_norm": 2.2459208965301514, |
|
"learning_rate": 3.0752588400877405e-05, |
|
"loss": 0.8263, |
|
"num_input_tokens_seen": 5274976, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.8556351300414625, |
|
"grad_norm": 2.2121737003326416, |
|
"learning_rate": 3.060838522295361e-05, |
|
"loss": 0.8581, |
|
"num_input_tokens_seen": 5298352, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 0.8594044477949491, |
|
"grad_norm": 2.092641592025757, |
|
"learning_rate": 3.0463985288442475e-05, |
|
"loss": 0.7224, |
|
"num_input_tokens_seen": 5321440, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.8631737655484357, |
|
"grad_norm": 2.473989963531494, |
|
"learning_rate": 3.031939366326535e-05, |
|
"loss": 0.7379, |
|
"num_input_tokens_seen": 5344992, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 0.8669430833019224, |
|
"grad_norm": 2.082732677459717, |
|
"learning_rate": 3.0174615420068563e-05, |
|
"loss": 0.6864, |
|
"num_input_tokens_seen": 5368448, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.8707124010554089, |
|
"grad_norm": 2.066802501678467, |
|
"learning_rate": 3.0029655638045496e-05, |
|
"loss": 0.6144, |
|
"num_input_tokens_seen": 5391568, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 0.8744817188088956, |
|
"grad_norm": 1.7655906677246094, |
|
"learning_rate": 2.9884519402758342e-05, |
|
"loss": 0.6818, |
|
"num_input_tokens_seen": 5414992, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.8782510365623822, |
|
"grad_norm": 2.6639342308044434, |
|
"learning_rate": 2.9739211805959783e-05, |
|
"loss": 0.8296, |
|
"num_input_tokens_seen": 5438368, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 0.8820203543158688, |
|
"grad_norm": 2.0490756034851074, |
|
"learning_rate": 2.9593737945414264e-05, |
|
"loss": 0.7764, |
|
"num_input_tokens_seen": 5461504, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.8857896720693554, |
|
"grad_norm": 2.066798448562622, |
|
"learning_rate": 2.9448102924719207e-05, |
|
"loss": 0.7245, |
|
"num_input_tokens_seen": 5484992, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.8895589898228421, |
|
"grad_norm": 2.2488503456115723, |
|
"learning_rate": 2.9302311853125942e-05, |
|
"loss": 0.8319, |
|
"num_input_tokens_seen": 5508480, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.8933283075763286, |
|
"grad_norm": 3.150104284286499, |
|
"learning_rate": 2.9156369845360467e-05, |
|
"loss": 0.6652, |
|
"num_input_tokens_seen": 5531520, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 0.8970976253298153, |
|
"grad_norm": 2.160212755203247, |
|
"learning_rate": 2.9010282021444008e-05, |
|
"loss": 0.5858, |
|
"num_input_tokens_seen": 5554480, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.900866943083302, |
|
"grad_norm": 2.795851707458496, |
|
"learning_rate": 2.8864053506513405e-05, |
|
"loss": 0.6248, |
|
"num_input_tokens_seen": 5577888, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 0.9046362608367885, |
|
"grad_norm": 2.5486669540405273, |
|
"learning_rate": 2.8717689430641292e-05, |
|
"loss": 0.8447, |
|
"num_input_tokens_seen": 5601424, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.9084055785902752, |
|
"grad_norm": 2.2841227054595947, |
|
"learning_rate": 2.857119492865613e-05, |
|
"loss": 0.6355, |
|
"num_input_tokens_seen": 5624880, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 0.9121748963437618, |
|
"grad_norm": 2.077192544937134, |
|
"learning_rate": 2.842457513996207e-05, |
|
"loss": 0.5075, |
|
"num_input_tokens_seen": 5648160, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 0.9159442140972484, |
|
"grad_norm": 2.1494250297546387, |
|
"learning_rate": 2.8277835208358637e-05, |
|
"loss": 0.5762, |
|
"num_input_tokens_seen": 5671280, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 0.919713531850735, |
|
"grad_norm": 2.5202853679656982, |
|
"learning_rate": 2.813098028186028e-05, |
|
"loss": 0.666, |
|
"num_input_tokens_seen": 5694352, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 0.9234828496042217, |
|
"grad_norm": 3.3707363605499268, |
|
"learning_rate": 2.798401551251576e-05, |
|
"loss": 0.6056, |
|
"num_input_tokens_seen": 5717536, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 0.9272521673577082, |
|
"grad_norm": 3.4344711303710938, |
|
"learning_rate": 2.7836946056227426e-05, |
|
"loss": 0.8095, |
|
"num_input_tokens_seen": 5740416, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 0.9310214851111949, |
|
"grad_norm": 2.2710204124450684, |
|
"learning_rate": 2.7689777072570287e-05, |
|
"loss": 0.7501, |
|
"num_input_tokens_seen": 5763712, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 0.9347908028646815, |
|
"grad_norm": 2.100459337234497, |
|
"learning_rate": 2.7542513724611057e-05, |
|
"loss": 0.5595, |
|
"num_input_tokens_seen": 5787168, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 0.9385601206181681, |
|
"grad_norm": 3.0176243782043457, |
|
"learning_rate": 2.739516117872697e-05, |
|
"loss": 0.694, |
|
"num_input_tokens_seen": 5810704, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 0.9423294383716547, |
|
"grad_norm": 2.814894676208496, |
|
"learning_rate": 2.7247724604424557e-05, |
|
"loss": 0.8521, |
|
"num_input_tokens_seen": 5834192, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 0.9460987561251414, |
|
"grad_norm": 1.729038953781128, |
|
"learning_rate": 2.71002091741583e-05, |
|
"loss": 0.7794, |
|
"num_input_tokens_seen": 5857344, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 0.9498680738786279, |
|
"grad_norm": 2.401945114135742, |
|
"learning_rate": 2.695262006314912e-05, |
|
"loss": 0.7721, |
|
"num_input_tokens_seen": 5880448, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 0.9536373916321146, |
|
"grad_norm": 2.005068302154541, |
|
"learning_rate": 2.680496244920287e-05, |
|
"loss": 0.6403, |
|
"num_input_tokens_seen": 5903200, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 0.9574067093856012, |
|
"grad_norm": 1.8433932065963745, |
|
"learning_rate": 2.665724151252868e-05, |
|
"loss": 0.738, |
|
"num_input_tokens_seen": 5926272, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 0.9611760271390878, |
|
"grad_norm": 2.32893705368042, |
|
"learning_rate": 2.6509462435557152e-05, |
|
"loss": 0.6469, |
|
"num_input_tokens_seen": 5949680, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 0.9649453448925744, |
|
"grad_norm": 2.5352189540863037, |
|
"learning_rate": 2.6361630402758648e-05, |
|
"loss": 0.6953, |
|
"num_input_tokens_seen": 5973088, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 0.9687146626460611, |
|
"grad_norm": 2.217909574508667, |
|
"learning_rate": 2.6213750600461334e-05, |
|
"loss": 0.6365, |
|
"num_input_tokens_seen": 5996688, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 0.9724839803995476, |
|
"grad_norm": 1.218327283859253, |
|
"learning_rate": 2.6065828216669253e-05, |
|
"loss": 0.6691, |
|
"num_input_tokens_seen": 6019744, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 0.9762532981530343, |
|
"grad_norm": 1.710153579711914, |
|
"learning_rate": 2.5917868440880317e-05, |
|
"loss": 0.64, |
|
"num_input_tokens_seen": 6042640, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 0.980022615906521, |
|
"grad_norm": 2.1945626735687256, |
|
"learning_rate": 2.5769876463904265e-05, |
|
"loss": 0.6674, |
|
"num_input_tokens_seen": 6066112, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.9837919336600075, |
|
"grad_norm": 2.905019760131836, |
|
"learning_rate": 2.5621857477680506e-05, |
|
"loss": 0.6288, |
|
"num_input_tokens_seen": 6089296, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 0.9875612514134942, |
|
"grad_norm": 2.2408697605133057, |
|
"learning_rate": 2.5473816675096017e-05, |
|
"loss": 0.8477, |
|
"num_input_tokens_seen": 6112784, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 0.9913305691669808, |
|
"grad_norm": 2.5332565307617188, |
|
"learning_rate": 2.5325759249803154e-05, |
|
"loss": 0.7663, |
|
"num_input_tokens_seen": 6136048, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 0.9950998869204674, |
|
"grad_norm": 2.0274627208709717, |
|
"learning_rate": 2.517769039603744e-05, |
|
"loss": 0.861, |
|
"num_input_tokens_seen": 6159920, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 0.998869204673954, |
|
"grad_norm": 2.1323111057281494, |
|
"learning_rate": 2.5029615308435338e-05, |
|
"loss": 0.8545, |
|
"num_input_tokens_seen": 6183024, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 1.0026385224274406, |
|
"grad_norm": 1.8671432733535767, |
|
"learning_rate": 2.4881539181851986e-05, |
|
"loss": 0.6469, |
|
"num_input_tokens_seen": 6206224, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 1.0064078401809273, |
|
"grad_norm": 1.7728124856948853, |
|
"learning_rate": 2.4733467211179008e-05, |
|
"loss": 0.7507, |
|
"num_input_tokens_seen": 6229936, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 1.0101771579344139, |
|
"grad_norm": 3.0307295322418213, |
|
"learning_rate": 2.4585404591162218e-05, |
|
"loss": 0.7181, |
|
"num_input_tokens_seen": 6253152, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 1.0139464756879004, |
|
"grad_norm": 1.6619493961334229, |
|
"learning_rate": 2.4437356516219358e-05, |
|
"loss": 0.627, |
|
"num_input_tokens_seen": 6276208, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 1.0177157934413872, |
|
"grad_norm": 2.804950714111328, |
|
"learning_rate": 2.4289328180257926e-05, |
|
"loss": 0.7266, |
|
"num_input_tokens_seen": 6299408, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 1.0214851111948737, |
|
"grad_norm": 1.9919047355651855, |
|
"learning_rate": 2.4141324776492915e-05, |
|
"loss": 0.5684, |
|
"num_input_tokens_seen": 6323024, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 1.0252544289483603, |
|
"grad_norm": 5.080316543579102, |
|
"learning_rate": 2.399335149726463e-05, |
|
"loss": 0.6196, |
|
"num_input_tokens_seen": 6345952, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 1.029023746701847, |
|
"grad_norm": 2.5290539264678955, |
|
"learning_rate": 2.3845413533856517e-05, |
|
"loss": 0.5296, |
|
"num_input_tokens_seen": 6369856, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 1.0327930644553336, |
|
"grad_norm": 2.438260555267334, |
|
"learning_rate": 2.3697516076313066e-05, |
|
"loss": 0.7537, |
|
"num_input_tokens_seen": 6393136, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 1.0365623822088201, |
|
"grad_norm": 3.0706968307495117, |
|
"learning_rate": 2.354966431325773e-05, |
|
"loss": 0.5909, |
|
"num_input_tokens_seen": 6416736, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 1.040331699962307, |
|
"grad_norm": 2.409477472305298, |
|
"learning_rate": 2.3401863431710863e-05, |
|
"loss": 0.7042, |
|
"num_input_tokens_seen": 6440048, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 1.0441010177157934, |
|
"grad_norm": 2.7514872550964355, |
|
"learning_rate": 2.325411861690776e-05, |
|
"loss": 0.7301, |
|
"num_input_tokens_seen": 6463504, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 1.04787033546928, |
|
"grad_norm": 3.314840793609619, |
|
"learning_rate": 2.3106435052116764e-05, |
|
"loss": 0.7472, |
|
"num_input_tokens_seen": 6486608, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 1.0516396532227668, |
|
"grad_norm": 3.0806994438171387, |
|
"learning_rate": 2.2958817918457412e-05, |
|
"loss": 0.5799, |
|
"num_input_tokens_seen": 6509760, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 1.0554089709762533, |
|
"grad_norm": 3.1639201641082764, |
|
"learning_rate": 2.2811272394718647e-05, |
|
"loss": 0.6512, |
|
"num_input_tokens_seen": 6532992, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.0591782887297398, |
|
"grad_norm": 2.3345203399658203, |
|
"learning_rate": 2.2663803657177173e-05, |
|
"loss": 0.748, |
|
"num_input_tokens_seen": 6556384, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 1.0629476064832266, |
|
"grad_norm": 6.463221549987793, |
|
"learning_rate": 2.2516416879415824e-05, |
|
"loss": 0.6635, |
|
"num_input_tokens_seen": 6580336, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 1.0667169242367132, |
|
"grad_norm": 2.5758097171783447, |
|
"learning_rate": 2.2369117232142077e-05, |
|
"loss": 0.5894, |
|
"num_input_tokens_seen": 6603584, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 1.0704862419901997, |
|
"grad_norm": 2.079308271408081, |
|
"learning_rate": 2.2221909883006646e-05, |
|
"loss": 0.5952, |
|
"num_input_tokens_seen": 6626864, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 1.0742555597436865, |
|
"grad_norm": 2.637446403503418, |
|
"learning_rate": 2.20747999964222e-05, |
|
"loss": 0.7849, |
|
"num_input_tokens_seen": 6650480, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 1.078024877497173, |
|
"grad_norm": 2.275803565979004, |
|
"learning_rate": 2.192779273338215e-05, |
|
"loss": 0.7059, |
|
"num_input_tokens_seen": 6673808, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 1.0817941952506596, |
|
"grad_norm": 3.0671417713165283, |
|
"learning_rate": 2.1780893251279626e-05, |
|
"loss": 0.7389, |
|
"num_input_tokens_seen": 6697232, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 1.0855635130041463, |
|
"grad_norm": 2.4702844619750977, |
|
"learning_rate": 2.163410670372652e-05, |
|
"loss": 0.5858, |
|
"num_input_tokens_seen": 6721104, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 1.0893328307576329, |
|
"grad_norm": 2.3466484546661377, |
|
"learning_rate": 2.148743824037269e-05, |
|
"loss": 0.6743, |
|
"num_input_tokens_seen": 6744624, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 1.0931021485111194, |
|
"grad_norm": 1.9515085220336914, |
|
"learning_rate": 2.1340893006725288e-05, |
|
"loss": 0.5893, |
|
"num_input_tokens_seen": 6768000, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 1.0968714662646062, |
|
"grad_norm": 2.264751434326172, |
|
"learning_rate": 2.1194476143968258e-05, |
|
"loss": 0.653, |
|
"num_input_tokens_seen": 6791296, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 1.1006407840180927, |
|
"grad_norm": 3.2526028156280518, |
|
"learning_rate": 2.1048192788781977e-05, |
|
"loss": 0.6829, |
|
"num_input_tokens_seen": 6814800, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 1.1044101017715793, |
|
"grad_norm": 3.7399232387542725, |
|
"learning_rate": 2.090204807316301e-05, |
|
"loss": 0.5642, |
|
"num_input_tokens_seen": 6838128, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 1.108179419525066, |
|
"grad_norm": 4.240833282470703, |
|
"learning_rate": 2.0756047124244095e-05, |
|
"loss": 0.6401, |
|
"num_input_tokens_seen": 6861312, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 1.1119487372785526, |
|
"grad_norm": 3.439541816711426, |
|
"learning_rate": 2.0610195064114273e-05, |
|
"loss": 0.6188, |
|
"num_input_tokens_seen": 6884128, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 1.1157180550320391, |
|
"grad_norm": 3.2410943508148193, |
|
"learning_rate": 2.0464497009639176e-05, |
|
"loss": 0.5929, |
|
"num_input_tokens_seen": 6908336, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 1.119487372785526, |
|
"grad_norm": 2.0210845470428467, |
|
"learning_rate": 2.0318958072281517e-05, |
|
"loss": 0.7596, |
|
"num_input_tokens_seen": 6931392, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 1.1232566905390124, |
|
"grad_norm": 3.183546781539917, |
|
"learning_rate": 2.017358335792178e-05, |
|
"loss": 0.6313, |
|
"num_input_tokens_seen": 6954800, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 1.127026008292499, |
|
"grad_norm": 3.0664725303649902, |
|
"learning_rate": 2.0028377966679092e-05, |
|
"loss": 0.6371, |
|
"num_input_tokens_seen": 6978384, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 1.1307953260459858, |
|
"grad_norm": 2.9559929370880127, |
|
"learning_rate": 1.9883346992732256e-05, |
|
"loss": 0.6982, |
|
"num_input_tokens_seen": 7001632, |
|
"step": 1500 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 2652, |
|
"num_input_tokens_seen": 7001632, |
|
"num_train_epochs": 2, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 8.33828956912681e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
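
The state above looks like the trainer_state.json that the Hugging Face Transformers Trainer writes next to each checkpoint: "log_history" holds one record per logging interval (every 5 steps here, per "logging_steps"), and the top-level fields summarize the run so far (step 1500 of 2652, roughly epoch 1.13 of 2). As a minimal sketch of how to inspect it, assuming the JSON is saved locally as "trainer_state.json" and that matplotlib is installed (both assumptions, not part of the original file):

import json
import matplotlib.pyplot as plt  # assumed available; not part of the original file

# Assumed filename: the JSON above saved as trainer_state.json next to a checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history record carries step, epoch, loss, learning_rate, grad_norm,
# and num_input_tokens_seen; keep only the records with a training loss.
logs = [rec for rec in state["log_history"] if "loss" in rec]
steps = [rec["step"] for rec in logs]

fig, loss_ax = plt.subplots()
loss_ax.plot(steps, [rec["loss"] for rec in logs], label="train loss")
loss_ax.set_xlabel("step")
loss_ax.set_ylabel("loss")

# Second y-axis for the learning rate, which here follows a cosine decay
# from 5e-5 (e.g. ~1.99e-5 at step 1500 of 2652 matches the cosine curve).
lr_ax = loss_ax.twinx()
lr_ax.plot(steps, [rec["learning_rate"] for rec in logs],
           color="tab:orange", label="learning rate")
lr_ax.set_ylabel("learning rate")

fig.legend(loc="upper right")
loss_ax.set_title(f"epoch {state['epoch']:.2f} / {state['num_train_epochs']}")
plt.show()

The same loop works for spotting gradient-norm spikes in this log (e.g. grad_norm 6.46 at step 1410 against a typical ~2): plot rec["grad_norm"] instead of the loss.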
|
|