{
  "best_metric": 0.0266929492354393,
  "best_model_checkpoint": "deberta-v3-base-zyda-2-transformed-readability/checkpoint-40767",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 40767,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03679446611229671,
      "grad_norm": 2.2327511310577393,
      "learning_rate": 4.9386758898128385e-05,
      "loss": 0.0956,
      "step": 500
    },
    {
      "epoch": 0.07358893222459342,
      "grad_norm": 0.9045169353485107,
      "learning_rate": 4.877351779625678e-05,
      "loss": 0.0524,
      "step": 1000
    },
    {
      "epoch": 0.11038339833689013,
      "grad_norm": 0.6377315521240234,
      "learning_rate": 4.8160276694385164e-05,
      "loss": 0.0439,
      "step": 1500
    },
    {
      "epoch": 0.14717786444918685,
      "grad_norm": 1.2090165615081787,
      "learning_rate": 4.754703559251355e-05,
      "loss": 0.0395,
      "step": 2000
    },
    {
      "epoch": 0.18397233056148354,
      "grad_norm": 1.0190351009368896,
      "learning_rate": 4.693379449064194e-05,
      "loss": 0.0387,
      "step": 2500
    },
    {
      "epoch": 0.22076679667378027,
      "grad_norm": 0.6785000562667847,
      "learning_rate": 4.632055338877033e-05,
      "loss": 0.0364,
      "step": 3000
    },
    {
      "epoch": 0.257561262786077,
      "grad_norm": 0.37076541781425476,
      "learning_rate": 4.570731228689872e-05,
      "loss": 0.0427,
      "step": 3500
    },
    {
      "epoch": 0.2943557288983737,
      "grad_norm": 0.2937301695346832,
      "learning_rate": 4.509407118502711e-05,
      "loss": 0.0359,
      "step": 4000
    },
    {
      "epoch": 0.3311501950106704,
      "grad_norm": 0.3934372663497925,
      "learning_rate": 4.448083008315549e-05,
      "loss": 0.0354,
      "step": 4500
    },
    {
      "epoch": 0.3679446611229671,
      "grad_norm": 0.8720031380653381,
      "learning_rate": 4.386758898128389e-05,
      "loss": 0.0343,
      "step": 5000
    },
    {
      "epoch": 0.40473912723526384,
      "grad_norm": 0.2753826677799225,
      "learning_rate": 4.325434787941227e-05,
      "loss": 0.0333,
      "step": 5500
    },
    {
      "epoch": 0.44153359334756054,
      "grad_norm": 0.9069143533706665,
      "learning_rate": 4.264110677754066e-05,
      "loss": 0.0319,
      "step": 6000
    },
    {
      "epoch": 0.47832805945985724,
      "grad_norm": 1.0423845052719116,
      "learning_rate": 4.202786567566905e-05,
      "loss": 0.0319,
      "step": 6500
    },
    {
      "epoch": 0.515122525572154,
      "grad_norm": 0.5137051939964294,
      "learning_rate": 4.141462457379743e-05,
      "loss": 0.0321,
      "step": 7000
    },
    {
      "epoch": 0.5519169916844506,
      "grad_norm": 0.34184473752975464,
      "learning_rate": 4.080138347192582e-05,
      "loss": 0.0316,
      "step": 7500
    },
    {
      "epoch": 0.5887114577967474,
      "grad_norm": 0.5334771275520325,
      "learning_rate": 4.018814237005421e-05,
      "loss": 0.0318,
      "step": 8000
    },
    {
      "epoch": 0.625505923909044,
      "grad_norm": 0.27346959710121155,
      "learning_rate": 3.95749012681826e-05,
      "loss": 0.0314,
      "step": 8500
    },
    {
      "epoch": 0.6623003900213408,
      "grad_norm": 0.46926313638687134,
      "learning_rate": 3.896166016631099e-05,
      "loss": 0.0308,
      "step": 9000
    },
    {
      "epoch": 0.6990948561336375,
      "grad_norm": 0.661072850227356,
      "learning_rate": 3.834841906443938e-05,
      "loss": 0.0309,
      "step": 9500
    },
    {
      "epoch": 0.7358893222459342,
      "grad_norm": 0.3445192575454712,
      "learning_rate": 3.773517796256776e-05,
      "loss": 0.03,
      "step": 10000
    },
    {
      "epoch": 0.7726837883582309,
      "grad_norm": 0.5244751572608948,
      "learning_rate": 3.712193686069616e-05,
      "loss": 0.0299,
      "step": 10500
    },
    {
      "epoch": 0.8094782544705277,
      "grad_norm": 0.401460200548172,
      "learning_rate": 3.650869575882454e-05,
      "loss": 0.0294,
      "step": 11000
    },
    {
      "epoch": 0.8462727205828243,
      "grad_norm": 0.23478317260742188,
      "learning_rate": 3.589545465695293e-05,
      "loss": 0.029,
      "step": 11500
    },
    {
      "epoch": 0.8830671866951211,
      "grad_norm": 0.4309717118740082,
      "learning_rate": 3.528221355508132e-05,
      "loss": 0.029,
      "step": 12000
    },
    {
      "epoch": 0.9198616528074177,
      "grad_norm": 0.3477807641029358,
      "learning_rate": 3.466897245320971e-05,
      "loss": 0.0295,
      "step": 12500
    },
    {
      "epoch": 0.9566561189197145,
      "grad_norm": 0.21652667224407196,
      "learning_rate": 3.405573135133809e-05,
      "loss": 0.0293,
      "step": 13000
    },
    {
      "epoch": 0.9934505850320112,
      "grad_norm": 0.46980977058410645,
      "learning_rate": 3.344249024946648e-05,
      "loss": 0.0288,
      "step": 13500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.028563737869262695,
      "eval_mse": 0.028563737035006864,
      "eval_runtime": 98.1597,
      "eval_samples_per_second": 509.374,
      "eval_steps_per_second": 63.672,
      "step": 13589
    },
    {
      "epoch": 1.030245051144308,
      "grad_norm": 0.32912561297416687,
      "learning_rate": 3.282924914759487e-05,
      "loss": 0.0258,
      "step": 14000
    },
    {
      "epoch": 1.0670395172566045,
      "grad_norm": 0.2001865804195404,
      "learning_rate": 3.221600804572326e-05,
      "loss": 0.0251,
      "step": 14500
    },
    {
      "epoch": 1.1038339833689013,
      "grad_norm": 0.4719059467315674,
      "learning_rate": 3.160276694385165e-05,
      "loss": 0.0243,
      "step": 15000
    },
    {
      "epoch": 1.140628449481198,
      "grad_norm": 0.401038259267807,
      "learning_rate": 3.098952584198003e-05,
      "loss": 0.0246,
      "step": 15500
    },
    {
      "epoch": 1.1774229155934948,
      "grad_norm": 0.24117255210876465,
      "learning_rate": 3.0376284740108423e-05,
      "loss": 0.0248,
      "step": 16000
    },
    {
      "epoch": 1.2142173817057915,
      "grad_norm": 0.24041427671909332,
      "learning_rate": 2.976304363823681e-05,
      "loss": 0.025,
      "step": 16500
    },
    {
      "epoch": 1.2510118478180883,
      "grad_norm": 0.3739044666290283,
      "learning_rate": 2.91498025363652e-05,
      "loss": 0.0257,
      "step": 17000
    },
    {
      "epoch": 1.2878063139303848,
      "grad_norm": 0.4344153106212616,
      "learning_rate": 2.8536561434493587e-05,
      "loss": 0.0244,
      "step": 17500
    },
    {
      "epoch": 1.3246007800426816,
      "grad_norm": 0.6248531341552734,
      "learning_rate": 2.7923320332621977e-05,
      "loss": 0.0249,
      "step": 18000
    },
    {
      "epoch": 1.3613952461549783,
      "grad_norm": 0.34284424781799316,
      "learning_rate": 2.7310079230750363e-05,
      "loss": 0.0247,
      "step": 18500
    },
    {
      "epoch": 1.398189712267275,
      "grad_norm": 0.33926498889923096,
      "learning_rate": 2.6696838128878755e-05,
      "loss": 0.0248,
      "step": 19000
    },
    {
      "epoch": 1.4349841783795716,
      "grad_norm": 0.2008136361837387,
      "learning_rate": 2.6083597027007138e-05,
      "loss": 0.0248,
      "step": 19500
    },
    {
      "epoch": 1.4717786444918683,
      "grad_norm": 0.5362450480461121,
      "learning_rate": 2.5470355925135524e-05,
      "loss": 0.0246,
      "step": 20000
    },
    {
      "epoch": 1.508573110604165,
      "grad_norm": 0.4919290840625763,
      "learning_rate": 2.4857114823263916e-05,
      "loss": 0.0238,
      "step": 20500
    },
    {
      "epoch": 1.5453675767164619,
      "grad_norm": 0.3778747618198395,
      "learning_rate": 2.4243873721392306e-05,
      "loss": 0.0248,
      "step": 21000
    },
    {
      "epoch": 1.5821620428287586,
      "grad_norm": 0.2485371083021164,
      "learning_rate": 2.3630632619520692e-05,
      "loss": 0.0237,
      "step": 21500
    },
    {
      "epoch": 1.6189565089410554,
      "grad_norm": 0.2995116412639618,
      "learning_rate": 2.301739151764908e-05,
      "loss": 0.0232,
      "step": 22000
    },
    {
      "epoch": 1.6557509750533521,
      "grad_norm": 0.3775917887687683,
      "learning_rate": 2.2404150415777467e-05,
      "loss": 0.0239,
      "step": 22500
    },
    {
      "epoch": 1.6925454411656486,
      "grad_norm": 0.25416481494903564,
      "learning_rate": 2.1790909313905856e-05,
      "loss": 0.0242,
      "step": 23000
    },
    {
      "epoch": 1.7293399072779454,
      "grad_norm": 0.5196259617805481,
      "learning_rate": 2.1177668212034242e-05,
      "loss": 0.0238,
      "step": 23500
    },
    {
      "epoch": 1.7661343733902422,
      "grad_norm": 3.1327126026153564,
      "learning_rate": 2.056442711016263e-05,
      "loss": 0.0308,
      "step": 24000
    },
    {
      "epoch": 1.8029288395025387,
      "grad_norm": 1.1925427913665771,
      "learning_rate": 1.995118600829102e-05,
      "loss": 0.0268,
      "step": 24500
    },
    {
      "epoch": 1.8397233056148354,
      "grad_norm": 0.5257470011711121,
      "learning_rate": 1.933794490641941e-05,
      "loss": 0.0249,
      "step": 25000
    },
    {
      "epoch": 1.8765177717271322,
      "grad_norm": 0.4024732708930969,
      "learning_rate": 1.8724703804547796e-05,
      "loss": 0.0237,
      "step": 25500
    },
    {
      "epoch": 1.913312237839429,
      "grad_norm": 0.5063018798828125,
      "learning_rate": 1.8111462702676185e-05,
      "loss": 0.0231,
      "step": 26000
    },
    {
      "epoch": 1.9501067039517257,
      "grad_norm": 0.264139860868454,
      "learning_rate": 1.7498221600804575e-05,
      "loss": 0.0261,
      "step": 26500
    },
    {
      "epoch": 1.9869011700640224,
      "grad_norm": 0.19682620465755463,
      "learning_rate": 1.688498049893296e-05,
      "loss": 0.023,
      "step": 27000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.027232788503170013,
      "eval_mse": 0.02723278669797115,
      "eval_runtime": 77.4011,
      "eval_samples_per_second": 645.986,
      "eval_steps_per_second": 80.748,
      "step": 27178
    },
    {
      "epoch": 2.023695636176319,
      "grad_norm": 0.3318944275379181,
      "learning_rate": 1.627173939706135e-05,
      "loss": 0.0208,
      "step": 27500
    },
    {
      "epoch": 2.060490102288616,
      "grad_norm": 0.20372678339481354,
      "learning_rate": 1.565849829518974e-05,
      "loss": 0.0199,
      "step": 28000
    },
    {
      "epoch": 2.0972845684009127,
      "grad_norm": 0.4932423233985901,
      "learning_rate": 1.5045257193318127e-05,
      "loss": 0.0201,
      "step": 28500
    },
    {
      "epoch": 2.134079034513209,
      "grad_norm": 0.24097684025764465,
      "learning_rate": 1.4432016091446513e-05,
      "loss": 0.0199,
      "step": 29000
    },
    {
      "epoch": 2.1708735006255058,
      "grad_norm": 0.46340519189834595,
      "learning_rate": 1.38187749895749e-05,
      "loss": 0.0194,
      "step": 29500
    },
    {
      "epoch": 2.2076679667378025,
      "grad_norm": 0.17476551234722137,
      "learning_rate": 1.320553388770329e-05,
      "loss": 0.0199,
      "step": 30000
    },
    {
      "epoch": 2.2444624328500993,
      "grad_norm": 0.7477974891662598,
      "learning_rate": 1.2592292785831677e-05,
      "loss": 0.0202,
      "step": 30500
    },
    {
      "epoch": 2.281256898962396,
      "grad_norm": 0.21329531073570251,
      "learning_rate": 1.1979051683960066e-05,
      "loss": 0.0196,
      "step": 31000
    },
    {
      "epoch": 2.318051365074693,
      "grad_norm": 0.39124202728271484,
      "learning_rate": 1.1365810582088454e-05,
      "loss": 0.0198,
      "step": 31500
    },
    {
      "epoch": 2.3548458311869895,
      "grad_norm": 0.30534929037094116,
      "learning_rate": 1.0752569480216842e-05,
      "loss": 0.0196,
      "step": 32000
    },
    {
      "epoch": 2.3916402972992863,
      "grad_norm": 0.2803601324558258,
      "learning_rate": 1.0139328378345231e-05,
      "loss": 0.0197,
      "step": 32500
    },
    {
      "epoch": 2.428434763411583,
      "grad_norm": 0.30882009863853455,
      "learning_rate": 9.526087276473619e-06,
      "loss": 0.0194,
      "step": 33000
    },
    {
      "epoch": 2.46522922952388,
      "grad_norm": 0.310523122549057,
      "learning_rate": 8.912846174602008e-06,
      "loss": 0.0193,
      "step": 33500
    },
    {
      "epoch": 2.5020236956361765,
      "grad_norm": 0.3990231156349182,
      "learning_rate": 8.299605072730394e-06,
      "loss": 0.0198,
      "step": 34000
    },
    {
      "epoch": 2.5388181617484733,
      "grad_norm": 0.41601407527923584,
      "learning_rate": 7.686363970858783e-06,
      "loss": 0.0192,
      "step": 34500
    },
    {
      "epoch": 2.5756126278607696,
      "grad_norm": 0.2621209919452667,
      "learning_rate": 7.073122868987171e-06,
      "loss": 0.0198,
      "step": 35000
    },
    {
      "epoch": 2.6124070939730664,
      "grad_norm": 0.231684148311615,
      "learning_rate": 6.459881767115559e-06,
      "loss": 0.0192,
      "step": 35500
    },
    {
      "epoch": 2.649201560085363,
      "grad_norm": 0.23939248919487,
      "learning_rate": 5.846640665243948e-06,
      "loss": 0.0191,
      "step": 36000
    },
    {
      "epoch": 2.68599602619766,
      "grad_norm": 0.22479325532913208,
      "learning_rate": 5.233399563372335e-06,
      "loss": 0.0192,
      "step": 36500
    },
    {
      "epoch": 2.7227904923099566,
      "grad_norm": 0.27915650606155396,
      "learning_rate": 4.620158461500724e-06,
      "loss": 0.0193,
      "step": 37000
    },
    {
      "epoch": 2.7595849584222534,
      "grad_norm": 0.19762490689754486,
      "learning_rate": 4.006917359629112e-06,
      "loss": 0.0191,
      "step": 37500
    },
    {
      "epoch": 2.79637942453455,
      "grad_norm": 0.42420724034309387,
      "learning_rate": 3.3936762577575e-06,
      "loss": 0.0193,
      "step": 38000
    },
    {
      "epoch": 2.8331738906468464,
      "grad_norm": 0.34259703755378723,
      "learning_rate": 2.7804351558858883e-06,
      "loss": 0.0188,
      "step": 38500
    },
    {
      "epoch": 2.869968356759143,
      "grad_norm": 0.2734413743019104,
      "learning_rate": 2.1671940540142763e-06,
      "loss": 0.019,
      "step": 39000
    },
    {
      "epoch": 2.90676282287144,
      "grad_norm": 0.16011129319667816,
      "learning_rate": 1.5539529521426646e-06,
      "loss": 0.0186,
      "step": 39500
    },
    {
      "epoch": 2.9435572889837367,
      "grad_norm": 0.4719178080558777,
      "learning_rate": 9.407118502710525e-07,
      "loss": 0.0188,
      "step": 40000
    },
    {
      "epoch": 2.9803517550960335,
      "grad_norm": 0.22022365033626556,
      "learning_rate": 3.2747074839944075e-07,
      "loss": 0.0189,
      "step": 40500
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.0266929492354393,
      "eval_mse": 0.026692949063357767,
      "eval_runtime": 78.6017,
      "eval_samples_per_second": 636.118,
      "eval_steps_per_second": 79.515,
      "step": 40767
    },
    {
      "epoch": 3.0,
      "step": 40767,
      "total_flos": 1.7161499914378214e+17,
      "train_loss": 0.026733717791019525,
      "train_runtime": 12981.5942,
      "train_samples_per_second": 200.976,
      "train_steps_per_second": 3.14
    }
  ],
  "logging_steps": 500,
  "max_steps": 40767,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7161499914378214e+17,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}