{
  "best_metric": 10.372788429260254,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 2.3928215353938187,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.031904287138584245,
      "grad_norm": 0.06419440358877182,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 10.3808,
      "step": 1
    },
    {
      "epoch": 0.031904287138584245,
      "eval_loss": 10.379875183105469,
      "eval_runtime": 0.1744,
      "eval_samples_per_second": 286.696,
      "eval_steps_per_second": 74.541,
      "step": 1
    },
    {
      "epoch": 0.06380857427716849,
      "grad_norm": 0.03532293438911438,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.3804,
      "step": 2
    },
    {
      "epoch": 0.09571286141575275,
      "grad_norm": 0.027720365673303604,
      "learning_rate": 0.0001,
      "loss": 10.3797,
      "step": 3
    },
    {
      "epoch": 0.12761714855433698,
      "grad_norm": 0.02375604398548603,
      "learning_rate": 9.99571699711836e-05,
      "loss": 10.3803,
      "step": 4
    },
    {
      "epoch": 0.15952143569292124,
      "grad_norm": 0.02083384059369564,
      "learning_rate": 9.982876141412856e-05,
      "loss": 10.3797,
      "step": 5
    },
    {
      "epoch": 0.1914257228315055,
      "grad_norm": 0.021519361063838005,
      "learning_rate": 9.961501876182148e-05,
      "loss": 10.3804,
      "step": 6
    },
    {
      "epoch": 0.22333000997008973,
      "grad_norm": 0.021820418536663055,
      "learning_rate": 9.931634888554937e-05,
      "loss": 10.3802,
      "step": 7
    },
    {
      "epoch": 0.25523429710867396,
      "grad_norm": 0.09350977092981339,
      "learning_rate": 9.893332032039701e-05,
      "loss": 10.3806,
      "step": 8
    },
    {
      "epoch": 0.28713858424725824,
      "grad_norm": 0.039236634969711304,
      "learning_rate": 9.846666218300807e-05,
      "loss": 10.3797,
      "step": 9
    },
    {
      "epoch": 0.3190428713858425,
      "grad_norm": 0.035836756229400635,
      "learning_rate": 9.791726278367022e-05,
      "loss": 10.3808,
      "step": 10
    },
    {
      "epoch": 0.3509471585244267,
      "grad_norm": 0.031301405280828476,
      "learning_rate": 9.728616793536588e-05,
      "loss": 10.3788,
      "step": 11
    },
    {
      "epoch": 0.382851445663011,
      "grad_norm": 0.02643793448805809,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.3789,
      "step": 12
    },
    {
      "epoch": 0.4147557328015952,
      "grad_norm": 0.02430611290037632,
      "learning_rate": 9.578385041664925e-05,
      "loss": 10.3781,
      "step": 13
    },
    {
      "epoch": 0.44666001994017945,
      "grad_norm": 0.02308354340493679,
      "learning_rate": 9.491548749301997e-05,
      "loss": 10.3784,
      "step": 14
    },
    {
      "epoch": 0.4785643070787637,
      "grad_norm": 0.028244340792298317,
      "learning_rate": 9.397114317029975e-05,
      "loss": 10.379,
      "step": 15
    },
    {
      "epoch": 0.5104685942173479,
      "grad_norm": 0.07378281652927399,
      "learning_rate": 9.295261506157986e-05,
      "loss": 10.3785,
      "step": 16
    },
    {
      "epoch": 0.5423728813559322,
      "grad_norm": 0.03998374566435814,
      "learning_rate": 9.186184199300464e-05,
      "loss": 10.3793,
      "step": 17
    },
    {
      "epoch": 0.5742771684945165,
      "grad_norm": 0.03800854831933975,
      "learning_rate": 9.070090031310558e-05,
      "loss": 10.3776,
      "step": 18
    },
    {
      "epoch": 0.6061814556331007,
      "grad_norm": 0.030691292136907578,
      "learning_rate": 8.947199994035401e-05,
      "loss": 10.3782,
      "step": 19
    },
    {
      "epoch": 0.638085742771685,
      "grad_norm": 0.027072234079241753,
      "learning_rate": 8.817748015645558e-05,
      "loss": 10.3783,
      "step": 20
    },
    {
      "epoch": 0.6699900299102692,
      "grad_norm": 0.02719455398619175,
      "learning_rate": 8.681980515339464e-05,
      "loss": 10.3792,
      "step": 21
    },
    {
      "epoch": 0.7018943170488534,
      "grad_norm": 0.026329834014177322,
      "learning_rate": 8.540155934270471e-05,
      "loss": 10.3786,
      "step": 22
    },
    {
      "epoch": 0.7337986041874377,
      "grad_norm": 0.03928585723042488,
      "learning_rate": 8.392544243589427e-05,
      "loss": 10.3772,
      "step": 23
    },
    {
      "epoch": 0.765702891326022,
      "grad_norm": 0.08308948576450348,
      "learning_rate": 8.239426430539243e-05,
      "loss": 10.3773,
      "step": 24
    },
    {
      "epoch": 0.7976071784646062,
      "grad_norm": 0.050812482833862305,
      "learning_rate": 8.081093963579707e-05,
      "loss": 10.3767,
      "step": 25
    },
    {
      "epoch": 0.7976071784646062,
      "eval_loss": 10.377211570739746,
      "eval_runtime": 0.1813,
      "eval_samples_per_second": 275.802,
      "eval_steps_per_second": 71.709,
      "step": 25
    },
    {
      "epoch": 0.8295114656031904,
      "grad_norm": 0.0448242649435997,
      "learning_rate": 7.917848237560709e-05,
      "loss": 10.3768,
      "step": 26
    },
    {
      "epoch": 0.8614157527417746,
      "grad_norm": 0.03327423706650734,
      "learning_rate": 7.75e-05,
      "loss": 10.3765,
      "step": 27
    },
    {
      "epoch": 0.8933200398803589,
      "grad_norm": 0.029801176860928535,
      "learning_rate": 7.577868759557654e-05,
      "loss": 10.3774,
      "step": 28
    },
    {
      "epoch": 0.9252243270189432,
      "grad_norm": 0.031676966696977615,
      "learning_rate": 7.401782177833148e-05,
      "loss": 10.378,
      "step": 29
    },
    {
      "epoch": 0.9571286141575274,
      "grad_norm": 0.032815348356962204,
      "learning_rate": 7.222075445642904e-05,
      "loss": 10.3778,
      "step": 30
    },
    {
      "epoch": 0.9890329012961117,
      "grad_norm": 0.05490710586309433,
      "learning_rate": 7.03909064496551e-05,
      "loss": 10.3771,
      "step": 31
    },
    {
      "epoch": 1.0209371884346958,
      "grad_norm": 0.10271252691745758,
      "learning_rate": 6.853176097769229e-05,
      "loss": 17.344,
      "step": 32
    },
    {
      "epoch": 1.0528414755732802,
      "grad_norm": 0.06409234553575516,
      "learning_rate": 6.664685702961344e-05,
      "loss": 10.2565,
      "step": 33
    },
    {
      "epoch": 1.0847457627118644,
      "grad_norm": 0.05500740185379982,
      "learning_rate": 6.473978262721463e-05,
      "loss": 10.3753,
      "step": 34
    },
    {
      "epoch": 1.1166500498504486,
      "grad_norm": 0.03728007897734642,
      "learning_rate": 6.281416799501188e-05,
      "loss": 10.3715,
      "step": 35
    },
    {
      "epoch": 1.148554336989033,
      "grad_norm": 0.03842683136463165,
      "learning_rate": 6.087367864990233e-05,
      "loss": 10.4211,
      "step": 36
    },
    {
      "epoch": 1.1804586241276172,
      "grad_norm": 0.03933300822973251,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 10.6714,
      "step": 37
    },
    {
      "epoch": 1.2123629112662013,
      "grad_norm": 0.03183865174651146,
      "learning_rate": 5.696287243144013e-05,
      "loss": 10.4574,
      "step": 38
    },
    {
      "epoch": 1.2442671984047857,
      "grad_norm": 0.036480437964200974,
      "learning_rate": 5.500000000000001e-05,
      "loss": 6.8814,
      "step": 39
    },
    {
      "epoch": 1.27617148554337,
      "grad_norm": 0.1208992525935173,
      "learning_rate": 5.303712756855988e-05,
      "loss": 13.486,
      "step": 40
    },
    {
      "epoch": 1.308075772681954,
      "grad_norm": 0.058902185410261154,
      "learning_rate": 5.107799157635538e-05,
      "loss": 10.3354,
      "step": 41
    },
    {
      "epoch": 1.3399800598205385,
      "grad_norm": 0.05739002674818039,
      "learning_rate": 4.912632135009769e-05,
      "loss": 10.3683,
      "step": 42
    },
    {
      "epoch": 1.3718843469591226,
      "grad_norm": 0.050218719989061356,
      "learning_rate": 4.718583200498814e-05,
      "loss": 10.3865,
      "step": 43
    },
    {
      "epoch": 1.4037886340977068,
      "grad_norm": 0.03760230913758278,
      "learning_rate": 4.526021737278538e-05,
      "loss": 10.4672,
      "step": 44
    },
    {
      "epoch": 1.4356929212362912,
      "grad_norm": 0.03762374445796013,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 10.6383,
      "step": 45
    },
    {
      "epoch": 1.4675972083748754,
      "grad_norm": 0.03604252636432648,
      "learning_rate": 4.146823902230772e-05,
      "loss": 10.726,
      "step": 46
    },
    {
      "epoch": 1.4995014955134596,
      "grad_norm": 0.07998944073915482,
      "learning_rate": 3.960909355034491e-05,
      "loss": 7.2666,
      "step": 47
    },
    {
      "epoch": 1.531405782652044,
      "grad_norm": 0.11271944642066956,
      "learning_rate": 3.777924554357096e-05,
      "loss": 12.796,
      "step": 48
    },
    {
      "epoch": 1.5633100697906281,
      "grad_norm": 0.0690908133983612,
      "learning_rate": 3.598217822166854e-05,
      "loss": 10.352,
      "step": 49
    },
    {
      "epoch": 1.5952143569292123,
      "grad_norm": 0.056807372719049454,
      "learning_rate": 3.422131240442349e-05,
      "loss": 10.3753,
      "step": 50
    },
    {
      "epoch": 1.5952143569292123,
      "eval_loss": 10.374171257019043,
      "eval_runtime": 0.1795,
      "eval_samples_per_second": 278.624,
      "eval_steps_per_second": 72.442,
      "step": 50
    },
    {
      "epoch": 1.6271186440677967,
      "grad_norm": 0.0471082478761673,
      "learning_rate": 3.250000000000001e-05,
      "loss": 10.3914,
      "step": 51
    },
    {
      "epoch": 1.659022931206381,
      "grad_norm": 0.03859815374016762,
      "learning_rate": 3.082151762439293e-05,
      "loss": 10.5452,
      "step": 52
    },
    {
      "epoch": 1.690927218344965,
      "grad_norm": 0.043793581426143646,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 10.5909,
      "step": 53
    },
    {
      "epoch": 1.7228315054835495,
      "grad_norm": 0.056106261909008026,
      "learning_rate": 2.760573569460757e-05,
      "loss": 11.2968,
      "step": 54
    },
    {
      "epoch": 1.7547357926221336,
      "grad_norm": 0.09795401245355606,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 8.1943,
      "step": 55
    },
    {
      "epoch": 1.7866400797607178,
      "grad_norm": 0.09200040251016617,
      "learning_rate": 2.459844065729529e-05,
      "loss": 11.2377,
      "step": 56
    },
    {
      "epoch": 1.8185443668993022,
      "grad_norm": 0.06912614405155182,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 10.3569,
      "step": 57
    },
    {
      "epoch": 1.8504486540378864,
      "grad_norm": 0.06139494851231575,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 10.3725,
      "step": 58
    },
    {
      "epoch": 1.8823529411764706,
      "grad_norm": 0.04969213530421257,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 10.4022,
      "step": 59
    },
    {
      "epoch": 1.914257228315055,
      "grad_norm": 0.045224543660879135,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 10.6288,
      "step": 60
    },
    {
      "epoch": 1.9461615154536391,
      "grad_norm": 0.040774866938591,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 10.4855,
      "step": 61
    },
    {
      "epoch": 1.9780658025922233,
      "grad_norm": 0.060824956744909286,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 11.9078,
      "step": 62
    },
    {
      "epoch": 2.0099700897308077,
      "grad_norm": 0.12398073822259903,
      "learning_rate": 1.602885682970026e-05,
      "loss": 15.4694,
      "step": 63
    },
    {
      "epoch": 2.0418743768693917,
      "grad_norm": 0.08448352664709091,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 10.1654,
      "step": 64
    },
    {
      "epoch": 2.073778664007976,
      "grad_norm": 0.07190793752670288,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 10.3571,
      "step": 65
    },
    {
      "epoch": 2.1056829511465605,
      "grad_norm": 0.06088152155280113,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 10.3712,
      "step": 66
    },
    {
      "epoch": 2.1375872382851444,
      "grad_norm": 0.05480685457587242,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 10.4064,
      "step": 67
    },
    {
      "epoch": 2.169491525423729,
      "grad_norm": 0.047166239470243454,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 10.6693,
      "step": 68
    },
    {
      "epoch": 2.201395812562313,
      "grad_norm": 0.04388361796736717,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 10.443,
      "step": 69
    },
    {
      "epoch": 2.233300099700897,
      "grad_norm": 0.03891513869166374,
      "learning_rate": 1.1066679679603e-05,
      "loss": 9.0255,
      "step": 70
    },
    {
      "epoch": 2.2652043868394816,
      "grad_norm": 0.12693141400814056,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 11.3904,
      "step": 71
    },
    {
      "epoch": 2.297108673978066,
      "grad_norm": 0.09395328909158707,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 10.3146,
      "step": 72
    },
    {
      "epoch": 2.32901296111665,
      "grad_norm": 0.0779162272810936,
      "learning_rate": 1.017123858587145e-05,
      "loss": 10.3691,
      "step": 73
    },
    {
      "epoch": 2.3609172482552343,
      "grad_norm": 0.06264185160398483,
      "learning_rate": 1.00428300288164e-05,
      "loss": 10.3792,
      "step": 74
    },
    {
      "epoch": 2.3928215353938187,
      "grad_norm": 0.044317249208688736,
      "learning_rate": 1e-05,
      "loss": 10.4356,
      "step": 75
    },
    {
      "epoch": 2.3928215353938187,
      "eval_loss": 10.372788429260254,
      "eval_runtime": 0.1713,
      "eval_samples_per_second": 291.857,
      "eval_steps_per_second": 75.883,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 64152089395200.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}