{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 0.03321758509380139,
      "learning_rate": 6.25e-06,
      "loss": 0.1785,
      "step": 1
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.03270132297361398,
      "learning_rate": 1.25e-05,
      "loss": 0.1808,
      "step": 2
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.031504129734254496,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 0.1782,
      "step": 3
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.036360864431180286,
      "learning_rate": 2.5e-05,
      "loss": 0.1894,
      "step": 4
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.03433059360510697,
      "learning_rate": 3.125e-05,
      "loss": 0.1797,
      "step": 5
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.035603554562599456,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.1749,
      "step": 6
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.039772993741593606,
      "learning_rate": 4.375e-05,
      "loss": 0.1717,
      "step": 7
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.03937828545083326,
      "learning_rate": 5e-05,
      "loss": 0.1695,
      "step": 8
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.04137387140973187,
      "learning_rate": 4.997620553954645e-05,
      "loss": 0.1581,
      "step": 9
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.0366225618627523,
      "learning_rate": 4.990486745229364e-05,
      "loss": 0.1537,
      "step": 10
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.031129456262764633,
      "learning_rate": 4.9786121534345265e-05,
      "loss": 0.1501,
      "step": 11
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.038046792742615784,
      "learning_rate": 4.962019382530521e-05,
      "loss": 0.1404,
      "step": 12
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.0406727748673051,
      "learning_rate": 4.940740017799833e-05,
      "loss": 0.1347,
      "step": 13
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.03940085410684804,
      "learning_rate": 4.914814565722671e-05,
      "loss": 0.1321,
      "step": 14
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.037688137420001884,
      "learning_rate": 4.884292376870567e-05,
      "loss": 0.1221,
      "step": 15
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.03429282891214494,
      "learning_rate": 4.849231551964771e-05,
      "loss": 0.1203,
      "step": 16
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.03184344902827262,
      "learning_rate": 4.8096988312782174e-05,
      "loss": 0.1126,
      "step": 17
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.03618849185621362,
      "learning_rate": 4.765769467591625e-05,
      "loss": 0.1078,
      "step": 18
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.034298104293600626,
      "learning_rate": 4.717527082945554e-05,
      "loss": 0.1048,
      "step": 19
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.034935183074113044,
      "learning_rate": 4.665063509461097e-05,
      "loss": 0.0946,
      "step": 20
    },
    {
      "epoch": 1.05,
      "grad_norm": 0.031815811600406016,
      "learning_rate": 4.608478614532215e-05,
      "loss": 0.0914,
      "step": 21
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.03215063512841389,
      "learning_rate": 4.54788011072248e-05,
      "loss": 0.0842,
      "step": 22
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.030140183862344357,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 0.0771,
      "step": 23
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.0293337861992031,
      "learning_rate": 4.415111107797445e-05,
      "loss": 0.0719,
      "step": 24
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.030203745320595544,
      "learning_rate": 4.34319334202531e-05,
      "loss": 0.0724,
      "step": 25
    },
    {
      "epoch": 1.3,
      "grad_norm": 0.028088817651504092,
      "learning_rate": 4.267766952966369e-05,
      "loss": 0.0723,
      "step": 26
    },
    {
      "epoch": 1.35,
      "grad_norm": 0.027148000968074536,
      "learning_rate": 4.188975519039151e-05,
      "loss": 0.0633,
      "step": 27
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.02964713724375281,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 0.0556,
      "step": 28
    },
    {
      "epoch": 1.45,
      "grad_norm": 0.027421116188822536,
      "learning_rate": 4.021903572521802e-05,
      "loss": 0.0532,
      "step": 29
    },
    {
      "epoch": 1.5,
      "grad_norm": 0.02716026363115862,
      "learning_rate": 3.933941090877615e-05,
      "loss": 0.0511,
      "step": 30
    },
    {
      "epoch": 1.55,
      "grad_norm": 0.026329098311513383,
      "learning_rate": 3.84324902086706e-05,
      "loss": 0.0563,
      "step": 31
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.0280978968741894,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.0446,
      "step": 32
    },
    {
      "epoch": 1.65,
      "grad_norm": 0.028428656261065784,
      "learning_rate": 3.654371533087586e-05,
      "loss": 0.0421,
      "step": 33
    },
    {
      "epoch": 1.7,
      "grad_norm": 0.03310118661665954,
      "learning_rate": 3.556545654351749e-05,
      "loss": 0.0436,
      "step": 34
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.027621293127998756,
      "learning_rate": 3.456708580912725e-05,
      "loss": 0.0414,
      "step": 35
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.031305980977346645,
      "learning_rate": 3.355050358314172e-05,
      "loss": 0.0424,
      "step": 36
    },
    {
      "epoch": 1.85,
      "grad_norm": 0.02821237752777818,
      "learning_rate": 3.251764498760683e-05,
      "loss": 0.0413,
      "step": 37
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.02595028819757713,
      "learning_rate": 3.147047612756302e-05,
      "loss": 0.0354,
      "step": 38
    },
    {
      "epoch": 1.95,
      "grad_norm": 0.028249892893483154,
      "learning_rate": 3.0410990348452573e-05,
      "loss": 0.0425,
      "step": 39
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.023890505381380638,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 0.0302,
      "step": 40
    },
    {
      "epoch": 2.05,
      "grad_norm": 0.021580387415294815,
      "learning_rate": 2.8263154805501297e-05,
      "loss": 0.0314,
      "step": 41
    },
    {
      "epoch": 2.1,
      "grad_norm": 0.020588314809361626,
      "learning_rate": 2.717889356869146e-05,
      "loss": 0.0291,
      "step": 42
    },
    {
      "epoch": 2.15,
      "grad_norm": 0.020463281695351575,
      "learning_rate": 2.6090484684133404e-05,
      "loss": 0.0329,
      "step": 43
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.04001010673333842,
      "learning_rate": 2.5e-05,
      "loss": 0.0345,
      "step": 44
    },
    {
      "epoch": 2.25,
      "grad_norm": 0.020584243302568767,
      "learning_rate": 2.3909515315866605e-05,
      "loss": 0.0272,
      "step": 45
    },
    {
      "epoch": 2.3,
      "grad_norm": 0.022447337126769707,
      "learning_rate": 2.2821106431308544e-05,
      "loss": 0.0289,
      "step": 46
    },
    {
      "epoch": 2.35,
      "grad_norm": 0.020637635680219337,
      "learning_rate": 2.173684519449872e-05,
      "loss": 0.0274,
      "step": 47
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.019683363570100445,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 0.0288,
      "step": 48
    },
    {
      "epoch": 2.45,
      "grad_norm": 0.031243945080274442,
      "learning_rate": 1.958900965154743e-05,
      "loss": 0.0387,
      "step": 49
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.01997604058204314,
      "learning_rate": 1.852952387243698e-05,
      "loss": 0.0275,
      "step": 50
    },
    {
      "epoch": 2.55,
      "grad_norm": 0.022378313541631285,
      "learning_rate": 1.7482355012393177e-05,
      "loss": 0.0297,
      "step": 51
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.01971201877928536,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 0.0275,
      "step": 52
    },
    {
      "epoch": 2.65,
      "grad_norm": 0.022059176470837674,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.0275,
      "step": 53
    },
    {
      "epoch": 2.7,
      "grad_norm": 0.01811241798703188,
      "learning_rate": 1.443454345648252e-05,
      "loss": 0.0281,
      "step": 54
    },
    {
      "epoch": 2.75,
      "grad_norm": 0.018238279525765698,
      "learning_rate": 1.3456284669124158e-05,
      "loss": 0.0272,
      "step": 55
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.02037184481118651,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 0.0283,
      "step": 56
    },
    {
      "epoch": 2.85,
      "grad_norm": 0.0192456262694011,
      "learning_rate": 1.1567509791329401e-05,
      "loss": 0.0321,
      "step": 57
    },
    {
      "epoch": 2.9,
      "grad_norm": 0.01550821162034626,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 0.0242,
      "step": 58
    },
    {
      "epoch": 2.95,
      "grad_norm": 0.01858583076694851,
      "learning_rate": 9.780964274781984e-06,
      "loss": 0.0249,
      "step": 59
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.019073828965141038,
      "learning_rate": 8.930309757836517e-06,
      "loss": 0.0269,
      "step": 60
    },
    {
      "epoch": 3.05,
      "grad_norm": 0.016131093812404212,
      "learning_rate": 8.110244809608495e-06,
      "loss": 0.0238,
      "step": 61
    },
    {
      "epoch": 3.1,
      "grad_norm": 0.017423126710067707,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 0.0249,
      "step": 62
    },
    {
      "epoch": 3.15,
      "grad_norm": 0.017662503677228478,
      "learning_rate": 6.568066579746901e-06,
      "loss": 0.0255,
      "step": 63
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.01695460814174754,
      "learning_rate": 5.848888922025553e-06,
      "loss": 0.0222,
      "step": 64
    },
    {
      "epoch": 3.25,
      "grad_norm": 0.016043806952368577,
      "learning_rate": 5.166166492719124e-06,
      "loss": 0.0236,
      "step": 65
    },
    {
      "epoch": 3.3,
      "grad_norm": 0.015613735206659563,
      "learning_rate": 4.521198892775203e-06,
      "loss": 0.0283,
      "step": 66
    },
    {
      "epoch": 3.35,
      "grad_norm": 0.016302457782708404,
      "learning_rate": 3.9152138546778625e-06,
      "loss": 0.0222,
      "step": 67
    },
    {
      "epoch": 3.4,
      "grad_norm": 0.015644529851688054,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 0.0242,
      "step": 68
    },
    {
      "epoch": 3.45,
      "grad_norm": 0.017824515754806754,
      "learning_rate": 2.8247291705444575e-06,
      "loss": 0.0312,
      "step": 69
    },
    {
      "epoch": 3.5,
      "grad_norm": 0.01783878167725992,
      "learning_rate": 2.3423053240837515e-06,
      "loss": 0.0298,
      "step": 70
    },
    {
      "epoch": 3.55,
      "grad_norm": 0.01623915947132217,
      "learning_rate": 1.9030116872178316e-06,
      "loss": 0.0259,
      "step": 71
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.014691878921968545,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 0.0252,
      "step": 72
    },
    {
      "epoch": 3.65,
      "grad_norm": 0.0158012594720991,
      "learning_rate": 1.1570762312943295e-06,
      "loss": 0.0248,
      "step": 73
    },
    {
      "epoch": 3.7,
      "grad_norm": 0.016417378319219886,
      "learning_rate": 8.51854342773295e-07,
      "loss": 0.0256,
      "step": 74
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.014394437548996657,
      "learning_rate": 5.925998220016659e-07,
      "loss": 0.0267,
      "step": 75
    },
    {
      "epoch": 3.8,
      "grad_norm": 0.015252447213812402,
      "learning_rate": 3.7980617469479953e-07,
      "loss": 0.0233,
      "step": 76
    },
    {
      "epoch": 3.85,
      "grad_norm": 0.022938011099557724,
      "learning_rate": 2.1387846565474045e-07,
      "loss": 0.0331,
      "step": 77
    },
    {
      "epoch": 3.9,
      "grad_norm": 0.019316460523955057,
      "learning_rate": 9.513254770636137e-08,
      "loss": 0.0317,
      "step": 78
    },
    {
      "epoch": 3.95,
      "grad_norm": 0.017247184889252695,
      "learning_rate": 2.3794460453555047e-08,
      "loss": 0.0251,
      "step": 79
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.01626709020519667,
      "learning_rate": 0.0,
      "loss": 0.0234,
      "step": 80
    },
    {
      "epoch": 4.0,
      "step": 80,
      "total_flos": 436992876216320.0,
      "train_loss": 0.06461741614621133,
      "train_runtime": 740.6897,
      "train_samples_per_second": 0.848,
      "train_steps_per_second": 0.108
    }
  ],
  "logging_steps": 1,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 436992876216320.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}