{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.00485074853113271, "eval_steps": 500, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 2.4253742655663552e-05, "grad_norm": 3.3759636878967285, "learning_rate": 2.0000000000000003e-06, "loss": 1.4074, "step": 1 }, { "epoch": 4.8507485311327104e-05, "grad_norm": 0.8539016246795654, "learning_rate": 4.000000000000001e-06, "loss": 1.4688, "step": 2 }, { "epoch": 7.276122796699065e-05, "grad_norm": null, "learning_rate": 4.000000000000001e-06, "loss": 2.2166, "step": 3 }, { "epoch": 9.701497062265421e-05, "grad_norm": 0.8566086888313293, "learning_rate": 6e-06, "loss": 1.3767, "step": 4 }, { "epoch": 0.00012126871327831776, "grad_norm": 1.564802885055542, "learning_rate": 8.000000000000001e-06, "loss": 1.3408, "step": 5 }, { "epoch": 0.0001455224559339813, "grad_norm": 6.496278762817383, "learning_rate": 1e-05, "loss": 2.1242, "step": 6 }, { "epoch": 0.00016977619858964487, "grad_norm": 3.3886003494262695, "learning_rate": 1.2e-05, "loss": 1.639, "step": 7 }, { "epoch": 0.00019402994124530841, "grad_norm": 0.5676108598709106, "learning_rate": 1.4000000000000001e-05, "loss": 1.547, "step": 8 }, { "epoch": 0.00021828368390097196, "grad_norm": 7.636886119842529, "learning_rate": 1.6000000000000003e-05, "loss": 1.6577, "step": 9 }, { "epoch": 0.00024253742655663553, "grad_norm": 13.671367645263672, "learning_rate": 1.8e-05, "loss": 1.9861, "step": 10 }, { "epoch": 0.00026679116921229907, "grad_norm": 1.700162649154663, "learning_rate": 2e-05, "loss": 2.1347, "step": 11 }, { "epoch": 0.0002910449118679626, "grad_norm": 4.465837478637695, "learning_rate": 2.2000000000000003e-05, "loss": 1.2903, "step": 12 }, { "epoch": 0.00031529865452362615, "grad_norm": 5.200563430786133, "learning_rate": 2.4e-05, "loss": 1.727, "step": 13 }, { "epoch": 0.00033955239717928975, "grad_norm": 2.4282960891723633, "learning_rate": 
2.6000000000000002e-05, "loss": 1.8629, "step": 14 }, { "epoch": 0.0003638061398349533, "grad_norm": 0.8094596266746521, "learning_rate": 2.8000000000000003e-05, "loss": 1.3653, "step": 15 }, { "epoch": 0.00038805988249061683, "grad_norm": 0.8468468189239502, "learning_rate": 3e-05, "loss": 1.9853, "step": 16 }, { "epoch": 0.00041231362514628037, "grad_norm": 0.9713991284370422, "learning_rate": 3.2000000000000005e-05, "loss": 1.0837, "step": 17 }, { "epoch": 0.0004365673678019439, "grad_norm": 0.8852229118347168, "learning_rate": 3.4000000000000007e-05, "loss": 1.8255, "step": 18 }, { "epoch": 0.0004608211104576075, "grad_norm": null, "learning_rate": 3.4000000000000007e-05, "loss": 1.8453, "step": 19 }, { "epoch": 0.00048507485311327105, "grad_norm": 0.7369140982627869, "learning_rate": 3.6e-05, "loss": 1.4695, "step": 20 }, { "epoch": 0.0005093285957689346, "grad_norm": 0.7990018129348755, "learning_rate": 3.8e-05, "loss": 0.9667, "step": 21 }, { "epoch": 0.0005335823384245981, "grad_norm": 51.18899917602539, "learning_rate": 4e-05, "loss": 2.049, "step": 22 }, { "epoch": 0.0005578360810802617, "grad_norm": 2.142543315887451, "learning_rate": 4.2e-05, "loss": 1.7967, "step": 23 }, { "epoch": 0.0005820898237359252, "grad_norm": 14.483644485473633, "learning_rate": 4.4000000000000006e-05, "loss": 1.6066, "step": 24 }, { "epoch": 0.0006063435663915888, "grad_norm": 0.4434223771095276, "learning_rate": 4.600000000000001e-05, "loss": 0.9649, "step": 25 }, { "epoch": 0.0006305973090472523, "grad_norm": 2.3446133136749268, "learning_rate": 4.8e-05, "loss": 2.2895, "step": 26 }, { "epoch": 0.000654851051702916, "grad_norm": 7.05695104598999, "learning_rate": 5e-05, "loss": 1.5174, "step": 27 }, { "epoch": 0.0006791047943585795, "grad_norm": 1.1299482583999634, "learning_rate": 5.2000000000000004e-05, "loss": 1.6502, "step": 28 }, { "epoch": 0.000703358537014243, "grad_norm": 1.1360725164413452, "learning_rate": 5.4000000000000005e-05, "loss": 1.9164, "step": 29 }, { 
"epoch": 0.0007276122796699066, "grad_norm": 0.6310778260231018, "learning_rate": 5.6000000000000006e-05, "loss": 1.3889, "step": 30 }, { "epoch": 0.0007518660223255701, "grad_norm": 23.649295806884766, "learning_rate": 5.8e-05, "loss": 1.8868, "step": 31 }, { "epoch": 0.0007761197649812337, "grad_norm": 1.4656137228012085, "learning_rate": 6e-05, "loss": 2.1166, "step": 32 }, { "epoch": 0.0008003735076368972, "grad_norm": 1.1650835275650024, "learning_rate": 6.2e-05, "loss": 1.6361, "step": 33 }, { "epoch": 0.0008246272502925607, "grad_norm": 3.5989675521850586, "learning_rate": 6.400000000000001e-05, "loss": 1.9528, "step": 34 }, { "epoch": 0.0008488809929482243, "grad_norm": null, "learning_rate": 6.400000000000001e-05, "loss": 1.1095, "step": 35 }, { "epoch": 0.0008731347356038878, "grad_norm": 0.7551969885826111, "learning_rate": 6.6e-05, "loss": 1.4748, "step": 36 }, { "epoch": 0.0008973884782595515, "grad_norm": 0.7345149517059326, "learning_rate": 6.800000000000001e-05, "loss": 1.3746, "step": 37 }, { "epoch": 0.000921642220915215, "grad_norm": 0.96466463804245, "learning_rate": 7e-05, "loss": 1.3491, "step": 38 }, { "epoch": 0.0009458959635708786, "grad_norm": 0.895024836063385, "learning_rate": 7.2e-05, "loss": 1.3976, "step": 39 }, { "epoch": 0.0009701497062265421, "grad_norm": 2.5485947132110596, "learning_rate": 7.4e-05, "loss": 1.6924, "step": 40 }, { "epoch": 0.0009944034488822055, "grad_norm": 14.429283142089844, "learning_rate": 7.6e-05, "loss": 1.2937, "step": 41 }, { "epoch": 0.0010186571915378692, "grad_norm": 0.8819990158081055, "learning_rate": 7.800000000000001e-05, "loss": 1.3237, "step": 42 }, { "epoch": 0.0010429109341935328, "grad_norm": 1.2532185316085815, "learning_rate": 8e-05, "loss": 1.2843, "step": 43 }, { "epoch": 0.0010671646768491963, "grad_norm": 3.7754809856414795, "learning_rate": 8.2e-05, "loss": 2.0701, "step": 44 }, { "epoch": 0.00109141841950486, "grad_norm": 6.247189044952393, "learning_rate": 8.4e-05, "loss": 1.3514, 
"step": 45 }, { "epoch": 0.0011156721621605234, "grad_norm": 2.4143786430358887, "learning_rate": 8.6e-05, "loss": 1.3366, "step": 46 }, { "epoch": 0.001139925904816187, "grad_norm": 0.9999021887779236, "learning_rate": 8.800000000000001e-05, "loss": 1.6014, "step": 47 }, { "epoch": 0.0011641796474718504, "grad_norm": 10.256402015686035, "learning_rate": 9e-05, "loss": 1.5158, "step": 48 }, { "epoch": 0.001188433390127514, "grad_norm": 3.1409249305725098, "learning_rate": 9.200000000000001e-05, "loss": 1.2433, "step": 49 }, { "epoch": 0.0012126871327831775, "grad_norm": 5.032261371612549, "learning_rate": 9.4e-05, "loss": 1.9222, "step": 50 }, { "epoch": 0.0012369408754388412, "grad_norm": 32.46524429321289, "learning_rate": 9.6e-05, "loss": 1.944, "step": 51 }, { "epoch": 0.0012611946180945046, "grad_norm": 3.3015825748443604, "learning_rate": 9.8e-05, "loss": 1.6905, "step": 52 }, { "epoch": 0.0012854483607501683, "grad_norm": 33.24071502685547, "learning_rate": 0.0001, "loss": 1.8358, "step": 53 }, { "epoch": 0.001309702103405832, "grad_norm": 1.4521846771240234, "learning_rate": 0.00010200000000000001, "loss": 1.6665, "step": 54 }, { "epoch": 0.0013339558460614953, "grad_norm": 1.9511698484420776, "learning_rate": 0.00010400000000000001, "loss": 1.2289, "step": 55 }, { "epoch": 0.001358209588717159, "grad_norm": 7.6353535652160645, "learning_rate": 0.00010600000000000002, "loss": 1.762, "step": 56 }, { "epoch": 0.0013824633313728224, "grad_norm": 1.035163164138794, "learning_rate": 0.00010800000000000001, "loss": 1.4294, "step": 57 }, { "epoch": 0.001406717074028486, "grad_norm": 4.369189262390137, "learning_rate": 0.00011000000000000002, "loss": 1.4502, "step": 58 }, { "epoch": 0.0014309708166841495, "grad_norm": 1.308000087738037, "learning_rate": 0.00011200000000000001, "loss": 1.4871, "step": 59 }, { "epoch": 0.0014552245593398132, "grad_norm": 0.9026333093643188, "learning_rate": 0.00011399999999999999, "loss": 1.431, "step": 60 }, { "epoch": 
0.0014794783019954766, "grad_norm": 1.266846776008606, "learning_rate": 0.000116, "loss": 1.5601, "step": 61 }, { "epoch": 0.0015037320446511402, "grad_norm": 2.569223642349243, "learning_rate": 0.000118, "loss": 1.3147, "step": 62 }, { "epoch": 0.0015279857873068039, "grad_norm": 0.6180663108825684, "learning_rate": 0.00012, "loss": 0.735, "step": 63 }, { "epoch": 0.0015522395299624673, "grad_norm": 7.6248650550842285, "learning_rate": 0.000122, "loss": 1.1034, "step": 64 }, { "epoch": 0.001576493272618131, "grad_norm": null, "learning_rate": 0.000122, "loss": 2.0957, "step": 65 }, { "epoch": 0.0016007470152737944, "grad_norm": 17.51434326171875, "learning_rate": 0.000124, "loss": 1.7337, "step": 66 }, { "epoch": 0.001625000757929458, "grad_norm": 4.455969333648682, "learning_rate": 0.000126, "loss": 1.8453, "step": 67 }, { "epoch": 0.0016492545005851215, "grad_norm": 3.6060261726379395, "learning_rate": 0.00012800000000000002, "loss": 1.304, "step": 68 }, { "epoch": 0.0016735082432407851, "grad_norm": 1.1424020528793335, "learning_rate": 0.00013000000000000002, "loss": 1.2339, "step": 69 }, { "epoch": 0.0016977619858964486, "grad_norm": 1.294066309928894, "learning_rate": 0.000132, "loss": 1.3406, "step": 70 }, { "epoch": 0.0017220157285521122, "grad_norm": 8.3845796585083, "learning_rate": 0.000134, "loss": 2.0366, "step": 71 }, { "epoch": 0.0017462694712077757, "grad_norm": 7.098409175872803, "learning_rate": 0.00013600000000000003, "loss": 1.566, "step": 72 }, { "epoch": 0.0017705232138634393, "grad_norm": null, "learning_rate": 0.00013600000000000003, "loss": 1.1374, "step": 73 }, { "epoch": 0.001794776956519103, "grad_norm": 3.7947475910186768, "learning_rate": 0.000138, "loss": 1.4067, "step": 74 }, { "epoch": 0.0018190306991747664, "grad_norm": 3.4975695610046387, "learning_rate": 0.00014, "loss": 1.4905, "step": 75 }, { "epoch": 0.00184328444183043, "grad_norm": 1.821387529373169, "learning_rate": 0.000142, "loss": 1.289, "step": 76 }, { "epoch": 
0.0018675381844860935, "grad_norm": 58.30936050415039, "learning_rate": 0.000144, "loss": 1.4909, "step": 77 }, { "epoch": 0.0018917919271417571, "grad_norm": 1.3323521614074707, "learning_rate": 0.000146, "loss": 1.1766, "step": 78 }, { "epoch": 0.0019160456697974206, "grad_norm": 5.184061050415039, "learning_rate": 0.000148, "loss": 0.9555, "step": 79 }, { "epoch": 0.0019402994124530842, "grad_norm": 3.7887625694274902, "learning_rate": 0.00015000000000000001, "loss": 1.8287, "step": 80 }, { "epoch": 0.001964553155108748, "grad_norm": 0.8411690592765808, "learning_rate": 0.000152, "loss": 1.1176, "step": 81 }, { "epoch": 0.001988806897764411, "grad_norm": 1.881020426750183, "learning_rate": 0.000154, "loss": 1.4231, "step": 82 }, { "epoch": 0.0020130606404200747, "grad_norm": 2.2068991661071777, "learning_rate": 0.00015600000000000002, "loss": 1.4049, "step": 83 }, { "epoch": 0.0020373143830757384, "grad_norm": 4.380375862121582, "learning_rate": 0.00015800000000000002, "loss": 1.0798, "step": 84 }, { "epoch": 0.002061568125731402, "grad_norm": 1.7205640077590942, "learning_rate": 0.00016, "loss": 1.4954, "step": 85 }, { "epoch": 0.0020858218683870657, "grad_norm": 1.5813369750976562, "learning_rate": 0.000162, "loss": 1.4113, "step": 86 }, { "epoch": 0.002110075611042729, "grad_norm": 1.3017473220825195, "learning_rate": 0.000164, "loss": 1.2315, "step": 87 }, { "epoch": 0.0021343293536983925, "grad_norm": 2.0372846126556396, "learning_rate": 0.000166, "loss": 1.5282, "step": 88 }, { "epoch": 0.002158583096354056, "grad_norm": 1.0349255800247192, "learning_rate": 0.000168, "loss": 1.3279, "step": 89 }, { "epoch": 0.00218283683900972, "grad_norm": 2.119569778442383, "learning_rate": 0.00017, "loss": 1.0482, "step": 90 }, { "epoch": 0.002207090581665383, "grad_norm": 2.0538179874420166, "learning_rate": 0.000172, "loss": 1.0129, "step": 91 }, { "epoch": 0.0022313443243210467, "grad_norm": 1.4903944730758667, "learning_rate": 0.000174, "loss": 1.7894, "step": 92 }, 
{ "epoch": 0.0022555980669767104, "grad_norm": 5.269598007202148, "learning_rate": 0.00017600000000000002, "loss": 1.1085, "step": 93 }, { "epoch": 0.002279851809632374, "grad_norm": 2.082563877105713, "learning_rate": 0.00017800000000000002, "loss": 1.4559, "step": 94 }, { "epoch": 0.0023041055522880377, "grad_norm": 1.0695875883102417, "learning_rate": 0.00018, "loss": 1.5581, "step": 95 }, { "epoch": 0.002328359294943701, "grad_norm": 1.9814307689666748, "learning_rate": 0.000182, "loss": 1.5945, "step": 96 }, { "epoch": 0.0023526130375993645, "grad_norm": 8.308389663696289, "learning_rate": 0.00018400000000000003, "loss": 2.0305, "step": 97 }, { "epoch": 0.002376866780255028, "grad_norm": 1.8079288005828857, "learning_rate": 0.00018600000000000002, "loss": 1.4862, "step": 98 }, { "epoch": 0.002401120522910692, "grad_norm": 12.573166847229004, "learning_rate": 0.000188, "loss": 1.7458, "step": 99 }, { "epoch": 0.002425374265566355, "grad_norm": 33.789764404296875, "learning_rate": 0.00019, "loss": 1.3982, "step": 100 }, { "epoch": 0.0024496280082220187, "grad_norm": 27.591976165771484, "learning_rate": 0.000192, "loss": 1.5771, "step": 101 }, { "epoch": 0.0024738817508776823, "grad_norm": 1.4807827472686768, "learning_rate": 0.000194, "loss": 1.462, "step": 102 }, { "epoch": 0.002498135493533346, "grad_norm": 1.479779601097107, "learning_rate": 0.000196, "loss": 1.5715, "step": 103 }, { "epoch": 0.002522389236189009, "grad_norm": 1.569394826889038, "learning_rate": 0.00019800000000000002, "loss": 1.7968, "step": 104 }, { "epoch": 0.002546642978844673, "grad_norm": 1.3037728071212769, "learning_rate": 0.0002, "loss": 1.2281, "step": 105 }, { "epoch": 0.0025708967215003365, "grad_norm": 3.430941104888916, "learning_rate": 0.00019800000000000002, "loss": 1.5046, "step": 106 }, { "epoch": 0.002595150464156, "grad_norm": 1.2662192583084106, "learning_rate": 0.000196, "loss": 1.1589, "step": 107 }, { "epoch": 0.002619404206811664, "grad_norm": 1.0078253746032715, 
"learning_rate": 0.000194, "loss": 1.2159, "step": 108 }, { "epoch": 0.002643657949467327, "grad_norm": 1.0011709928512573, "learning_rate": 0.000192, "loss": 1.3889, "step": 109 }, { "epoch": 0.0026679116921229907, "grad_norm": 1.0177195072174072, "learning_rate": 0.00019, "loss": 1.5495, "step": 110 }, { "epoch": 0.0026921654347786543, "grad_norm": 14.817512512207031, "learning_rate": 0.000188, "loss": 1.3749, "step": 111 }, { "epoch": 0.002716419177434318, "grad_norm": 2.8248116970062256, "learning_rate": 0.00018600000000000002, "loss": 1.4093, "step": 112 }, { "epoch": 0.002740672920089981, "grad_norm": 1.23388671875, "learning_rate": 0.00018400000000000003, "loss": 1.662, "step": 113 }, { "epoch": 0.002764926662745645, "grad_norm": 1.2018444538116455, "learning_rate": 0.000182, "loss": 1.561, "step": 114 }, { "epoch": 0.0027891804054013085, "grad_norm": 1.6609792709350586, "learning_rate": 0.00018, "loss": 1.449, "step": 115 }, { "epoch": 0.002813434148056972, "grad_norm": 4.717409610748291, "learning_rate": 0.00017800000000000002, "loss": 1.3023, "step": 116 }, { "epoch": 0.002837687890712636, "grad_norm": 0.6800332069396973, "learning_rate": 0.00017600000000000002, "loss": 1.1162, "step": 117 }, { "epoch": 0.002861941633368299, "grad_norm": 1.1162289381027222, "learning_rate": 0.000174, "loss": 1.2826, "step": 118 }, { "epoch": 0.0028861953760239627, "grad_norm": 0.9007352590560913, "learning_rate": 0.000172, "loss": 1.7017, "step": 119 }, { "epoch": 0.0029104491186796263, "grad_norm": 2.0487279891967773, "learning_rate": 0.00017, "loss": 2.0916, "step": 120 }, { "epoch": 0.00293470286133529, "grad_norm": 4.298633098602295, "learning_rate": 0.000168, "loss": 2.2025, "step": 121 }, { "epoch": 0.002958956603990953, "grad_norm": 1.0514113903045654, "learning_rate": 0.000166, "loss": 1.3972, "step": 122 }, { "epoch": 0.002983210346646617, "grad_norm": 0.7477086186408997, "learning_rate": 0.000164, "loss": 1.0473, "step": 123 }, { "epoch": 0.0030074640893022805, 
"grad_norm": 1.2515380382537842, "learning_rate": 0.000162, "loss": 1.4222, "step": 124 }, { "epoch": 0.003031717831957944, "grad_norm": 1.1901217699050903, "learning_rate": 0.00016, "loss": 1.0811, "step": 125 }, { "epoch": 0.0030559715746136078, "grad_norm": 1.3582122325897217, "learning_rate": 0.00015800000000000002, "loss": 1.2179, "step": 126 }, { "epoch": 0.003080225317269271, "grad_norm": 0.7772126197814941, "learning_rate": 0.00015600000000000002, "loss": 1.1056, "step": 127 }, { "epoch": 0.0031044790599249346, "grad_norm": 3.8524062633514404, "learning_rate": 0.000154, "loss": 1.1881, "step": 128 }, { "epoch": 0.0031287328025805983, "grad_norm": 1.8099156618118286, "learning_rate": 0.000152, "loss": 1.6343, "step": 129 }, { "epoch": 0.003152986545236262, "grad_norm": 1.2134389877319336, "learning_rate": 0.00015000000000000001, "loss": 1.3875, "step": 130 }, { "epoch": 0.003177240287891925, "grad_norm": 0.938895583152771, "learning_rate": 0.000148, "loss": 1.2225, "step": 131 }, { "epoch": 0.003201494030547589, "grad_norm": 1.9306858777999878, "learning_rate": 0.000146, "loss": 1.2144, "step": 132 }, { "epoch": 0.0032257477732032525, "grad_norm": 1.3297972679138184, "learning_rate": 0.000144, "loss": 1.4381, "step": 133 }, { "epoch": 0.003250001515858916, "grad_norm": 1.270509958267212, "learning_rate": 0.000142, "loss": 1.1591, "step": 134 }, { "epoch": 0.0032742552585145793, "grad_norm": 3.317193031311035, "learning_rate": 0.00014, "loss": 1.2585, "step": 135 }, { "epoch": 0.003298509001170243, "grad_norm": 1.2975448369979858, "learning_rate": 0.000138, "loss": 1.571, "step": 136 }, { "epoch": 0.0033227627438259066, "grad_norm": 1.0426270961761475, "learning_rate": 0.00013600000000000003, "loss": 1.8555, "step": 137 }, { "epoch": 0.0033470164864815703, "grad_norm": 1.228779673576355, "learning_rate": 0.000134, "loss": 1.3992, "step": 138 }, { "epoch": 0.003371270229137234, "grad_norm": 0.977001428604126, "learning_rate": 0.000132, "loss": 1.1395, "step": 
139 }, { "epoch": 0.003395523971792897, "grad_norm": 1.0894217491149902, "learning_rate": 0.00013000000000000002, "loss": 1.4242, "step": 140 }, { "epoch": 0.003419777714448561, "grad_norm": 1.0413155555725098, "learning_rate": 0.00012800000000000002, "loss": 1.4771, "step": 141 }, { "epoch": 0.0034440314571042244, "grad_norm": 1.2907966375350952, "learning_rate": 0.000126, "loss": 1.5202, "step": 142 }, { "epoch": 0.003468285199759888, "grad_norm": 0.8118016123771667, "learning_rate": 0.000124, "loss": 1.5335, "step": 143 }, { "epoch": 0.0034925389424155513, "grad_norm": 1.329568862915039, "learning_rate": 0.000122, "loss": 1.7877, "step": 144 }, { "epoch": 0.003516792685071215, "grad_norm": 0.6988470554351807, "learning_rate": 0.00012, "loss": 0.6288, "step": 145 }, { "epoch": 0.0035410464277268786, "grad_norm": 2.3587098121643066, "learning_rate": 0.000118, "loss": 1.2359, "step": 146 }, { "epoch": 0.0035653001703825423, "grad_norm": 1.1087194681167603, "learning_rate": 0.000116, "loss": 1.3769, "step": 147 }, { "epoch": 0.003589553913038206, "grad_norm": 1.6822993755340576, "learning_rate": 0.00011399999999999999, "loss": 1.3112, "step": 148 }, { "epoch": 0.003613807655693869, "grad_norm": 1.3227869272232056, "learning_rate": 0.00011200000000000001, "loss": 1.4754, "step": 149 }, { "epoch": 0.0036380613983495328, "grad_norm": 1.1278481483459473, "learning_rate": 0.00011000000000000002, "loss": 1.1768, "step": 150 }, { "epoch": 0.0036623151410051964, "grad_norm": 1.2085745334625244, "learning_rate": 0.00010800000000000001, "loss": 1.4345, "step": 151 }, { "epoch": 0.00368656888366086, "grad_norm": 1.0850399732589722, "learning_rate": 0.00010600000000000002, "loss": 1.2432, "step": 152 }, { "epoch": 0.0037108226263165233, "grad_norm": 1.2463059425354004, "learning_rate": 0.00010400000000000001, "loss": 1.5501, "step": 153 }, { "epoch": 0.003735076368972187, "grad_norm": 1.059967041015625, "learning_rate": 0.00010200000000000001, "loss": 1.0448, "step": 154 }, { 
"epoch": 0.0037593301116278506, "grad_norm": 2.2503929138183594, "learning_rate": 0.0001, "loss": 1.4892, "step": 155 }, { "epoch": 0.0037835838542835142, "grad_norm": 0.8803784251213074, "learning_rate": 9.8e-05, "loss": 1.2764, "step": 156 }, { "epoch": 0.003807837596939178, "grad_norm": 1.5173571109771729, "learning_rate": 9.6e-05, "loss": 1.0656, "step": 157 }, { "epoch": 0.003832091339594841, "grad_norm": 2.0226430892944336, "learning_rate": 9.4e-05, "loss": 1.1382, "step": 158 }, { "epoch": 0.0038563450822505048, "grad_norm": 1.9149307012557983, "learning_rate": 9.200000000000001e-05, "loss": 1.4105, "step": 159 }, { "epoch": 0.0038805988249061684, "grad_norm": 0.9839889407157898, "learning_rate": 9e-05, "loss": 1.7808, "step": 160 }, { "epoch": 0.003904852567561832, "grad_norm": 1.306378722190857, "learning_rate": 8.800000000000001e-05, "loss": 1.4799, "step": 161 }, { "epoch": 0.003929106310217496, "grad_norm": 1.9649689197540283, "learning_rate": 8.6e-05, "loss": 1.2497, "step": 162 }, { "epoch": 0.003953360052873159, "grad_norm": 0.8306687474250793, "learning_rate": 8.4e-05, "loss": 1.1058, "step": 163 }, { "epoch": 0.003977613795528822, "grad_norm": 1.034351110458374, "learning_rate": 8.2e-05, "loss": 0.9346, "step": 164 }, { "epoch": 0.004001867538184486, "grad_norm": 1.165528416633606, "learning_rate": 8e-05, "loss": 1.2315, "step": 165 }, { "epoch": 0.0040261212808401494, "grad_norm": 1.3174549341201782, "learning_rate": 7.800000000000001e-05, "loss": 1.7447, "step": 166 }, { "epoch": 0.004050375023495813, "grad_norm": 1.8279471397399902, "learning_rate": 7.6e-05, "loss": 1.6226, "step": 167 }, { "epoch": 0.004074628766151477, "grad_norm": 1.1315997838974, "learning_rate": 7.4e-05, "loss": 1.2114, "step": 168 }, { "epoch": 0.00409888250880714, "grad_norm": 1.0842983722686768, "learning_rate": 7.2e-05, "loss": 1.1462, "step": 169 }, { "epoch": 0.004123136251462804, "grad_norm": 0.8647174835205078, "learning_rate": 7e-05, "loss": 0.9729, "step": 170 }, 
{ "epoch": 0.004147389994118468, "grad_norm": 0.9712747931480408, "learning_rate": 6.800000000000001e-05, "loss": 1.2886, "step": 171 }, { "epoch": 0.004171643736774131, "grad_norm": 1.3875259160995483, "learning_rate": 6.6e-05, "loss": 1.5696, "step": 172 }, { "epoch": 0.004195897479429794, "grad_norm": 1.1321961879730225, "learning_rate": 6.400000000000001e-05, "loss": 1.4366, "step": 173 }, { "epoch": 0.004220151222085458, "grad_norm": 5.357350826263428, "learning_rate": 6.2e-05, "loss": 1.5292, "step": 174 }, { "epoch": 0.004244404964741121, "grad_norm": 0.7717924118041992, "learning_rate": 6e-05, "loss": 1.3228, "step": 175 }, { "epoch": 0.004268658707396785, "grad_norm": 1.9575989246368408, "learning_rate": 5.8e-05, "loss": 1.1391, "step": 176 }, { "epoch": 0.004292912450052449, "grad_norm": 1.1362651586532593, "learning_rate": 5.6000000000000006e-05, "loss": 1.5378, "step": 177 }, { "epoch": 0.004317166192708112, "grad_norm": 1.5834994316101074, "learning_rate": 5.4000000000000005e-05, "loss": 1.7488, "step": 178 }, { "epoch": 0.004341419935363776, "grad_norm": null, "learning_rate": 5.4000000000000005e-05, "loss": 1.945, "step": 179 }, { "epoch": 0.00436567367801944, "grad_norm": 1.2279846668243408, "learning_rate": 5.2000000000000004e-05, "loss": 1.2098, "step": 180 }, { "epoch": 0.004389927420675103, "grad_norm": 0.9098599553108215, "learning_rate": 5e-05, "loss": 1.2899, "step": 181 }, { "epoch": 0.004414181163330766, "grad_norm": 1.4090386629104614, "learning_rate": 4.8e-05, "loss": 1.9194, "step": 182 }, { "epoch": 0.00443843490598643, "grad_norm": 0.9789434671401978, "learning_rate": 4.600000000000001e-05, "loss": 0.9409, "step": 183 }, { "epoch": 0.004462688648642093, "grad_norm": 0.8534678220748901, "learning_rate": 4.4000000000000006e-05, "loss": 1.3122, "step": 184 }, { "epoch": 0.004486942391297757, "grad_norm": 0.9604746103286743, "learning_rate": 4.2e-05, "loss": 1.2598, "step": 185 }, { "epoch": 0.004511196133953421, "grad_norm": 
1.2235528230667114, "learning_rate": 4e-05, "loss": 1.1669, "step": 186 }, { "epoch": 0.004535449876609084, "grad_norm": 1.1576164960861206, "learning_rate": 3.8e-05, "loss": 1.2882, "step": 187 }, { "epoch": 0.004559703619264748, "grad_norm": 2.6080610752105713, "learning_rate": 3.6e-05, "loss": 1.7071, "step": 188 }, { "epoch": 0.004583957361920412, "grad_norm": 1.0198453664779663, "learning_rate": 3.4000000000000007e-05, "loss": 1.5359, "step": 189 }, { "epoch": 0.004608211104576075, "grad_norm": 0.536956250667572, "learning_rate": 3.2000000000000005e-05, "loss": 0.99, "step": 190 }, { "epoch": 0.004632464847231738, "grad_norm": 1.0942273139953613, "learning_rate": 3e-05, "loss": 1.2758, "step": 191 }, { "epoch": 0.004656718589887402, "grad_norm": 0.7441216707229614, "learning_rate": 2.8000000000000003e-05, "loss": 1.1892, "step": 192 }, { "epoch": 0.004680972332543065, "grad_norm": 1.032776951789856, "learning_rate": 2.6000000000000002e-05, "loss": 1.1905, "step": 193 }, { "epoch": 0.004705226075198729, "grad_norm": 1.4048435688018799, "learning_rate": 2.4e-05, "loss": 1.5178, "step": 194 }, { "epoch": 0.004729479817854393, "grad_norm": 1.3756521940231323, "learning_rate": 2.2000000000000003e-05, "loss": 1.4318, "step": 195 }, { "epoch": 0.004753733560510056, "grad_norm": 1.066291332244873, "learning_rate": 2e-05, "loss": 1.3325, "step": 196 }, { "epoch": 0.00477798730316572, "grad_norm": 1.1176751852035522, "learning_rate": 1.8e-05, "loss": 1.403, "step": 197 }, { "epoch": 0.004802241045821384, "grad_norm": 1.995957851409912, "learning_rate": 1.6000000000000003e-05, "loss": 1.3541, "step": 198 }, { "epoch": 0.004826494788477047, "grad_norm": 2.2354772090911865, "learning_rate": 1.4000000000000001e-05, "loss": 1.6087, "step": 199 }, { "epoch": 0.00485074853113271, "grad_norm": 19.62826919555664, "learning_rate": 1.2e-05, "loss": 1.8031, "step": 200 } ], "logging_steps": 1, "max_steps": 200, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, 
"stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 6033065352437760.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }