{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9921414538310414,
  "eval_steps": 50000,
  "global_step": 1016,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03929273084479371,
      "grad_norm": 4.240847110748291,
      "learning_rate": 1.6129032258064516e-06,
      "loss": 0.8413,
      "step": 10
    },
    {
      "epoch": 0.07858546168958742,
      "grad_norm": 2.775519371032715,
      "learning_rate": 3.225806451612903e-06,
      "loss": 0.8393,
      "step": 20
    },
    {
      "epoch": 0.11787819253438114,
      "grad_norm": 1.6855406761169434,
      "learning_rate": 4.838709677419355e-06,
      "loss": 0.6735,
      "step": 30
    },
    {
      "epoch": 0.15717092337917485,
      "grad_norm": 1.2230864763259888,
      "learning_rate": 4.998970106077018e-06,
      "loss": 0.5542,
      "step": 40
    },
    {
      "epoch": 0.19646365422396855,
      "grad_norm": 1.1485282182693481,
      "learning_rate": 4.99541106832608e-06,
      "loss": 0.5265,
      "step": 50
    },
    {
      "epoch": 0.2357563850687623,
      "grad_norm": 1.0626362562179565,
      "learning_rate": 4.989313791265896e-06,
      "loss": 0.6019,
      "step": 60
    },
    {
      "epoch": 0.275049115913556,
      "grad_norm": 1.3240628242492676,
      "learning_rate": 4.9806844768198724e-06,
      "loss": 0.5705,
      "step": 70
    },
    {
      "epoch": 0.3143418467583497,
      "grad_norm": 1.8527514934539795,
      "learning_rate": 4.969531902405652e-06,
      "loss": 0.5798,
      "step": 80
    },
    {
      "epoch": 0.35363457760314343,
      "grad_norm": 1.7867568731307983,
      "learning_rate": 4.955867412007052e-06,
      "loss": 0.5768,
      "step": 90
    },
    {
      "epoch": 0.3929273084479371,
      "grad_norm": 1.3669260740280151,
      "learning_rate": 4.939704904635388e-06,
      "loss": 0.5416,
      "step": 100
    },
    {
      "epoch": 0.43222003929273084,
      "grad_norm": 1.1892198324203491,
      "learning_rate": 4.921060820191909e-06,
      "loss": 0.5135,
      "step": 110
    },
    {
      "epoch": 0.4715127701375246,
      "grad_norm": 1.509191632270813,
      "learning_rate": 4.8999541227457514e-06,
      "loss": 0.5646,
      "step": 120
    },
    {
      "epoch": 0.5108055009823183,
      "grad_norm": 1.106804609298706,
      "learning_rate": 4.8764062812443875e-06,
      "loss": 0.512,
      "step": 130
    },
    {
      "epoch": 0.550098231827112,
      "grad_norm": 1.2564148902893066,
      "learning_rate": 4.8504412476762105e-06,
      "loss": 0.5396,
      "step": 140
    },
    {
      "epoch": 0.5893909626719057,
      "grad_norm": 1.2446550130844116,
      "learning_rate": 4.822085432707465e-06,
      "loss": 0.5246,
      "step": 150
    },
    {
      "epoch": 0.6286836935166994,
      "grad_norm": 0.9773355722427368,
      "learning_rate": 4.791367678818299e-06,
      "loss": 0.498,
      "step": 160
    },
    {
      "epoch": 0.6679764243614931,
      "grad_norm": 1.3318967819213867,
      "learning_rate": 4.758319230965267e-06,
      "loss": 0.5106,
      "step": 170
    },
    {
      "epoch": 0.7072691552062869,
      "grad_norm": 1.806362271308899,
      "learning_rate": 4.72297370480012e-06,
      "loss": 0.4969,
      "step": 180
    },
    {
      "epoch": 0.7465618860510805,
      "grad_norm": 1.4682146310806274,
      "learning_rate": 4.685367052477218e-06,
      "loss": 0.4937,
      "step": 190
    },
    {
      "epoch": 0.7858546168958742,
      "grad_norm": 1.461862325668335,
      "learning_rate": 4.645537526084331e-06,
      "loss": 0.4722,
      "step": 200
    },
    {
      "epoch": 0.825147347740668,
      "grad_norm": 1.3028712272644043,
      "learning_rate": 4.603525638734049e-06,
      "loss": 0.5044,
      "step": 210
    },
    {
      "epoch": 0.8644400785854617,
      "grad_norm": 1.2041152715682983,
      "learning_rate": 4.559374123355337e-06,
      "loss": 0.5001,
      "step": 220
    },
    {
      "epoch": 0.9037328094302554,
      "grad_norm": 1.3553584814071655,
      "learning_rate": 4.5131278892272e-06,
      "loss": 0.4411,
      "step": 230
    },
    {
      "epoch": 0.9430255402750491,
      "grad_norm": 1.3149176836013794,
      "learning_rate": 4.46483397629863e-06,
      "loss": 0.4714,
      "step": 240
    },
    {
      "epoch": 0.9823182711198428,
      "grad_norm": 1.2405178546905518,
      "learning_rate": 4.414541507341323e-06,
      "loss": 0.4632,
      "step": 250
    },
    {
      "epoch": 1.0216110019646365,
      "grad_norm": 1.4180119037628174,
      "learning_rate": 4.362301637983815e-06,
      "loss": 0.4177,
      "step": 260
    },
    {
      "epoch": 1.0609037328094302,
      "grad_norm": 1.6266894340515137,
      "learning_rate": 4.308167504677893e-06,
      "loss": 0.404,
      "step": 270
    },
    {
      "epoch": 1.1001964636542239,
      "grad_norm": 1.8860481977462769,
      "learning_rate": 4.2521941706501625e-06,
      "loss": 0.385,
      "step": 280
    },
    {
      "epoch": 1.1394891944990178,
      "grad_norm": 1.4382065534591675,
      "learning_rate": 4.194438569893784e-06,
      "loss": 0.3881,
      "step": 290
    },
    {
      "epoch": 1.1787819253438114,
      "grad_norm": 1.4116265773773193,
      "learning_rate": 4.134959449257335e-06,
      "loss": 0.3739,
      "step": 300
    },
    {
      "epoch": 1.218074656188605,
      "grad_norm": 1.192594289779663,
      "learning_rate": 4.0738173086896995e-06,
      "loss": 0.3687,
      "step": 310
    },
    {
      "epoch": 1.2573673870333988,
      "grad_norm": 1.2677867412567139,
      "learning_rate": 4.011074339701772e-06,
      "loss": 0.3822,
      "step": 320
    },
    {
      "epoch": 1.2966601178781925,
      "grad_norm": 1.2392891645431519,
      "learning_rate": 3.946794362107564e-06,
      "loss": 0.3787,
      "step": 330
    },
    {
      "epoch": 1.3359528487229864,
      "grad_norm": 1.810904622077942,
      "learning_rate": 3.8810427591090635e-06,
      "loss": 0.4118,
      "step": 340
    },
    {
      "epoch": 1.37524557956778,
      "grad_norm": 1.9088846445083618,
      "learning_rate": 3.813886410790879e-06,
      "loss": 0.3854,
      "step": 350
    },
    {
      "epoch": 1.4145383104125737,
      "grad_norm": 1.1440798044204712,
      "learning_rate": 3.7453936260922983e-06,
      "loss": 0.3636,
      "step": 360
    },
    {
      "epoch": 1.4538310412573674,
      "grad_norm": 1.5531620979309082,
      "learning_rate": 3.675634073325981e-06,
      "loss": 0.4106,
      "step": 370
    },
    {
      "epoch": 1.493123772102161,
      "grad_norm": 1.4519999027252197,
      "learning_rate": 3.6046787093139415e-06,
      "loss": 0.3809,
      "step": 380
    },
    {
      "epoch": 1.5324165029469548,
      "grad_norm": 1.8616143465042114,
      "learning_rate": 3.5325997072129066e-06,
      "loss": 0.3773,
      "step": 390
    },
    {
      "epoch": 1.5717092337917484,
      "grad_norm": 1.5181927680969238,
      "learning_rate": 3.4594703831024723e-06,
      "loss": 0.394,
      "step": 400
    },
    {
      "epoch": 1.611001964636542,
      "grad_norm": 1.901669979095459,
      "learning_rate": 3.385365121410706e-06,
      "loss": 0.366,
      "step": 410
    },
    {
      "epoch": 1.650294695481336,
      "grad_norm": 1.4574675559997559,
      "learning_rate": 3.310359299253082e-06,
      "loss": 0.3554,
      "step": 420
    },
    {
      "epoch": 1.6895874263261297,
      "grad_norm": 1.6998835802078247,
      "learning_rate": 3.234529209761676e-06,
      "loss": 0.3888,
      "step": 430
    },
    {
      "epoch": 1.7288801571709234,
      "grad_norm": 1.4896142482757568,
      "learning_rate": 3.157951984482635e-06,
      "loss": 0.3853,
      "step": 440
    },
    {
      "epoch": 1.768172888015717,
      "grad_norm": 1.1636215448379517,
      "learning_rate": 3.080705514920836e-06,
      "loss": 0.4516,
      "step": 450
    },
    {
      "epoch": 1.807465618860511,
      "grad_norm": 1.5409859418869019,
      "learning_rate": 3.0028683733115417e-06,
      "loss": 0.3768,
      "step": 460
    },
    {
      "epoch": 1.8467583497053046,
      "grad_norm": 1.0910699367523193,
      "learning_rate": 2.9245197326996515e-06,
      "loss": 0.3674,
      "step": 470
    },
    {
      "epoch": 1.8860510805500983,
      "grad_norm": 1.6574006080627441,
      "learning_rate": 2.845739286407821e-06,
      "loss": 0.3872,
      "step": 480
    },
    {
      "epoch": 1.925343811394892,
      "grad_norm": 1.2492740154266357,
      "learning_rate": 2.7666071669753807e-06,
      "loss": 0.3565,
      "step": 490
    },
    {
      "epoch": 1.9646365422396856,
      "grad_norm": 1.5630059242248535,
      "learning_rate": 2.687203864650497e-06,
      "loss": 0.3736,
      "step": 500
    },
    {
      "epoch": 2.0039292730844793,
      "grad_norm": 1.4160641431808472,
      "learning_rate": 2.6076101455184867e-06,
      "loss": 0.3559,
      "step": 510
    },
    {
      "epoch": 2.043222003929273,
      "grad_norm": 1.2741782665252686,
      "learning_rate": 2.527906969349559e-06,
      "loss": 0.3241,
      "step": 520
    },
    {
      "epoch": 2.0825147347740667,
      "grad_norm": 1.7951622009277344,
      "learning_rate": 2.44817540724955e-06,
      "loss": 0.2905,
      "step": 530
    },
    {
      "epoch": 2.1218074656188604,
      "grad_norm": 2.1442809104919434,
      "learning_rate": 2.3684965591974084e-06,
      "loss": 0.3063,
      "step": 540
    },
    {
      "epoch": 2.161100196463654,
      "grad_norm": 1.2380913496017456,
      "learning_rate": 2.288951471553316e-06,
      "loss": 0.3883,
      "step": 550
    },
    {
      "epoch": 2.2003929273084477,
      "grad_norm": 1.4480243921279907,
      "learning_rate": 2.2096210546213397e-06,
      "loss": 0.2879,
      "step": 560
    },
    {
      "epoch": 2.239685658153242,
      "grad_norm": 1.1371639966964722,
      "learning_rate": 2.130586000350486e-06,
      "loss": 0.288,
      "step": 570
    },
    {
      "epoch": 2.2789783889980355,
      "grad_norm": 2.1175849437713623,
      "learning_rate": 2.0519267002578517e-06,
      "loss": 0.2835,
      "step": 580
    },
    {
      "epoch": 2.318271119842829,
      "grad_norm": 2.0246710777282715,
      "learning_rate": 1.9737231636573595e-06,
      "loss": 0.3274,
      "step": 590
    },
    {
      "epoch": 2.357563850687623,
      "grad_norm": 1.6658718585968018,
      "learning_rate": 1.8960549362772618e-06,
      "loss": 0.284,
      "step": 600
    },
    {
      "epoch": 2.3968565815324165,
      "grad_norm": 2.166947364807129,
      "learning_rate": 1.8190010193491869e-06,
      "loss": 0.3257,
      "step": 610
    },
    {
      "epoch": 2.43614931237721,
      "grad_norm": 1.4202911853790283,
      "learning_rate": 1.7426397892510244e-06,
      "loss": 0.3166,
      "step": 620
    },
    {
      "epoch": 2.475442043222004,
      "grad_norm": 1.6804779767990112,
      "learning_rate": 1.6670489177853854e-06,
      "loss": 0.3084,
      "step": 630
    },
    {
      "epoch": 2.5147347740667976,
      "grad_norm": 1.5360515117645264,
      "learning_rate": 1.5923052931747408e-06,
      "loss": 0.2706,
      "step": 640
    },
    {
      "epoch": 2.5540275049115913,
      "grad_norm": 1.8666387796401978,
      "learning_rate": 1.5184849418535763e-06,
      "loss": 0.3063,
      "step": 650
    },
    {
      "epoch": 2.593320235756385,
      "grad_norm": 1.715324878692627,
      "learning_rate": 1.4456629511371346e-06,
      "loss": 0.2612,
      "step": 660
    },
    {
      "epoch": 2.6326129666011786,
      "grad_norm": 1.3589706420898438,
      "learning_rate": 1.3739133928453884e-06,
      "loss": 0.2843,
      "step": 670
    },
    {
      "epoch": 2.6719056974459727,
      "grad_norm": 1.6187039613723755,
      "learning_rate": 1.3033092479599453e-06,
      "loss": 0.2794,
      "step": 680
    },
    {
      "epoch": 2.711198428290766,
      "grad_norm": 1.5836079120635986,
      "learning_rate": 1.2339223323905026e-06,
      "loss": 0.2591,
      "step": 690
    },
    {
      "epoch": 2.75049115913556,
      "grad_norm": 1.5338759422302246,
      "learning_rate": 1.1658232239263815e-06,
      "loss": 0.2775,
      "step": 700
    },
    {
      "epoch": 2.7897838899803538,
      "grad_norm": 1.7079037427902222,
      "learning_rate": 1.099081190447418e-06,
      "loss": 0.2829,
      "step": 710
    },
    {
      "epoch": 2.8290766208251474,
      "grad_norm": 1.3403918743133545,
      "learning_rate": 1.0337641194672608e-06,
      "loss": 0.278,
      "step": 720
    },
    {
      "epoch": 2.868369351669941,
      "grad_norm": 1.9498125314712524,
      "learning_rate": 9.699384490807113e-07,
      "loss": 0.303,
      "step": 730
    },
    {
      "epoch": 2.907662082514735,
      "grad_norm": 1.7675586938858032,
      "learning_rate": 9.076691003853666e-07,
      "loss": 0.2701,
      "step": 740
    },
    {
      "epoch": 2.9469548133595285,
      "grad_norm": 1.6067301034927368,
      "learning_rate": 8.470194114462965e-07,
      "loss": 0.2734,
      "step": 750
    },
    {
      "epoch": 2.986247544204322,
      "grad_norm": 1.7567888498306274,
      "learning_rate": 7.880510728709176e-07,
      "loss": 0.3013,
      "step": 760
    },
    {
      "epoch": 3.025540275049116,
      "grad_norm": 1.446516990661621,
      "learning_rate": 7.30824065059603e-07,
      "loss": 0.2958,
      "step": 770
    },
    {
      "epoch": 3.0648330058939095,
      "grad_norm": 2.272646427154541,
      "learning_rate": 6.753965971958487e-07,
      "loss": 0.2528,
      "step": 780
    },
    {
      "epoch": 3.104125736738703,
      "grad_norm": 1.5716464519500732,
      "learning_rate": 6.218250480380611e-07,
      "loss": 0.257,
      "step": 790
    },
    {
      "epoch": 3.143418467583497,
      "grad_norm": 1.6481252908706665,
      "learning_rate": 5.701639085731786e-07,
      "loss": 0.2777,
      "step": 800
    },
    {
      "epoch": 3.182711198428291,
      "grad_norm": 1.6308358907699585,
      "learning_rate": 5.204657265904664e-07,
      "loss": 0.2403,
      "step": 810
    },
    {
      "epoch": 3.2220039292730847,
      "grad_norm": 1.8249069452285767,
      "learning_rate": 4.7278105323186244e-07,
      "loss": 0.264,
      "step": 820
    },
    {
      "epoch": 3.2612966601178783,
      "grad_norm": 1.533990740776062,
      "learning_rate": 4.2715839157323384e-07,
      "loss": 0.236,
      "step": 830
    },
    {
      "epoch": 3.300589390962672,
      "grad_norm": 2.055413007736206,
      "learning_rate": 3.8364414728885414e-07,
      "loss": 0.2239,
      "step": 840
    },
    {
      "epoch": 3.3398821218074657,
      "grad_norm": 1.7431176900863647,
      "learning_rate": 3.4228258144927587e-07,
      "loss": 0.2219,
      "step": 850
    },
    {
      "epoch": 3.3791748526522594,
      "grad_norm": 1.1028379201889038,
      "learning_rate": 3.031157655006187e-07,
      "loss": 0.2589,
      "step": 860
    },
    {
      "epoch": 3.418467583497053,
      "grad_norm": 1.7137134075164795,
      "learning_rate": 2.6618353847105703e-07,
      "loss": 0.2515,
      "step": 870
    },
    {
      "epoch": 3.4577603143418467,
      "grad_norm": 1.3465744256973267,
      "learning_rate": 2.315234664480448e-07,
      "loss": 0.2438,
      "step": 880
    },
    {
      "epoch": 3.4970530451866404,
      "grad_norm": 1.597948431968689,
      "learning_rate": 1.9917080436748615e-07,
      "loss": 0.2253,
      "step": 890
    },
    {
      "epoch": 3.536345776031434,
      "grad_norm": 1.588294506072998,
      "learning_rate": 1.6915846015372855e-07,
      "loss": 0.2254,
      "step": 900
    },
    {
      "epoch": 3.5756385068762278,
      "grad_norm": 1.6670293807983398,
      "learning_rate": 1.4151696124684504e-07,
      "loss": 0.2322,
      "step": 910
    },
    {
      "epoch": 3.614931237721022,
      "grad_norm": 1.8248482942581177,
      "learning_rate": 1.1627442355125834e-07,
      "loss": 0.2412,
      "step": 920
    },
    {
      "epoch": 3.654223968565815,
      "grad_norm": 2.2712173461914062,
      "learning_rate": 9.345652283728828e-08,
      "loss": 0.2338,
      "step": 930
    },
    {
      "epoch": 3.6935166994106092,
      "grad_norm": 1.6087329387664795,
      "learning_rate": 7.308646862471319e-08,
      "loss": 0.2435,
      "step": 940
    },
    {
      "epoch": 3.732809430255403,
      "grad_norm": 2.1418673992156982,
      "learning_rate": 5.5184980574911617e-08,
      "loss": 0.253,
      "step": 950
    },
    {
      "epoch": 3.7721021611001966,
      "grad_norm": 1.753722071647644,
      "learning_rate": 3.9770267415590866e-08,
      "loss": 0.2306,
      "step": 960
    },
    {
      "epoch": 3.8113948919449903,
      "grad_norm": 2.183093309402466,
      "learning_rate": 2.6858008419548e-08,
      "loss": 0.2403,
      "step": 970
    },
    {
      "epoch": 3.850687622789784,
      "grad_norm": 2.0719070434570312,
      "learning_rate": 1.6461337456297187e-08,
      "loss": 0.2371,
      "step": 980
    },
    {
      "epoch": 3.8899803536345776,
      "grad_norm": 1.8452452421188354,
      "learning_rate": 8.590829632785513e-09,
      "loss": 0.2388,
      "step": 990
    },
    {
      "epoch": 3.9292730844793713,
      "grad_norm": 2.089444875717163,
      "learning_rate": 3.2544905367876134e-09,
      "loss": 0.2568,
      "step": 1000
    },
    {
      "epoch": 3.968565815324165,
      "grad_norm": 1.6948010921478271,
      "learning_rate": 4.577480939194079e-10,
      "loss": 0.3124,
      "step": 1010
    },
    {
      "epoch": 3.9921414538310414,
      "step": 1016,
      "total_flos": 1.8674965730112307e+17,
      "train_loss": 0.3695383257283939,
      "train_runtime": 3236.8261,
      "train_samples_per_second": 5.023,
      "train_steps_per_second": 0.314
    }
  ],
  "logging_steps": 10,
  "max_steps": 1016,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8674965730112307e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}