{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9966555183946488,
  "eval_steps": 500,
  "global_step": 149,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006688963210702341,
      "grad_norm": 978.0668466051485,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 20.8322,
      "step": 1
    },
    {
      "epoch": 0.033444816053511704,
      "grad_norm": 92.43811540859383,
      "learning_rate": 6.666666666666667e-06,
      "loss": 16.773,
      "step": 5
    },
    {
      "epoch": 0.06688963210702341,
      "grad_norm": 39.36047324647118,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 12.4236,
      "step": 10
    },
    {
      "epoch": 0.10033444816053512,
      "grad_norm": 42.841807816592016,
      "learning_rate": 2e-05,
      "loss": 5.6537,
      "step": 15
    },
    {
      "epoch": 0.13377926421404682,
      "grad_norm": 21.70350414016095,
      "learning_rate": 1.9931371771625545e-05,
      "loss": 1.4611,
      "step": 20
    },
    {
      "epoch": 0.16722408026755853,
      "grad_norm": 31.228847450562586,
      "learning_rate": 1.972642905324813e-05,
      "loss": 1.3315,
      "step": 25
    },
    {
      "epoch": 0.20066889632107024,
      "grad_norm": 4.970027232715837,
      "learning_rate": 1.9387984816003868e-05,
      "loss": 1.0513,
      "step": 30
    },
    {
      "epoch": 0.23411371237458195,
      "grad_norm": 11.692501654577818,
      "learning_rate": 1.8920684425573865e-05,
      "loss": 0.987,
      "step": 35
    },
    {
      "epoch": 0.26755852842809363,
      "grad_norm": 6.962292326187154,
      "learning_rate": 1.8330941881540917e-05,
      "loss": 0.8308,
      "step": 40
    },
    {
      "epoch": 0.3010033444816054,
      "grad_norm": 5.124735579369663,
      "learning_rate": 1.762685178110382e-05,
      "loss": 0.7973,
      "step": 45
    },
    {
      "epoch": 0.33444816053511706,
      "grad_norm": 13.155611885213123,
      "learning_rate": 1.681807821550438e-05,
      "loss": 0.7899,
      "step": 50
    },
    {
      "epoch": 0.36789297658862874,
      "grad_norm": 2.6222582876110594,
      "learning_rate": 1.5915722124135227e-05,
      "loss": 0.6927,
      "step": 55
    },
    {
      "epoch": 0.4013377926421405,
      "grad_norm": 1.8619066028708027,
      "learning_rate": 1.4932168926979074e-05,
      "loss": 0.6164,
      "step": 60
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 15.496063033909833,
      "learning_rate": 1.3880918526722497e-05,
      "loss": 0.59,
      "step": 65
    },
    {
      "epoch": 0.4682274247491639,
      "grad_norm": 3.0474302842682737,
      "learning_rate": 1.2776400013875006e-05,
      "loss": 0.6328,
      "step": 70
    },
    {
      "epoch": 0.5016722408026756,
      "grad_norm": 1.950325828207922,
      "learning_rate": 1.1633773618185302e-05,
      "loss": 0.5833,
      "step": 75
    },
    {
      "epoch": 0.5351170568561873,
      "grad_norm": 13.530105007326785,
      "learning_rate": 1.0468722624699401e-05,
      "loss": 0.5703,
      "step": 80
    },
    {
      "epoch": 0.568561872909699,
      "grad_norm": 10.101684609706085,
      "learning_rate": 9.297238110547075e-06,
      "loss": 0.5452,
      "step": 85
    },
    {
      "epoch": 0.6020066889632107,
      "grad_norm": 3.6089913113064314,
      "learning_rate": 8.13539945708319e-06,
      "loss": 0.5301,
      "step": 90
    },
    {
      "epoch": 0.6354515050167224,
      "grad_norm": 6.774403531621821,
      "learning_rate": 6.999153649996595e-06,
      "loss": 0.5205,
      "step": 95
    },
    {
      "epoch": 0.6688963210702341,
      "grad_norm": 5.1612228146988395,
      "learning_rate": 5.904096396634935e-06,
      "loss": 0.5005,
      "step": 100
    },
    {
      "epoch": 0.7023411371237458,
      "grad_norm": 3.0161450951447075,
      "learning_rate": 4.865258064851579e-06,
      "loss": 0.5017,
      "step": 105
    },
    {
      "epoch": 0.7357859531772575,
      "grad_norm": 5.200183890447438,
      "learning_rate": 3.896897381502081e-06,
      "loss": 0.496,
      "step": 110
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.8999496228689696,
      "learning_rate": 3.0123057222115835e-06,
      "loss": 0.4896,
      "step": 115
    },
    {
      "epoch": 0.802675585284281,
      "grad_norm": 1.779570100471913,
      "learning_rate": 2.2236246786624794e-06,
      "loss": 0.4857,
      "step": 120
    },
    {
      "epoch": 0.8361204013377926,
      "grad_norm": 1.4437199902884683,
      "learning_rate": 1.5416794074090258e-06,
      "loss": 0.4809,
      "step": 125
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.9781902783471523,
      "learning_rate": 9.75830047614117e-07,
      "loss": 0.4762,
      "step": 130
    },
    {
      "epoch": 0.903010033444816,
      "grad_norm": 1.2437451839153923,
      "learning_rate": 5.33843247095659e-07,
      "loss": 0.4848,
      "step": 135
    },
    {
      "epoch": 0.9364548494983278,
      "grad_norm": 0.6199236577734613,
      "learning_rate": 2.2178556007054876e-07,
      "loss": 0.4726,
      "step": 140
    },
    {
      "epoch": 0.9698996655518395,
      "grad_norm": 0.4737959382941702,
      "learning_rate": 4.394017978101905e-08,
      "loss": 0.4759,
      "step": 145
    },
    {
      "epoch": 0.9966555183946488,
      "eval_loss": 1.360945463180542,
      "eval_runtime": 0.4572,
      "eval_samples_per_second": 21.873,
      "eval_steps_per_second": 2.187,
      "step": 149
    },
    {
      "epoch": 0.9966555183946488,
      "step": 149,
      "total_flos": 20483974103040.0,
      "train_loss": 1.7931190865151834,
      "train_runtime": 1036.7998,
      "train_samples_per_second": 36.899,
      "train_steps_per_second": 0.144
    }
  ],
  "logging_steps": 5,
  "max_steps": 149,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 20483974103040.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}