{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9970326409495549,
  "eval_steps": 500,
  "global_step": 168,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.005934718100890208,
      "grad_norm": 3.172736644744873,
      "learning_rate": 1.1764705882352942e-05,
      "loss": 1.7951,
      "step": 1
    },
    {
      "epoch": 0.02967359050445104,
      "grad_norm": 3.245225429534912,
      "learning_rate": 5.882352941176471e-05,
      "loss": 1.8001,
      "step": 5
    },
    {
      "epoch": 0.05934718100890208,
      "grad_norm": 2.2378523349761963,
      "learning_rate": 0.00011764705882352942,
      "loss": 1.7245,
      "step": 10
    },
    {
      "epoch": 0.08902077151335312,
      "grad_norm": 2.2833542823791504,
      "learning_rate": 0.00017647058823529413,
      "loss": 1.6177,
      "step": 15
    },
    {
      "epoch": 0.11869436201780416,
      "grad_norm": 2.2818639278411865,
      "learning_rate": 0.00019980527694749952,
      "loss": 1.4883,
      "step": 20
    },
    {
      "epoch": 0.14836795252225518,
      "grad_norm": 2.0385029315948486,
      "learning_rate": 0.00019861804788521493,
      "loss": 1.3673,
      "step": 25
    },
    {
      "epoch": 0.17804154302670624,
      "grad_norm": 1.0247116088867188,
      "learning_rate": 0.00019636458959356316,
      "loss": 1.3097,
      "step": 30
    },
    {
      "epoch": 0.20771513353115728,
      "grad_norm": 0.694072961807251,
      "learning_rate": 0.00019306926579854821,
      "loss": 1.2812,
      "step": 35
    },
    {
      "epoch": 0.23738872403560832,
      "grad_norm": 0.7701340913772583,
      "learning_rate": 0.00018876770456851877,
      "loss": 1.2651,
      "step": 40
    },
    {
      "epoch": 0.26706231454005935,
      "grad_norm": 0.7059149742126465,
      "learning_rate": 0.00018350641311400812,
      "loss": 1.2556,
      "step": 45
    },
    {
      "epoch": 0.29673590504451036,
      "grad_norm": 0.8174459338188171,
      "learning_rate": 0.0001773422749654988,
      "loss": 1.2434,
      "step": 50
    },
    {
      "epoch": 0.3264094955489614,
      "grad_norm": 0.999190628528595,
      "learning_rate": 0.00017034193496547902,
      "loss": 1.2444,
      "step": 55
    },
    {
      "epoch": 0.3560830860534125,
      "grad_norm": 0.6954330801963806,
      "learning_rate": 0.00016258107872407375,
      "loss": 1.2284,
      "step": 60
    },
    {
      "epoch": 0.3857566765578635,
      "grad_norm": 0.64048171043396,
      "learning_rate": 0.00015414361432856475,
      "loss": 1.2226,
      "step": 65
    },
    {
      "epoch": 0.41543026706231456,
      "grad_norm": 0.6632175445556641,
      "learning_rate": 0.00014512076515391375,
      "loss": 1.2228,
      "step": 70
    },
    {
      "epoch": 0.44510385756676557,
      "grad_norm": 0.7728683948516846,
      "learning_rate": 0.00013561008358255468,
      "loss": 1.2283,
      "step": 75
    },
    {
      "epoch": 0.47477744807121663,
      "grad_norm": 0.6928794384002686,
      "learning_rate": 0.0001257143962968246,
      "loss": 1.2079,
      "step": 80
    },
    {
      "epoch": 0.5044510385756676,
      "grad_norm": 0.6514697074890137,
      "learning_rate": 0.00011554069254722051,
      "loss": 1.2187,
      "step": 85
    },
    {
      "epoch": 0.5341246290801187,
      "grad_norm": 0.6465914249420166,
      "learning_rate": 0.00010519896741619803,
      "loss": 1.2216,
      "step": 90
    },
    {
      "epoch": 0.5637982195845698,
      "grad_norm": 0.7518061399459839,
      "learning_rate": 9.480103258380198e-05,
      "loss": 1.2095,
      "step": 95
    },
    {
      "epoch": 0.5934718100890207,
      "grad_norm": 0.7165743112564087,
      "learning_rate": 8.445930745277953e-05,
      "loss": 1.2197,
      "step": 100
    },
    {
      "epoch": 0.6231454005934718,
      "grad_norm": 0.6865619421005249,
      "learning_rate": 7.428560370317542e-05,
      "loss": 1.2122,
      "step": 105
    },
    {
      "epoch": 0.6528189910979229,
      "grad_norm": 0.6512064933776855,
      "learning_rate": 6.43899164174453e-05,
      "loss": 1.1987,
      "step": 110
    },
    {
      "epoch": 0.6824925816023739,
      "grad_norm": 0.720739483833313,
      "learning_rate": 5.487923484608629e-05,
      "loss": 1.2059,
      "step": 115
    },
    {
      "epoch": 0.712166172106825,
      "grad_norm": 0.7217025756835938,
      "learning_rate": 4.585638567143529e-05,
      "loss": 1.205,
      "step": 120
    },
    {
      "epoch": 0.7418397626112759,
      "grad_norm": 0.6354172229766846,
      "learning_rate": 3.741892127592625e-05,
      "loss": 1.2139,
      "step": 125
    },
    {
      "epoch": 0.771513353115727,
      "grad_norm": 0.6358697414398193,
      "learning_rate": 2.9658065034520978e-05,
      "loss": 1.2008,
      "step": 130
    },
    {
      "epoch": 0.8011869436201781,
      "grad_norm": 0.6895249485969543,
      "learning_rate": 2.265772503450122e-05,
      "loss": 1.1978,
      "step": 135
    },
    {
      "epoch": 0.8308605341246291,
      "grad_norm": 0.6897445321083069,
      "learning_rate": 1.649358688599191e-05,
      "loss": 1.2049,
      "step": 140
    },
    {
      "epoch": 0.8605341246290801,
      "grad_norm": 0.6667594909667969,
      "learning_rate": 1.1232295431481222e-05,
      "loss": 1.2018,
      "step": 145
    },
    {
      "epoch": 0.8902077151335311,
      "grad_norm": 0.6951190829277039,
      "learning_rate": 6.930734201451816e-06,
      "loss": 1.205,
      "step": 150
    },
    {
      "epoch": 0.9198813056379822,
      "grad_norm": 0.7803878784179688,
      "learning_rate": 3.6354104064368566e-06,
      "loss": 1.1999,
      "step": 155
    },
    {
      "epoch": 0.9495548961424333,
      "grad_norm": 0.6693819761276245,
      "learning_rate": 1.3819521147851123e-06,
      "loss": 1.1995,
      "step": 160
    },
    {
      "epoch": 0.9792284866468842,
      "grad_norm": 0.6466339230537415,
      "learning_rate": 1.947230525005006e-07,
      "loss": 1.1865,
      "step": 165
    },
    {
      "epoch": 0.9970326409495549,
      "eval_loss": 1.8819891214370728,
      "eval_runtime": 0.6391,
      "eval_samples_per_second": 21.907,
      "eval_steps_per_second": 1.565,
      "step": 168
    },
    {
      "epoch": 0.9970326409495549,
      "step": 168,
      "total_flos": 8.227900459906499e+17,
      "train_loss": 1.2774092711153484,
      "train_runtime": 653.4451,
      "train_samples_per_second": 57.691,
      "train_steps_per_second": 0.257
    }
  ],
  "logging_steps": 5,
  "max_steps": 168,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.227900459906499e+17,
  "train_batch_size": 14,
  "trial_name": null,
  "trial_params": null
}