{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6954102920723226,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 1.9393939393939395e-05,
      "loss": 1.6312,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.8585858585858588e-05,
      "loss": 1.5425,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 1.5173,
      "step": 60
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.6969696969696972e-05,
      "loss": 1.5135,
      "step": 80
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.616161616161616e-05,
      "loss": 1.4955,
      "step": 100
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.5353535353535354e-05,
      "loss": 1.4712,
      "step": 120
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.4545454545454546e-05,
      "loss": 1.4988,
      "step": 140
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.3737373737373739e-05,
      "loss": 1.5034,
      "step": 160
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.2929292929292931e-05,
      "loss": 1.5176,
      "step": 180
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 1.4751,
      "step": 200
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.1313131313131314e-05,
      "loss": 1.4306,
      "step": 220
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.0505050505050507e-05,
      "loss": 1.4461,
      "step": 240
    },
    {
      "epoch": 0.36,
      "learning_rate": 9.696969696969698e-06,
      "loss": 1.4442,
      "step": 260
    },
    {
      "epoch": 0.39,
      "learning_rate": 8.888888888888888e-06,
      "loss": 1.4804,
      "step": 280
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.08080808080808e-06,
      "loss": 1.4752,
      "step": 300
    },
    {
      "epoch": 0.45,
      "learning_rate": 7.272727272727273e-06,
      "loss": 1.4205,
      "step": 320
    },
    {
      "epoch": 0.47,
      "learning_rate": 6.464646464646466e-06,
      "loss": 1.4609,
      "step": 340
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.656565656565657e-06,
      "loss": 1.4327,
      "step": 360
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.848484848484849e-06,
      "loss": 1.4209,
      "step": 380
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.04040404040404e-06,
      "loss": 1.4746,
      "step": 400
    },
    {
      "epoch": 0.58,
      "learning_rate": 3.232323232323233e-06,
      "loss": 1.4307,
      "step": 420
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.4242424242424244e-06,
      "loss": 1.4377,
      "step": 440
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.6161616161616164e-06,
      "loss": 1.4378,
      "step": 460
    },
    {
      "epoch": 0.67,
      "learning_rate": 8.080808080808082e-07,
      "loss": 1.4669,
      "step": 480
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0,
      "loss": 1.4266,
      "step": 500
    }
  ],
  "logging_steps": 20,
  "max_steps": 500,
  "num_train_epochs": 1,
  "save_steps": 20,
  "total_flos": 3.50548150714368e+17,
  "trial_name": null,
  "trial_params": null
}