{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999829030603522,
  "eval_steps": 250,
  "global_step": 1462,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05060694135749701,
      "grad_norm": 2.899911403656006,
      "learning_rate": 4.965986394557823e-07,
      "loss": 0.4241,
      "step": 74
    },
    {
      "epoch": 0.10121388271499401,
      "grad_norm": 3.3947112560272217,
      "learning_rate": 1e-06,
      "loss": 0.4599,
      "step": 148
    },
    {
      "epoch": 0.15182082407249103,
      "grad_norm": 1.8692643642425537,
      "learning_rate": 9.437262357414448e-07,
      "loss": 0.4139,
      "step": 222
    },
    {
      "epoch": 0.17096939647803044,
      "eval_loss": 0.31114912033081055,
      "eval_runtime": 28.2661,
      "eval_samples_per_second": 17.689,
      "eval_steps_per_second": 8.845,
      "step": 250
    },
    {
      "epoch": 0.20242776542998803,
      "grad_norm": 2.969365119934082,
      "learning_rate": 8.874524714828897e-07,
      "loss": 0.3115,
      "step": 296
    },
    {
      "epoch": 0.253034706787485,
      "grad_norm": 3.5337681770324707,
      "learning_rate": 8.311787072243346e-07,
      "loss": 0.2779,
      "step": 370
    },
    {
      "epoch": 0.30364164814498207,
      "grad_norm": 1.5600870847702026,
      "learning_rate": 7.749049429657795e-07,
      "loss": 0.2133,
      "step": 444
    },
    {
      "epoch": 0.3419387929560609,
      "eval_loss": 0.20003952085971832,
      "eval_runtime": 28.2792,
      "eval_samples_per_second": 17.681,
      "eval_steps_per_second": 8.84,
      "step": 500
    },
    {
      "epoch": 0.35424858950247906,
      "grad_norm": 1.1956393718719482,
      "learning_rate": 7.186311787072243e-07,
      "loss": 0.1824,
      "step": 518
    },
    {
      "epoch": 0.40485553085997605,
      "grad_norm": 1.9002796411514282,
      "learning_rate": 6.623574144486692e-07,
      "loss": 0.1918,
      "step": 592
    },
    {
      "epoch": 0.45546247221747305,
      "grad_norm": 1.699954628944397,
      "learning_rate": 6.060836501901141e-07,
      "loss": 0.174,
      "step": 666
    },
    {
      "epoch": 0.50606941357497,
      "grad_norm": 1.3418357372283936,
      "learning_rate": 5.498098859315589e-07,
      "loss": 0.1749,
      "step": 740
    },
    {
      "epoch": 0.5129081894340913,
      "eval_loss": 0.1753847748041153,
      "eval_runtime": 28.226,
      "eval_samples_per_second": 17.714,
      "eval_steps_per_second": 8.857,
      "step": 750
    },
    {
      "epoch": 0.5566763549324671,
      "grad_norm": 1.6452018022537231,
      "learning_rate": 4.935361216730038e-07,
      "loss": 0.1697,
      "step": 814
    },
    {
      "epoch": 0.6072832962899641,
      "grad_norm": 1.9864749908447266,
      "learning_rate": 4.372623574144487e-07,
      "loss": 0.1614,
      "step": 888
    },
    {
      "epoch": 0.6578902376474611,
      "grad_norm": 2.566192865371704,
      "learning_rate": 3.8098859315589356e-07,
      "loss": 0.161,
      "step": 962
    },
    {
      "epoch": 0.6838775859121218,
      "eval_loss": 0.1615215688943863,
      "eval_runtime": 28.1804,
      "eval_samples_per_second": 17.743,
      "eval_steps_per_second": 8.871,
      "step": 1000
    },
    {
      "epoch": 0.7084971790049581,
      "grad_norm": 1.8036516904830933,
      "learning_rate": 3.247148288973384e-07,
      "loss": 0.1628,
      "step": 1036
    },
    {
      "epoch": 0.7591041203624551,
      "grad_norm": 2.3174946308135986,
      "learning_rate": 2.6844106463878326e-07,
      "loss": 0.1464,
      "step": 1110
    },
    {
      "epoch": 0.8097110617199521,
      "grad_norm": 1.3534860610961914,
      "learning_rate": 2.1216730038022811e-07,
      "loss": 0.1455,
      "step": 1184
    },
    {
      "epoch": 0.8548469823901521,
      "eval_loss": 0.15634691715240479,
      "eval_runtime": 28.2053,
      "eval_samples_per_second": 17.727,
      "eval_steps_per_second": 8.864,
      "step": 1250
    },
    {
      "epoch": 0.8603180030774491,
      "grad_norm": 3.979982852935791,
      "learning_rate": 1.55893536121673e-07,
      "loss": 0.136,
      "step": 1258
    },
    {
      "epoch": 0.9109249444349461,
      "grad_norm": 1.3337361812591553,
      "learning_rate": 9.961977186311786e-08,
      "loss": 0.1532,
      "step": 1332
    },
    {
      "epoch": 0.9615318857924432,
      "grad_norm": 3.147995948791504,
      "learning_rate": 4.3346007604562734e-08,
      "loss": 0.1387,
      "step": 1406
    },
    {
      "epoch": 0.999829030603522,
      "step": 1462,
      "total_flos": 9.926380124268134e+16,
      "train_loss": 0.21817047586193164,
      "train_runtime": 2253.2367,
      "train_samples_per_second": 5.191,
      "train_steps_per_second": 0.649
    }
  ],
  "logging_steps": 74,
  "max_steps": 1462,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.926380124268134e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}