{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 171,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03549639489739324,
"grad_norm": 127.17037963867188,
"learning_rate": 5.555555555555555e-07,
"loss": 3.1202,
"step": 2
},
{
"epoch": 0.07099278979478647,
"grad_norm": 127.35641479492188,
"learning_rate": 1.6666666666666667e-06,
"loss": 3.1169,
"step": 4
},
{
"epoch": 0.1064891846921797,
"grad_norm": 94.25624084472656,
"learning_rate": 2.7777777777777783e-06,
"loss": 2.8233,
"step": 6
},
{
"epoch": 0.14198557958957295,
"grad_norm": 17.775108337402344,
"learning_rate": 3.88888888888889e-06,
"loss": 1.8356,
"step": 8
},
{
"epoch": 0.17748197448696618,
"grad_norm": 10.735865592956543,
"learning_rate": 5e-06,
"loss": 1.5481,
"step": 10
},
{
"epoch": 0.2129783693843594,
"grad_norm": 7.582458972930908,
"learning_rate": 6.111111111111112e-06,
"loss": 1.2527,
"step": 12
},
{
"epoch": 0.24847476428175264,
"grad_norm": 5.5655999183654785,
"learning_rate": 7.222222222222223e-06,
"loss": 1.0648,
"step": 14
},
{
"epoch": 0.2839711591791459,
"grad_norm": 4.178959369659424,
"learning_rate": 8.333333333333334e-06,
"loss": 0.8722,
"step": 16
},
{
"epoch": 0.3194675540765391,
"grad_norm": 2.8051388263702393,
"learning_rate": 9.444444444444445e-06,
"loss": 0.6238,
"step": 18
},
{
"epoch": 0.35496394897393235,
"grad_norm": 2.487281322479248,
"learning_rate": 9.998945997517957e-06,
"loss": 0.493,
"step": 20
},
{
"epoch": 0.39046034387132555,
"grad_norm": 2.143423557281494,
"learning_rate": 9.990516643685222e-06,
"loss": 0.4248,
"step": 22
},
{
"epoch": 0.4259567387687188,
"grad_norm": 1.8732106685638428,
"learning_rate": 9.973672149817232e-06,
"loss": 0.2676,
"step": 24
},
{
"epoch": 0.461453133666112,
"grad_norm": 1.6402384042739868,
"learning_rate": 9.948440919541277e-06,
"loss": 0.2245,
"step": 26
},
{
"epoch": 0.49694952856350527,
"grad_norm": 1.3924551010131836,
"learning_rate": 9.91486549841951e-06,
"loss": 0.1811,
"step": 28
},
{
"epoch": 0.5324459234608985,
"grad_norm": 2.457998037338257,
"learning_rate": 9.873002502207502e-06,
"loss": 0.1166,
"step": 30
},
{
"epoch": 0.5679423183582918,
"grad_norm": 1.0437461137771606,
"learning_rate": 9.822922521387277e-06,
"loss": 0.1204,
"step": 32
},
{
"epoch": 0.603438713255685,
"grad_norm": 3.6355228424072266,
"learning_rate": 9.764710002135784e-06,
"loss": 0.1161,
"step": 34
},
{
"epoch": 0.6389351081530782,
"grad_norm": 1.3620433807373047,
"learning_rate": 9.698463103929542e-06,
"loss": 0.1179,
"step": 36
},
{
"epoch": 0.6744315030504714,
"grad_norm": 0.5827310681343079,
"learning_rate": 9.62429353402556e-06,
"loss": 0.0806,
"step": 38
},
{
"epoch": 0.7099278979478647,
"grad_norm": 0.9769260883331299,
"learning_rate": 9.542326359097619e-06,
"loss": 0.0941,
"step": 40
},
{
"epoch": 0.7454242928452579,
"grad_norm": 0.6220430731773376,
"learning_rate": 9.452699794345583e-06,
"loss": 0.0761,
"step": 42
},
{
"epoch": 0.7809206877426511,
"grad_norm": 0.5837266445159912,
"learning_rate": 9.355564970433288e-06,
"loss": 0.068,
"step": 44
},
{
"epoch": 0.8164170826400444,
"grad_norm": 0.445272296667099,
"learning_rate": 9.251085678648072e-06,
"loss": 0.0642,
"step": 46
},
{
"epoch": 0.8519134775374376,
"grad_norm": 0.4069404900074005,
"learning_rate": 9.13943809471159e-06,
"loss": 0.0526,
"step": 48
},
{
"epoch": 0.8874098724348308,
"grad_norm": 0.47115278244018555,
"learning_rate": 9.020810481707709e-06,
"loss": 0.049,
"step": 50
},
{
"epoch": 0.922906267332224,
"grad_norm": 0.9125522375106812,
"learning_rate": 8.895402872628352e-06,
"loss": 0.0547,
"step": 52
},
{
"epoch": 0.9584026622296173,
"grad_norm": 0.3842329978942871,
"learning_rate": 8.763426733072624e-06,
"loss": 0.0449,
"step": 54
},
{
"epoch": 0.9938990571270105,
"grad_norm": 0.7765663266181946,
"learning_rate": 8.625104604667965e-06,
"loss": 0.0618,
"step": 56
},
{
"epoch": 1.0177481974486966,
"grad_norm": 0.39780569076538086,
"learning_rate": 8.480669729814635e-06,
"loss": 0.0477,
"step": 58
},
{
"epoch": 1.0532445923460898,
"grad_norm": 0.3841244578361511,
"learning_rate": 8.330365658386252e-06,
"loss": 0.0412,
"step": 60
},
{
"epoch": 1.088740987243483,
"grad_norm": 0.39046695828437805,
"learning_rate": 8.174445837049614e-06,
"loss": 0.0366,
"step": 62
},
{
"epoch": 1.1242373821408762,
"grad_norm": 0.35411760210990906,
"learning_rate": 8.013173181896283e-06,
"loss": 0.0381,
"step": 64
},
{
"epoch": 1.1597337770382696,
"grad_norm": 0.37250956892967224,
"learning_rate": 7.846819635106569e-06,
"loss": 0.0379,
"step": 66
},
{
"epoch": 1.1952301719356628,
"grad_norm": 0.4131050407886505,
"learning_rate": 7.675665706393502e-06,
"loss": 0.0381,
"step": 68
},
{
"epoch": 1.230726566833056,
"grad_norm": 0.4184521734714508,
"learning_rate": 7.500000000000001e-06,
"loss": 0.0348,
"step": 70
},
{
"epoch": 1.2662229617304492,
"grad_norm": 0.3667065501213074,
"learning_rate": 7.320118728046818e-06,
"loss": 0.0357,
"step": 72
},
{
"epoch": 1.3017193566278424,
"grad_norm": 0.307443231344223,
"learning_rate": 7.136325211051905e-06,
"loss": 0.0344,
"step": 74
},
{
"epoch": 1.3372157515252356,
"grad_norm": 0.3104756474494934,
"learning_rate": 6.948929366463397e-06,
"loss": 0.037,
"step": 76
},
{
"epoch": 1.372712146422629,
"grad_norm": 0.4366794228553772,
"learning_rate": 6.758247186068684e-06,
"loss": 0.0387,
"step": 78
},
{
"epoch": 1.4082085413200223,
"grad_norm": 0.32028865814208984,
"learning_rate": 6.5646002031607726e-06,
"loss": 0.0313,
"step": 80
},
{
"epoch": 1.4437049362174155,
"grad_norm": 0.2846560776233673,
"learning_rate": 6.368314950360416e-06,
"loss": 0.0348,
"step": 82
},
{
"epoch": 1.4792013311148087,
"grad_norm": 0.47968789935112,
"learning_rate": 6.169722409008244e-06,
"loss": 0.0302,
"step": 84
},
{
"epoch": 1.5146977260122019,
"grad_norm": 0.6367243528366089,
"learning_rate": 5.9691574510553505e-06,
"loss": 0.0335,
"step": 86
},
{
"epoch": 1.550194120909595,
"grad_norm": 0.25928691029548645,
"learning_rate": 5.766958274393428e-06,
"loss": 0.0334,
"step": 88
},
{
"epoch": 1.5856905158069883,
"grad_norm": 4.997822284698486,
"learning_rate": 5.5634658325766066e-06,
"loss": 0.0272,
"step": 90
},
{
"epoch": 1.6211869107043815,
"grad_norm": 0.2188279628753662,
"learning_rate": 5.359023259896638e-06,
"loss": 0.0269,
"step": 92
},
{
"epoch": 1.6566833056017747,
"grad_norm": 0.35858553647994995,
"learning_rate": 5.153975292780852e-06,
"loss": 0.0352,
"step": 94
},
{
"epoch": 1.692179700499168,
"grad_norm": 0.308819979429245,
"learning_rate": 4.948667688488552e-06,
"loss": 0.0327,
"step": 96
},
{
"epoch": 1.7276760953965613,
"grad_norm": 0.5604074001312256,
"learning_rate": 4.7434466420860515e-06,
"loss": 0.033,
"step": 98
},
{
"epoch": 1.7631724902939545,
"grad_norm": 0.31649985909461975,
"learning_rate": 4.53865820268349e-06,
"loss": 0.0292,
"step": 100
},
{
"epoch": 1.7986688851913477,
"grad_norm": 0.2754175662994385,
"learning_rate": 4.334647689917734e-06,
"loss": 0.0379,
"step": 102
},
{
"epoch": 1.8341652800887411,
"grad_norm": 0.3243215084075928,
"learning_rate": 4.131759111665349e-06,
"loss": 0.0315,
"step": 104
},
{
"epoch": 1.8696616749861343,
"grad_norm": 0.2798413336277008,
"learning_rate": 3.930334583967514e-06,
"loss": 0.0345,
"step": 106
},
{
"epoch": 1.9051580698835275,
"grad_norm": 0.2298552244901657,
"learning_rate": 3.730713754144961e-06,
"loss": 0.0277,
"step": 108
},
{
"epoch": 1.9406544647809207,
"grad_norm": 0.44037896394729614,
"learning_rate": 3.5332332280757706e-06,
"loss": 0.0276,
"step": 110
},
{
"epoch": 1.976150859678314,
"grad_norm": 0.41545015573501587,
"learning_rate": 3.3382260026017027e-06,
"loss": 0.0383,
"step": 112
},
{
"epoch": 2.0,
"grad_norm": 0.3469395339488983,
"learning_rate": 3.1460209040201967e-06,
"loss": 0.0258,
"step": 114
},
{
"epoch": 2.035496394897393,
"grad_norm": 0.30031341314315796,
"learning_rate": 2.956942033608843e-06,
"loss": 0.0221,
"step": 116
},
{
"epoch": 2.0709927897947864,
"grad_norm": 0.23123888671398163,
"learning_rate": 2.771308221117309e-06,
"loss": 0.027,
"step": 118
},
{
"epoch": 2.1064891846921796,
"grad_norm": 0.2226598858833313,
"learning_rate": 2.5894324871482557e-06,
"loss": 0.0188,
"step": 120
},
{
"epoch": 2.141985579589573,
"grad_norm": 0.24335065484046936,
"learning_rate": 2.411621515333788e-06,
"loss": 0.0226,
"step": 122
},
{
"epoch": 2.177481974486966,
"grad_norm": 0.4972982108592987,
"learning_rate": 2.238175135197471e-06,
"loss": 0.0221,
"step": 124
},
{
"epoch": 2.212978369384359,
"grad_norm": 0.199220210313797,
"learning_rate": 2.069385816573928e-06,
"loss": 0.0182,
"step": 126
},
{
"epoch": 2.2484747642817524,
"grad_norm": 0.2942477762699127,
"learning_rate": 1.9055381764385272e-06,
"loss": 0.0257,
"step": 128
},
{
"epoch": 2.283971159179146,
"grad_norm": 0.2242165505886078,
"learning_rate": 1.746908498978791e-06,
"loss": 0.0217,
"step": 130
},
{
"epoch": 2.3194675540765393,
"grad_norm": 0.19704177975654602,
"learning_rate": 1.5937642697167288e-06,
"loss": 0.0201,
"step": 132
},
{
"epoch": 2.3549639489739325,
"grad_norm": 0.3458758592605591,
"learning_rate": 1.4463637244677648e-06,
"loss": 0.0214,
"step": 134
},
{
"epoch": 2.3904603438713257,
"grad_norm": 0.20692254602909088,
"learning_rate": 1.3049554138967052e-06,
"loss": 0.0173,
"step": 136
},
{
"epoch": 2.425956738768719,
"grad_norm": 0.23293040692806244,
"learning_rate": 1.1697777844051105e-06,
"loss": 0.0217,
"step": 138
},
{
"epoch": 2.461453133666112,
"grad_norm": 0.24933604896068573,
"learning_rate": 1.0410587760567104e-06,
"loss": 0.0174,
"step": 140
},
{
"epoch": 2.4969495285635053,
"grad_norm": 0.24881727993488312,
"learning_rate": 9.190154382188921e-07,
"loss": 0.0201,
"step": 142
},
{
"epoch": 2.5324459234608985,
"grad_norm": 0.22158710658550262,
"learning_rate": 8.03853563568367e-07,
"loss": 0.0194,
"step": 144
},
{
"epoch": 2.5679423183582917,
"grad_norm": 0.2617812156677246,
"learning_rate": 6.957673410781617e-07,
"loss": 0.0238,
"step": 146
},
{
"epoch": 2.603438713255685,
"grad_norm": 0.23341882228851318,
"learning_rate": 5.949390285710777e-07,
"loss": 0.0207,
"step": 148
},
{
"epoch": 2.638935108153078,
"grad_norm": 0.20265567302703857,
"learning_rate": 5.015386453917742e-07,
"loss": 0.0195,
"step": 150
},
{
"epoch": 2.6744315030504713,
"grad_norm": 0.24993731081485748,
"learning_rate": 4.15723685715686e-07,
"loss": 0.0219,
"step": 152
},
{
"epoch": 2.709927897947865,
"grad_norm": 0.20348049700260162,
"learning_rate": 3.3763885297822153e-07,
"loss": 0.0172,
"step": 154
},
{
"epoch": 2.745424292845258,
"grad_norm": 0.43210646510124207,
"learning_rate": 2.6741581587202747e-07,
"loss": 0.0228,
"step": 156
},
{
"epoch": 2.7809206877426513,
"grad_norm": 0.2683945298194885,
"learning_rate": 2.0517298632379445e-07,
"loss": 0.0194,
"step": 158
},
{
"epoch": 2.8164170826400445,
"grad_norm": 0.2853659689426422,
"learning_rate": 1.510153198249531e-07,
"loss": 0.0214,
"step": 160
},
{
"epoch": 2.8519134775374377,
"grad_norm": 0.28844180703163147,
"learning_rate": 1.0503413845297739e-07,
"loss": 0.0217,
"step": 162
},
{
"epoch": 2.887409872434831,
"grad_norm": 0.38325849175453186,
"learning_rate": 6.730697688170251e-08,
"loss": 0.0246,
"step": 164
},
{
"epoch": 2.922906267332224,
"grad_norm": 0.4123646020889282,
"learning_rate": 3.7897451640321326e-08,
"loss": 0.0239,
"step": 166
},
{
"epoch": 2.9584026622296173,
"grad_norm": 0.2761909067630768,
"learning_rate": 1.6855153841527915e-08,
"loss": 0.0198,
"step": 168
},
{
"epoch": 2.9938990571270105,
"grad_norm": 0.28191664814949036,
"learning_rate": 4.2155655596809455e-09,
"loss": 0.0218,
"step": 170
}
],
"logging_steps": 2,
"max_steps": 171,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 208500261126144.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}