|
{ |
|
"best_metric": 0.1333775818347931, |
|
"best_model_checkpoint": "segformer-b0-finetuned-segments-sidewalk-outputs/checkpoint-1380", |
|
"epoch": 34.5, |
|
"global_step": 1380, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.9970000000000004e-05, |
|
"loss": 1.2082, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 5.994e-05, |
|
"loss": 1.0322, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 5.991000000000001e-05, |
|
"loss": 0.9391, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 5.988e-05, |
|
"loss": 0.9601, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 5.9850000000000005e-05, |
|
"loss": 0.9436, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 5.982e-05, |
|
"loss": 0.8901, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 5.979e-05, |
|
"loss": 0.9326, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 5.9760000000000004e-05, |
|
"loss": 0.9164, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 5.9730000000000006e-05, |
|
"loss": 0.8523, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 5.97e-05, |
|
"loss": 0.7235, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 5.967e-05, |
|
"loss": 0.9828, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 5.964e-05, |
|
"loss": 0.7795, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 5.961000000000001e-05, |
|
"loss": 0.8967, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 5.958e-05, |
|
"loss": 0.8618, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 5.9550000000000004e-05, |
|
"loss": 0.8513, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 5.952e-05, |
|
"loss": 0.7067, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 5.949e-05, |
|
"loss": 0.7935, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 5.946e-05, |
|
"loss": 0.9116, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 5.9430000000000005e-05, |
|
"loss": 0.6582, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 5.94e-05, |
|
"loss": 0.6827, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_loss": 0.8834543228149414, |
|
"eval_mean_accuracy": 0.6718939356205333, |
|
"eval_mean_iou": 0.2607108677425002, |
|
"eval_overall_accuracy": 0.5466109819427459, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.44492668085864034, |
|
0.8988611903824264 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.43746558076697917, |
|
0.3446670224605214 |
|
], |
|
"eval_runtime": 47.0306, |
|
"eval_samples_per_second": 0.425, |
|
"eval_steps_per_second": 0.213, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 5.937e-05, |
|
"loss": 1.1641, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 5.934e-05, |
|
"loss": 0.7001, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 5.9310000000000006e-05, |
|
"loss": 0.6366, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 5.928e-05, |
|
"loss": 0.7333, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 5.9250000000000004e-05, |
|
"loss": 0.731, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.922e-05, |
|
"loss": 0.6953, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 5.919e-05, |
|
"loss": 0.7026, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 5.916e-05, |
|
"loss": 0.633, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 5.9130000000000005e-05, |
|
"loss": 0.6653, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 5.91e-05, |
|
"loss": 0.6987, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 5.907e-05, |
|
"loss": 0.6367, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 5.9040000000000004e-05, |
|
"loss": 0.5351, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 5.9010000000000006e-05, |
|
"loss": 0.7612, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 5.898e-05, |
|
"loss": 1.0138, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 5.895e-05, |
|
"loss": 0.6202, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 5.892e-05, |
|
"loss": 0.6921, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 5.889000000000001e-05, |
|
"loss": 0.5186, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 5.886e-05, |
|
"loss": 0.5955, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 5.8830000000000004e-05, |
|
"loss": 0.6696, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 5.88e-05, |
|
"loss": 0.6717, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 0.7762032747268677, |
|
"eval_mean_accuracy": 0.756401809675905, |
|
"eval_mean_iou": 0.3105024097830028, |
|
"eval_overall_accuracy": 0.6408774372137014, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.5471135633014456, |
|
0.9656900560503644 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.5429513256944513, |
|
0.38855590365455717 |
|
], |
|
"eval_runtime": 47.4699, |
|
"eval_samples_per_second": 0.421, |
|
"eval_steps_per_second": 0.211, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 5.877e-05, |
|
"loss": 0.5692, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 5.8740000000000003e-05, |
|
"loss": 0.5096, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 5.8710000000000005e-05, |
|
"loss": 0.5457, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 5.868e-05, |
|
"loss": 0.4582, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 5.865e-05, |
|
"loss": 0.6143, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 5.862e-05, |
|
"loss": 0.6951, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 5.8590000000000007e-05, |
|
"loss": 0.5025, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 5.856e-05, |
|
"loss": 1.1675, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 5.8530000000000004e-05, |
|
"loss": 1.0585, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 5.85e-05, |
|
"loss": 0.6964, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 5.847e-05, |
|
"loss": 0.5112, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 5.844e-05, |
|
"loss": 0.822, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 5.8410000000000005e-05, |
|
"loss": 0.547, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 5.838e-05, |
|
"loss": 0.443, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 5.835e-05, |
|
"loss": 0.7045, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 5.832e-05, |
|
"loss": 0.5225, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 5.8290000000000006e-05, |
|
"loss": 0.4225, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 5.826e-05, |
|
"loss": 0.5549, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 5.823e-05, |
|
"loss": 0.4284, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 5.82e-05, |
|
"loss": 0.5073, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"eval_loss": 0.5137239694595337, |
|
"eval_mean_accuracy": 0.8241622297344804, |
|
"eval_mean_iou": 0.48694262570265306, |
|
"eval_overall_accuracy": 0.8814629163262906, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9279702830244996, |
|
0.7203541764444612 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8606411961780306, |
|
0.6001866809299288 |
|
], |
|
"eval_runtime": 48.2702, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 5.817e-05, |
|
"loss": 0.4515, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 5.814e-05, |
|
"loss": 0.9959, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 5.8110000000000004e-05, |
|
"loss": 0.7347, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 5.808e-05, |
|
"loss": 0.6901, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 5.805e-05, |
|
"loss": 0.3837, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 5.802e-05, |
|
"loss": 0.6317, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 5.7990000000000006e-05, |
|
"loss": 0.5058, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 5.796e-05, |
|
"loss": 0.403, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 5.793e-05, |
|
"loss": 0.5889, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 5.79e-05, |
|
"loss": 0.421, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 5.787e-05, |
|
"loss": 0.5823, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 5.784e-05, |
|
"loss": 0.5088, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 5.7810000000000004e-05, |
|
"loss": 0.6356, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 5.778e-05, |
|
"loss": 0.433, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 5.775e-05, |
|
"loss": 0.301, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 5.7719999999999996e-05, |
|
"loss": 0.4938, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 5.7690000000000005e-05, |
|
"loss": 0.4548, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 5.766e-05, |
|
"loss": 0.3456, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 5.763e-05, |
|
"loss": 0.4227, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 5.76e-05, |
|
"loss": 0.645, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"eval_loss": 0.5222005844116211, |
|
"eval_mean_accuracy": 0.8593003570537622, |
|
"eval_mean_iou": 0.4324267875542314, |
|
"eval_overall_accuracy": 0.7904938796752369, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.734647985585855, |
|
0.9839527285216694 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.7340596529537314, |
|
0.5632207097089628 |
|
], |
|
"eval_runtime": 48.1367, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.208, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 5.757e-05, |
|
"loss": 0.3101, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 5.754e-05, |
|
"loss": 0.4324, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 5.751e-05, |
|
"loss": 0.5271, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 5.748e-05, |
|
"loss": 0.4749, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 5.745e-05, |
|
"loss": 0.4237, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 5.742e-05, |
|
"loss": 0.5763, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 5.7390000000000004e-05, |
|
"loss": 0.4159, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 5.736e-05, |
|
"loss": 0.5616, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 5.733e-05, |
|
"loss": 0.5792, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 5.73e-05, |
|
"loss": 0.4864, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 5.7270000000000006e-05, |
|
"loss": 0.4517, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 5.724e-05, |
|
"loss": 0.4227, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 5.721e-05, |
|
"loss": 1.0034, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 5.718e-05, |
|
"loss": 0.5311, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 5.715e-05, |
|
"loss": 0.4184, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 5.712e-05, |
|
"loss": 0.465, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 5.7090000000000004e-05, |
|
"loss": 0.7336, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 5.706e-05, |
|
"loss": 0.7865, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 5.703e-05, |
|
"loss": 0.4739, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 5.6999999999999996e-05, |
|
"loss": 0.4173, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"eval_loss": 0.34641411900520325, |
|
"eval_mean_accuracy": 0.7393900107293994, |
|
"eval_mean_iou": 0.4519346247035367, |
|
"eval_overall_accuracy": 0.8488291062247336, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9376539432040742, |
|
0.5411260782547244 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8348604687385176, |
|
0.5209434053720925 |
|
], |
|
"eval_runtime": 48.5002, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 5.6970000000000005e-05, |
|
"loss": 0.5154, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 5.694e-05, |
|
"loss": 0.4903, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 5.691e-05, |
|
"loss": 0.4885, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 5.688e-05, |
|
"loss": 0.4041, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 5.685e-05, |
|
"loss": 0.3674, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 5.682e-05, |
|
"loss": 0.2295, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 5.6790000000000003e-05, |
|
"loss": 1.0179, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 5.676e-05, |
|
"loss": 0.2604, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 5.673e-05, |
|
"loss": 0.3548, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 5.6699999999999996e-05, |
|
"loss": 0.4074, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 5.6670000000000005e-05, |
|
"loss": 0.6856, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 5.664e-05, |
|
"loss": 1.1289, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 5.661e-05, |
|
"loss": 0.4405, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 5.658e-05, |
|
"loss": 0.8337, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 5.655e-05, |
|
"loss": 0.3255, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.652e-05, |
|
"loss": 0.5069, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 5.649e-05, |
|
"loss": 0.4483, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 5.646e-05, |
|
"loss": 0.368, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 5.643e-05, |
|
"loss": 0.7829, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 5.6399999999999995e-05, |
|
"loss": 0.8881, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"eval_loss": 0.3434440791606903, |
|
"eval_mean_accuracy": 0.8174777092760303, |
|
"eval_mean_iou": 0.5078240370773638, |
|
"eval_overall_accuracy": 0.8651856882525534, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9039072569572595, |
|
0.731048161594801 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8416629247588947, |
|
0.6818091864731967 |
|
], |
|
"eval_runtime": 49.2759, |
|
"eval_samples_per_second": 0.406, |
|
"eval_steps_per_second": 0.203, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 5.6370000000000004e-05, |
|
"loss": 0.4869, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 5.634e-05, |
|
"loss": 0.2544, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 5.631e-05, |
|
"loss": 0.5558, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 5.6279999999999996e-05, |
|
"loss": 0.5816, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 5.625e-05, |
|
"loss": 0.5193, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 5.622000000000001e-05, |
|
"loss": 0.4635, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 5.619e-05, |
|
"loss": 0.4327, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 5.6160000000000004e-05, |
|
"loss": 0.3984, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 5.613e-05, |
|
"loss": 0.391, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 5.61e-05, |
|
"loss": 0.6207, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 5.6070000000000004e-05, |
|
"loss": 0.3424, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 5.6040000000000006e-05, |
|
"loss": 0.5947, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 5.601e-05, |
|
"loss": 0.2676, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 5.598e-05, |
|
"loss": 0.3732, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 5.595e-05, |
|
"loss": 0.523, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 5.592000000000001e-05, |
|
"loss": 0.3125, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 5.589e-05, |
|
"loss": 0.4795, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 5.5860000000000004e-05, |
|
"loss": 0.2888, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 5.583e-05, |
|
"loss": 0.5451, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 5.58e-05, |
|
"loss": 0.3846, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"eval_loss": 0.31755417585372925, |
|
"eval_mean_accuracy": 0.9216071847563998, |
|
"eval_mean_iou": 0.569468718641803, |
|
"eval_overall_accuracy": 0.9143545260756287, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9084679987604084, |
|
0.9347463707523912 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8941303774730092, |
|
0.8142757784524 |
|
], |
|
"eval_runtime": 49.4017, |
|
"eval_samples_per_second": 0.405, |
|
"eval_steps_per_second": 0.202, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 5.577e-05, |
|
"loss": 0.5607, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 5.5740000000000005e-05, |
|
"loss": 0.5292, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 5.571e-05, |
|
"loss": 0.2441, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 5.568e-05, |
|
"loss": 0.2737, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 5.5650000000000004e-05, |
|
"loss": 0.4429, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 5.5620000000000006e-05, |
|
"loss": 0.4026, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 5.559e-05, |
|
"loss": 0.4234, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 5.556e-05, |
|
"loss": 0.4091, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 5.553e-05, |
|
"loss": 0.6148, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 5.550000000000001e-05, |
|
"loss": 1.3302, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 5.547e-05, |
|
"loss": 0.2761, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 5.5440000000000005e-05, |
|
"loss": 0.3663, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 5.541e-05, |
|
"loss": 0.2674, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 5.538e-05, |
|
"loss": 0.2528, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 5.5350000000000004e-05, |
|
"loss": 0.6118, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 5.5320000000000006e-05, |
|
"loss": 0.3095, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 5.529e-05, |
|
"loss": 0.3554, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 5.526e-05, |
|
"loss": 0.4051, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 5.523e-05, |
|
"loss": 0.315, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 5.520000000000001e-05, |
|
"loss": 0.9319, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 0.2767624855041504, |
|
"eval_mean_accuracy": 0.9090526773584855, |
|
"eval_mean_iou": 0.5241215752108322, |
|
"eval_overall_accuracy": 0.8693237754113918, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.8370783221494974, |
|
0.9810270325674735 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8342241618348791, |
|
0.7381405637976174 |
|
], |
|
"eval_runtime": 48.9178, |
|
"eval_samples_per_second": 0.409, |
|
"eval_steps_per_second": 0.204, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 5.517e-05, |
|
"loss": 0.384, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 5.5140000000000004e-05, |
|
"loss": 0.2424, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 5.511e-05, |
|
"loss": 0.5975, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 5.508e-05, |
|
"loss": 0.1757, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 5.505e-05, |
|
"loss": 0.3585, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 5.5020000000000005e-05, |
|
"loss": 0.349, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 5.499e-05, |
|
"loss": 0.243, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 5.496e-05, |
|
"loss": 0.4824, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 5.493e-05, |
|
"loss": 0.2798, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 5.4900000000000006e-05, |
|
"loss": 0.3875, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 5.487e-05, |
|
"loss": 0.7034, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 5.4840000000000003e-05, |
|
"loss": 0.1977, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 5.481e-05, |
|
"loss": 0.1849, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 5.478e-05, |
|
"loss": 0.2269, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 5.475e-05, |
|
"loss": 0.2172, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 5.4720000000000005e-05, |
|
"loss": 0.9048, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 5.469e-05, |
|
"loss": 0.2553, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 5.466e-05, |
|
"loss": 0.4218, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 5.463e-05, |
|
"loss": 0.288, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 5.4600000000000006e-05, |
|
"loss": 0.4806, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"eval_loss": 0.38951587677001953, |
|
"eval_mean_accuracy": 0.9439787770292487, |
|
"eval_mean_iou": 0.5620683451115345, |
|
"eval_overall_accuracy": 0.9275893154935977, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9142870194311686, |
|
0.9736705346273289 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9096082836264106, |
|
0.776596751708193 |
|
], |
|
"eval_runtime": 48.7001, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.205, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 5.457e-05, |
|
"loss": 0.5392, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 5.454e-05, |
|
"loss": 0.37, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 5.451e-05, |
|
"loss": 0.3083, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 5.448e-05, |
|
"loss": 0.5515, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 5.445e-05, |
|
"loss": 0.4385, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 5.4420000000000004e-05, |
|
"loss": 0.252, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 5.439e-05, |
|
"loss": 0.4565, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 5.436e-05, |
|
"loss": 0.2629, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 5.4329999999999997e-05, |
|
"loss": 0.4152, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 5.4300000000000005e-05, |
|
"loss": 0.3362, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 5.427e-05, |
|
"loss": 0.6633, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 5.424e-05, |
|
"loss": 0.6759, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 5.421e-05, |
|
"loss": 0.9825, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 5.418e-05, |
|
"loss": 0.376, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 4.88, |
|
"learning_rate": 5.415e-05, |
|
"loss": 0.3648, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 4.9, |
|
"learning_rate": 5.4120000000000004e-05, |
|
"loss": 0.2648, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 4.92, |
|
"learning_rate": 5.409e-05, |
|
"loss": 0.6202, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 5.406e-05, |
|
"loss": 0.6997, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 4.97, |
|
"learning_rate": 5.403e-05, |
|
"loss": 0.4324, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5.4000000000000005e-05, |
|
"loss": 0.3945, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_loss": 0.27207431197166443, |
|
"eval_mean_accuracy": 0.8545890809494763, |
|
"eval_mean_iou": 0.5138796076397971, |
|
"eval_overall_accuracy": 0.8242902050635074, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.7996985114000377, |
|
0.9094796504989148 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.7903048161453703, |
|
0.7513340067740211 |
|
], |
|
"eval_runtime": 48.403, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.207, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 5.397e-05, |
|
"loss": 0.455, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 5.394e-05, |
|
"loss": 0.2955, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 5.08, |
|
"learning_rate": 5.391e-05, |
|
"loss": 0.1744, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 5.1, |
|
"learning_rate": 5.3880000000000006e-05, |
|
"loss": 0.1787, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 5.385e-05, |
|
"loss": 0.2821, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 5.15, |
|
"learning_rate": 5.382e-05, |
|
"loss": 0.6153, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 5.17, |
|
"learning_rate": 5.379e-05, |
|
"loss": 0.3641, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 5.376e-05, |
|
"loss": 0.3918, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 5.22, |
|
"learning_rate": 5.373e-05, |
|
"loss": 0.268, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 5.3700000000000004e-05, |
|
"loss": 0.4003, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 5.367e-05, |
|
"loss": 0.2636, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 5.3, |
|
"learning_rate": 5.364e-05, |
|
"loss": 0.3127, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 5.361e-05, |
|
"loss": 0.3199, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 5.35, |
|
"learning_rate": 5.3580000000000005e-05, |
|
"loss": 0.4172, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 5.38, |
|
"learning_rate": 5.355e-05, |
|
"loss": 1.0095, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 5.352e-05, |
|
"loss": 1.1924, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 5.349e-05, |
|
"loss": 0.2096, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 5.346e-05, |
|
"loss": 0.1999, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 5.343e-05, |
|
"loss": 0.4955, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 5.3400000000000004e-05, |
|
"loss": 0.481, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"eval_loss": 0.3197166323661804, |
|
"eval_mean_accuracy": 0.8925474390283793, |
|
"eval_mean_iou": 0.5617007834875217, |
|
"eval_overall_accuracy": 0.933504608974328, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.966746970190107, |
|
0.8183479078666516 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9219297160027523, |
|
0.763172634459813 |
|
], |
|
"eval_runtime": 48.9978, |
|
"eval_samples_per_second": 0.408, |
|
"eval_steps_per_second": 0.204, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 5.53, |
|
"learning_rate": 5.337e-05, |
|
"loss": 0.2962, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 5.334e-05, |
|
"loss": 0.1479, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 5.58, |
|
"learning_rate": 5.3309999999999996e-05, |
|
"loss": 0.2301, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 5.6, |
|
"learning_rate": 5.3280000000000005e-05, |
|
"loss": 0.3265, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 5.325e-05, |
|
"loss": 0.2636, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 5.65, |
|
"learning_rate": 5.322e-05, |
|
"loss": 0.3857, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 5.319e-05, |
|
"loss": 0.3996, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 5.316e-05, |
|
"loss": 0.2629, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 5.72, |
|
"learning_rate": 5.313e-05, |
|
"loss": 0.3125, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 5.31e-05, |
|
"loss": 0.3026, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 5.78, |
|
"learning_rate": 5.307e-05, |
|
"loss": 0.3461, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 5.8, |
|
"learning_rate": 5.304e-05, |
|
"loss": 0.3292, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 5.83, |
|
"learning_rate": 5.3009999999999996e-05, |
|
"loss": 0.8407, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 5.85, |
|
"learning_rate": 5.2980000000000004e-05, |
|
"loss": 0.3062, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 5.295e-05, |
|
"loss": 0.138, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 5.9, |
|
"learning_rate": 5.292e-05, |
|
"loss": 0.169, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 5.92, |
|
"learning_rate": 5.289e-05, |
|
"loss": 0.443, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 5.95, |
|
"learning_rate": 5.286e-05, |
|
"loss": 0.3298, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 5.97, |
|
"learning_rate": 5.283e-05, |
|
"loss": 0.3776, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 5.28e-05, |
|
"loss": 0.3986, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"eval_loss": 0.20506949722766876, |
|
"eval_mean_accuracy": 0.9171030416015327, |
|
"eval_mean_iou": 0.5573306667281388, |
|
"eval_overall_accuracy": 0.9116858969091434, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.907289140959936, |
|
0.9269169422431295 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.891243490977623, |
|
0.7807485092067935 |
|
], |
|
"eval_runtime": 48.3589, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 6.03, |
|
"learning_rate": 5.277e-05, |
|
"loss": 0.9028, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 5.274e-05, |
|
"loss": 0.2695, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 6.08, |
|
"learning_rate": 5.2709999999999995e-05, |
|
"loss": 0.5083, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 6.1, |
|
"learning_rate": 5.2680000000000004e-05, |
|
"loss": 0.2162, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 6.12, |
|
"learning_rate": 5.265e-05, |
|
"loss": 0.4195, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 6.15, |
|
"learning_rate": 5.262e-05, |
|
"loss": 0.6899, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 5.2589999999999996e-05, |
|
"loss": 0.3309, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 6.2, |
|
"learning_rate": 5.256e-05, |
|
"loss": 0.2429, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 5.253e-05, |
|
"loss": 0.2289, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 5.25e-05, |
|
"loss": 0.2147, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 6.28, |
|
"learning_rate": 5.2470000000000004e-05, |
|
"loss": 0.2161, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 5.244e-05, |
|
"loss": 0.3912, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 5.241e-05, |
|
"loss": 0.654, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 6.35, |
|
"learning_rate": 5.238e-05, |
|
"loss": 0.3199, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 6.38, |
|
"learning_rate": 5.2350000000000005e-05, |
|
"loss": 0.2043, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 6.4, |
|
"learning_rate": 5.232e-05, |
|
"loss": 0.1392, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 6.42, |
|
"learning_rate": 5.229e-05, |
|
"loss": 0.1945, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 5.2260000000000004e-05, |
|
"loss": 0.3461, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 5.2230000000000006e-05, |
|
"loss": 0.4144, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 5.22e-05, |
|
"loss": 0.282, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"eval_loss": 0.388349711894989, |
|
"eval_mean_accuracy": 0.8259209922494022, |
|
"eval_mean_iou": 0.38122471501239064, |
|
"eval_overall_accuracy": 0.7367579599669372, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.6643899294086868, |
|
0.9874520550901176 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.662672740913094, |
|
0.481001404124078 |
|
], |
|
"eval_runtime": 48.6412, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.206, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 5.2170000000000004e-05, |
|
"loss": 0.3829, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 5.214e-05, |
|
"loss": 0.3072, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 6.58, |
|
"learning_rate": 5.211000000000001e-05, |
|
"loss": 0.321, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 5.208e-05, |
|
"loss": 0.2191, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 6.62, |
|
"learning_rate": 5.2050000000000005e-05, |
|
"loss": 0.3312, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 6.65, |
|
"learning_rate": 5.202e-05, |
|
"loss": 0.7948, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 5.199e-05, |
|
"loss": 0.3981, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 6.7, |
|
"learning_rate": 5.1960000000000004e-05, |
|
"loss": 0.2188, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 5.1930000000000006e-05, |
|
"loss": 0.3235, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 5.19e-05, |
|
"loss": 0.1482, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 6.78, |
|
"learning_rate": 5.187e-05, |
|
"loss": 0.1747, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 6.8, |
|
"learning_rate": 5.184e-05, |
|
"loss": 0.2945, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 6.83, |
|
"learning_rate": 5.181000000000001e-05, |
|
"loss": 0.3469, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 6.85, |
|
"learning_rate": 5.178e-05, |
|
"loss": 0.5744, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 5.1750000000000004e-05, |
|
"loss": 0.1551, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 5.172e-05, |
|
"loss": 0.135, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 5.169e-05, |
|
"loss": 1.0639, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 5.1660000000000003e-05, |
|
"loss": 0.2964, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 6.97, |
|
"learning_rate": 5.1630000000000005e-05, |
|
"loss": 0.2584, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 5.16e-05, |
|
"loss": 0.3275, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"eval_loss": 0.24394457042217255, |
|
"eval_mean_accuracy": 0.8693783875093231, |
|
"eval_mean_iou": 0.5201932822181216, |
|
"eval_overall_accuracy": 0.8368297410563663, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.8104120499927068, |
|
0.9283447250259392 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.7959839091242811, |
|
0.7645959375300835 |
|
], |
|
"eval_runtime": 47.6324, |
|
"eval_samples_per_second": 0.42, |
|
"eval_steps_per_second": 0.21, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 7.03, |
|
"learning_rate": 5.157e-05, |
|
"loss": 0.4328, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 7.05, |
|
"learning_rate": 5.154e-05, |
|
"loss": 0.5671, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 7.08, |
|
"learning_rate": 5.1510000000000007e-05, |
|
"loss": 0.3449, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 5.148e-05, |
|
"loss": 0.3505, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 7.12, |
|
"learning_rate": 5.1450000000000004e-05, |
|
"loss": 0.7337, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 7.15, |
|
"learning_rate": 5.142e-05, |
|
"loss": 0.2992, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 7.17, |
|
"learning_rate": 5.139e-05, |
|
"loss": 0.2461, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 5.136e-05, |
|
"loss": 0.1222, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 7.22, |
|
"learning_rate": 5.1330000000000005e-05, |
|
"loss": 0.4672, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 5.13e-05, |
|
"loss": 0.3195, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 7.28, |
|
"learning_rate": 5.127e-05, |
|
"loss": 0.3162, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 5.124e-05, |
|
"loss": 0.4218, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 5.1210000000000006e-05, |
|
"loss": 0.2803, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 7.35, |
|
"learning_rate": 5.118e-05, |
|
"loss": 0.3924, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 5.115e-05, |
|
"loss": 0.5085, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 7.4, |
|
"learning_rate": 5.112e-05, |
|
"loss": 0.1882, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 7.42, |
|
"learning_rate": 5.109e-05, |
|
"loss": 0.4084, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 5.106e-05, |
|
"loss": 0.3493, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 7.47, |
|
"learning_rate": 5.1030000000000004e-05, |
|
"loss": 0.2055, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 5.1e-05, |
|
"loss": 0.3243, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"eval_loss": 0.2048894464969635, |
|
"eval_mean_accuracy": 0.8435106512197734, |
|
"eval_mean_iou": 0.5263646497051458, |
|
"eval_overall_accuracy": 0.8976785141717301, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9416431647789888, |
|
0.7453781376605582 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8818220751562738, |
|
0.6972718739591639 |
|
], |
|
"eval_runtime": 48.7394, |
|
"eval_samples_per_second": 0.41, |
|
"eval_steps_per_second": 0.205, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 7.53, |
|
"learning_rate": 5.097e-05, |
|
"loss": 0.135, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 5.094e-05, |
|
"loss": 0.2282, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 5.0910000000000006e-05, |
|
"loss": 0.4108, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 7.6, |
|
"learning_rate": 5.088e-05, |
|
"loss": 0.2306, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 7.62, |
|
"learning_rate": 5.085e-05, |
|
"loss": 0.2897, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 5.082e-05, |
|
"loss": 0.9242, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 5.079e-05, |
|
"loss": 0.2076, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 7.7, |
|
"learning_rate": 5.076e-05, |
|
"loss": 0.5449, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 5.0730000000000004e-05, |
|
"loss": 0.4291, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 5.07e-05, |
|
"loss": 0.4141, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 7.78, |
|
"learning_rate": 5.067e-05, |
|
"loss": 0.4648, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 7.8, |
|
"learning_rate": 5.0639999999999996e-05, |
|
"loss": 0.8204, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 7.83, |
|
"learning_rate": 5.0610000000000005e-05, |
|
"loss": 0.403, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 5.058e-05, |
|
"loss": 0.3946, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 7.88, |
|
"learning_rate": 5.055e-05, |
|
"loss": 0.2797, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 7.9, |
|
"learning_rate": 5.052e-05, |
|
"loss": 0.2285, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"learning_rate": 5.0490000000000006e-05, |
|
"loss": 0.3405, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 7.95, |
|
"learning_rate": 5.046e-05, |
|
"loss": 0.1786, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 7.97, |
|
"learning_rate": 5.043e-05, |
|
"loss": 0.1665, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 5.04e-05, |
|
"loss": 0.2753, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 0.22952759265899658, |
|
"eval_mean_accuracy": 0.9036801208276177, |
|
"eval_mean_iou": 0.5711426371831221, |
|
"eval_overall_accuracy": 0.9283498244545524, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9483726727161642, |
|
0.8589875689390711 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9135066161058577, |
|
0.7999212954435085 |
|
], |
|
"eval_runtime": 48.1711, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.208, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 8.03, |
|
"learning_rate": 5.037e-05, |
|
"loss": 0.1384, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 8.05, |
|
"learning_rate": 5.034e-05, |
|
"loss": 0.3371, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"learning_rate": 5.0310000000000005e-05, |
|
"loss": 0.1202, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 8.1, |
|
"learning_rate": 5.028e-05, |
|
"loss": 0.145, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 8.12, |
|
"learning_rate": 5.025e-05, |
|
"loss": 0.2253, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 8.15, |
|
"learning_rate": 5.022e-05, |
|
"loss": 0.2934, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 5.0190000000000006e-05, |
|
"loss": 0.258, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 8.2, |
|
"learning_rate": 5.016e-05, |
|
"loss": 0.5661, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 5.013e-05, |
|
"loss": 0.1907, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 5.01e-05, |
|
"loss": 1.0441, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 8.28, |
|
"learning_rate": 5.007e-05, |
|
"loss": 0.2594, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 8.3, |
|
"learning_rate": 5.004e-05, |
|
"loss": 0.1567, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"learning_rate": 5.0010000000000004e-05, |
|
"loss": 0.1027, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 4.998e-05, |
|
"loss": 0.7182, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 8.38, |
|
"learning_rate": 4.995e-05, |
|
"loss": 0.2658, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 4.9919999999999996e-05, |
|
"loss": 0.2252, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"learning_rate": 4.9890000000000005e-05, |
|
"loss": 0.3155, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 4.986e-05, |
|
"loss": 0.1659, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 8.47, |
|
"learning_rate": 4.983e-05, |
|
"loss": 1.1028, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 4.98e-05, |
|
"loss": 0.2368, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"eval_loss": 0.19084516167640686, |
|
"eval_mean_accuracy": 0.9140556027707284, |
|
"eval_mean_iou": 0.5612282269733403, |
|
"eval_overall_accuracy": 0.9094506934004901, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9057131778237694, |
|
0.9223980277176874 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8891001341699122, |
|
0.7945845467501085 |
|
], |
|
"eval_runtime": 48.1614, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.208, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 8.53, |
|
"learning_rate": 4.977e-05, |
|
"loss": 0.2577, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"learning_rate": 4.974e-05, |
|
"loss": 0.3297, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 8.57, |
|
"learning_rate": 4.9710000000000003e-05, |
|
"loss": 0.313, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 4.968e-05, |
|
"loss": 0.2803, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 8.62, |
|
"learning_rate": 4.965e-05, |
|
"loss": 0.2727, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 8.65, |
|
"learning_rate": 4.9619999999999996e-05, |
|
"loss": 0.5579, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 8.68, |
|
"learning_rate": 4.9590000000000005e-05, |
|
"loss": 0.276, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 4.956e-05, |
|
"loss": 0.3912, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 8.72, |
|
"learning_rate": 4.953e-05, |
|
"loss": 0.24, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 8.75, |
|
"learning_rate": 4.95e-05, |
|
"loss": 0.4231, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 8.78, |
|
"learning_rate": 4.947e-05, |
|
"loss": 0.3451, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 8.8, |
|
"learning_rate": 4.944e-05, |
|
"loss": 0.3634, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 4.941e-05, |
|
"loss": 0.5014, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 8.85, |
|
"learning_rate": 4.938e-05, |
|
"loss": 0.2758, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"learning_rate": 4.935e-05, |
|
"loss": 0.3193, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 8.9, |
|
"learning_rate": 4.9319999999999995e-05, |
|
"loss": 0.3801, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 8.93, |
|
"learning_rate": 4.9290000000000004e-05, |
|
"loss": 0.2007, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 8.95, |
|
"learning_rate": 4.926e-05, |
|
"loss": 0.2294, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 4.923e-05, |
|
"loss": 0.3337, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 4.9199999999999997e-05, |
|
"loss": 0.9901, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"eval_loss": 0.24918393790721893, |
|
"eval_mean_accuracy": 0.8951753051498105, |
|
"eval_mean_iou": 0.4812836580752557, |
|
"eval_overall_accuracy": 0.8465435512223731, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.8070722124373391, |
|
0.9832783978622819 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.803806401181222, |
|
0.640044573044545 |
|
], |
|
"eval_runtime": 49.5136, |
|
"eval_samples_per_second": 0.404, |
|
"eval_steps_per_second": 0.202, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"learning_rate": 4.917e-05, |
|
"loss": 0.1101, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 4.914e-05, |
|
"loss": 0.1172, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 4.911e-05, |
|
"loss": 0.1172, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 9.1, |
|
"learning_rate": 4.908e-05, |
|
"loss": 0.1692, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"learning_rate": 4.905e-05, |
|
"loss": 0.1617, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 9.15, |
|
"learning_rate": 4.9019999999999995e-05, |
|
"loss": 0.4696, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 4.8990000000000004e-05, |
|
"loss": 0.3077, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 4.896e-05, |
|
"loss": 0.3329, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 9.22, |
|
"learning_rate": 4.893e-05, |
|
"loss": 0.6601, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 4.8899999999999996e-05, |
|
"loss": 0.4652, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"learning_rate": 4.887e-05, |
|
"loss": 0.1901, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 9.3, |
|
"learning_rate": 4.884e-05, |
|
"loss": 0.1811, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 9.32, |
|
"learning_rate": 4.881e-05, |
|
"loss": 0.3049, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 4.878e-05, |
|
"loss": 0.2756, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 9.38, |
|
"learning_rate": 4.875e-05, |
|
"loss": 0.4155, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 4.872000000000001e-05, |
|
"loss": 0.3104, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 9.43, |
|
"learning_rate": 4.869e-05, |
|
"loss": 0.1151, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 4.8660000000000005e-05, |
|
"loss": 0.2135, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"learning_rate": 4.863e-05, |
|
"loss": 0.5245, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 4.86e-05, |
|
"loss": 0.2519, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"eval_loss": 0.23749105632305145, |
|
"eval_mean_accuracy": 0.9468136114330645, |
|
"eval_mean_iou": 0.581027164032012, |
|
"eval_overall_accuracy": 0.9390303832997389, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.93271322609932, |
|
0.9609139967668092 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9236989144275697, |
|
0.8193825776684663 |
|
], |
|
"eval_runtime": 48.4577, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.206, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"learning_rate": 4.8570000000000004e-05, |
|
"loss": 0.7302, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 4.8540000000000006e-05, |
|
"loss": 0.2297, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 9.57, |
|
"learning_rate": 4.851e-05, |
|
"loss": 0.1605, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 9.6, |
|
"learning_rate": 4.8480000000000003e-05, |
|
"loss": 0.2559, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 9.62, |
|
"learning_rate": 4.845e-05, |
|
"loss": 0.2862, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 9.65, |
|
"learning_rate": 4.842000000000001e-05, |
|
"loss": 0.2086, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"learning_rate": 4.839e-05, |
|
"loss": 0.2762, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 9.7, |
|
"learning_rate": 4.8360000000000005e-05, |
|
"loss": 0.3488, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"learning_rate": 4.833e-05, |
|
"loss": 0.2638, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 4.83e-05, |
|
"loss": 0.1605, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 9.78, |
|
"learning_rate": 4.8270000000000004e-05, |
|
"loss": 0.2095, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 4.8240000000000006e-05, |
|
"loss": 0.5525, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 4.821e-05, |
|
"loss": 0.3645, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 9.85, |
|
"learning_rate": 4.818e-05, |
|
"loss": 0.119, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 4.815e-05, |
|
"loss": 0.2868, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 9.9, |
|
"learning_rate": 4.812000000000001e-05, |
|
"loss": 0.8835, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 9.93, |
|
"learning_rate": 4.809e-05, |
|
"loss": 0.122, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 9.95, |
|
"learning_rate": 4.8060000000000004e-05, |
|
"loss": 0.1681, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"learning_rate": 4.803e-05, |
|
"loss": 0.281, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 4.8e-05, |
|
"loss": 0.4478, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_loss": 0.19183433055877686, |
|
"eval_mean_accuracy": 0.8839528075533328, |
|
"eval_mean_iou": 0.5615844872126089, |
|
"eval_overall_accuracy": 0.9199415278173876, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9491513106292899, |
|
0.8187543044773757 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9064106123096447, |
|
0.7783428493281822 |
|
], |
|
"eval_runtime": 49.2471, |
|
"eval_samples_per_second": 0.406, |
|
"eval_steps_per_second": 0.203, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 10.03, |
|
"learning_rate": 4.797e-05, |
|
"loss": 0.2364, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 10.05, |
|
"learning_rate": 4.7940000000000005e-05, |
|
"loss": 0.186, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 10.07, |
|
"learning_rate": 4.791e-05, |
|
"loss": 0.5067, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 10.1, |
|
"learning_rate": 4.788e-05, |
|
"loss": 0.2468, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 10.12, |
|
"learning_rate": 4.785e-05, |
|
"loss": 0.2668, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 10.15, |
|
"learning_rate": 4.7820000000000006e-05, |
|
"loss": 0.2371, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 10.18, |
|
"learning_rate": 4.779e-05, |
|
"loss": 0.3081, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 10.2, |
|
"learning_rate": 4.7760000000000004e-05, |
|
"loss": 0.1102, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 10.22, |
|
"learning_rate": 4.773e-05, |
|
"loss": 0.4816, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 10.25, |
|
"learning_rate": 4.77e-05, |
|
"loss": 0.2743, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 10.28, |
|
"learning_rate": 4.767e-05, |
|
"loss": 0.3725, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 10.3, |
|
"learning_rate": 4.7640000000000005e-05, |
|
"loss": 0.2469, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 10.32, |
|
"learning_rate": 4.761e-05, |
|
"loss": 0.2175, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 10.35, |
|
"learning_rate": 4.758e-05, |
|
"loss": 0.3623, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 10.38, |
|
"learning_rate": 4.755e-05, |
|
"loss": 0.3651, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 10.4, |
|
"learning_rate": 4.7520000000000006e-05, |
|
"loss": 0.2813, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 10.43, |
|
"learning_rate": 4.749e-05, |
|
"loss": 0.1988, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 10.45, |
|
"learning_rate": 4.746e-05, |
|
"loss": 0.0948, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 10.47, |
|
"learning_rate": 4.743e-05, |
|
"loss": 0.1196, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 4.74e-05, |
|
"loss": 0.2322, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"eval_loss": 0.18487034738063812, |
|
"eval_mean_accuracy": 0.9393724907811466, |
|
"eval_mean_iou": 0.5821265257663112, |
|
"eval_overall_accuracy": 0.9355605611653833, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9324666574268301, |
|
0.9462783241354631 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9208515742743779, |
|
0.8255280030245556 |
|
], |
|
"eval_runtime": 48.2088, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.207, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 10.53, |
|
"learning_rate": 4.737e-05, |
|
"loss": 0.1669, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 10.55, |
|
"learning_rate": 4.7340000000000004e-05, |
|
"loss": 0.6335, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 10.57, |
|
"learning_rate": 4.731e-05, |
|
"loss": 0.1626, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 10.6, |
|
"learning_rate": 4.728e-05, |
|
"loss": 0.5899, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 10.62, |
|
"learning_rate": 4.7249999999999997e-05, |
|
"loss": 0.236, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"learning_rate": 4.7220000000000005e-05, |
|
"loss": 0.2995, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 10.68, |
|
"learning_rate": 4.719e-05, |
|
"loss": 0.232, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 10.7, |
|
"learning_rate": 4.716e-05, |
|
"loss": 0.1955, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 10.72, |
|
"learning_rate": 4.713e-05, |
|
"loss": 0.2382, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"learning_rate": 4.7100000000000006e-05, |
|
"loss": 0.1718, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 10.78, |
|
"learning_rate": 4.707e-05, |
|
"loss": 0.1884, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 10.8, |
|
"learning_rate": 4.7040000000000004e-05, |
|
"loss": 1.017, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 10.82, |
|
"learning_rate": 4.701e-05, |
|
"loss": 0.1411, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 10.85, |
|
"learning_rate": 4.698e-05, |
|
"loss": 0.2267, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 10.88, |
|
"learning_rate": 4.695e-05, |
|
"loss": 0.2418, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"learning_rate": 4.6920000000000005e-05, |
|
"loss": 0.3928, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 10.93, |
|
"learning_rate": 4.689e-05, |
|
"loss": 0.2357, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 10.95, |
|
"learning_rate": 4.686e-05, |
|
"loss": 0.1766, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 10.97, |
|
"learning_rate": 4.683e-05, |
|
"loss": 0.2089, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 4.6800000000000006e-05, |
|
"loss": 0.1698, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"eval_loss": 0.19902649521827698, |
|
"eval_mean_accuracy": 0.9215499899486547, |
|
"eval_mean_iou": 0.5553175130512749, |
|
"eval_overall_accuracy": 0.8984438568760805, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.8796900605832205, |
|
0.9634099193140888 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8726210024783129, |
|
0.7933315366755117 |
|
], |
|
"eval_runtime": 49.031, |
|
"eval_samples_per_second": 0.408, |
|
"eval_steps_per_second": 0.204, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 11.03, |
|
"learning_rate": 4.677e-05, |
|
"loss": 0.533, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 11.05, |
|
"learning_rate": 4.674e-05, |
|
"loss": 0.6826, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 11.07, |
|
"learning_rate": 4.671e-05, |
|
"loss": 0.1957, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 11.1, |
|
"learning_rate": 4.668e-05, |
|
"loss": 0.1768, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 11.12, |
|
"learning_rate": 4.665e-05, |
|
"loss": 0.6004, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 11.15, |
|
"learning_rate": 4.6620000000000004e-05, |
|
"loss": 0.314, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 11.18, |
|
"learning_rate": 4.659e-05, |
|
"loss": 1.1865, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 11.2, |
|
"learning_rate": 4.656e-05, |
|
"loss": 0.0892, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 11.22, |
|
"learning_rate": 4.653e-05, |
|
"loss": 0.1509, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 11.25, |
|
"learning_rate": 4.6500000000000005e-05, |
|
"loss": 0.3381, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 11.28, |
|
"learning_rate": 4.647e-05, |
|
"loss": 1.3277, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 11.3, |
|
"learning_rate": 4.644e-05, |
|
"loss": 0.1961, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 11.32, |
|
"learning_rate": 4.641e-05, |
|
"loss": 0.2051, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 11.35, |
|
"learning_rate": 4.638e-05, |
|
"loss": 0.2301, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 11.38, |
|
"learning_rate": 4.635e-05, |
|
"loss": 0.2205, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 4.6320000000000004e-05, |
|
"loss": 0.1577, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 11.43, |
|
"learning_rate": 4.629e-05, |
|
"loss": 0.264, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 11.45, |
|
"learning_rate": 4.626e-05, |
|
"loss": 0.223, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 11.47, |
|
"learning_rate": 4.6229999999999996e-05, |
|
"loss": 0.3168, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 4.6200000000000005e-05, |
|
"loss": 0.2747, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"eval_loss": 0.1631600707769394, |
|
"eval_mean_accuracy": 0.9303702950626429, |
|
"eval_mean_iou": 0.5869421148668948, |
|
"eval_overall_accuracy": 0.9340532388497624, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.9370424528962994, |
|
0.9236981372289865 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9195581410749489, |
|
0.8412682035257355 |
|
], |
|
"eval_runtime": 48.2511, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 11.53, |
|
"learning_rate": 4.617e-05, |
|
"loss": 0.2376, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 11.55, |
|
"learning_rate": 4.614e-05, |
|
"loss": 0.3034, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 11.57, |
|
"learning_rate": 4.611e-05, |
|
"loss": 0.0951, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 11.6, |
|
"learning_rate": 4.608e-05, |
|
"loss": 0.3745, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 11.62, |
|
"learning_rate": 4.605e-05, |
|
"loss": 0.1535, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 11.65, |
|
"learning_rate": 4.602e-05, |
|
"loss": 0.2144, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 11.68, |
|
"learning_rate": 4.599e-05, |
|
"loss": 0.2269, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 11.7, |
|
"learning_rate": 4.596e-05, |
|
"loss": 0.2681, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 11.72, |
|
"learning_rate": 4.5929999999999996e-05, |
|
"loss": 0.2469, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 11.75, |
|
"learning_rate": 4.5900000000000004e-05, |
|
"loss": 0.1947, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 11.78, |
|
"learning_rate": 4.587e-05, |
|
"loss": 0.2222, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 11.8, |
|
"learning_rate": 4.584e-05, |
|
"loss": 0.2049, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"learning_rate": 4.581e-05, |
|
"loss": 0.1804, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 11.85, |
|
"learning_rate": 4.578e-05, |
|
"loss": 0.1954, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 11.88, |
|
"learning_rate": 4.575e-05, |
|
"loss": 0.2368, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 11.9, |
|
"learning_rate": 4.572e-05, |
|
"loss": 0.3448, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 11.93, |
|
"learning_rate": 4.569e-05, |
|
"loss": 0.3405, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 11.95, |
|
"learning_rate": 4.566e-05, |
|
"loss": 0.1396, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 11.97, |
|
"learning_rate": 4.5629999999999995e-05, |
|
"loss": 0.2008, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 4.5600000000000004e-05, |
|
"loss": 0.4617, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"eval_loss": 0.19759610295295715, |
|
"eval_mean_accuracy": 0.9295030231770169, |
|
"eval_mean_iou": 0.5369163642640026, |
|
"eval_overall_accuracy": 0.8968608059139239, |
|
"eval_per_category_accuracy": [ |
|
null, |
|
0.8703671693033942, |
|
0.9886388770506396 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8686163566323581, |
|
0.7421327361596495 |
|
], |
|
"eval_runtime": 48.2743, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 12.03, |
|
"learning_rate": 4.557e-05, |
|
"loss": 0.172, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 12.05, |
|
"learning_rate": 4.554e-05, |
|
"loss": 0.1316, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 12.07, |
|
"learning_rate": 4.5509999999999996e-05, |
|
"loss": 0.2638, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 12.1, |
|
"learning_rate": 4.548e-05, |
|
"loss": 0.4121, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 12.12, |
|
"learning_rate": 4.545e-05, |
|
"loss": 0.9736, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 12.15, |
|
"learning_rate": 4.542e-05, |
|
"loss": 0.2648, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 12.18, |
|
"learning_rate": 4.539e-05, |
|
"loss": 0.2924, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 12.2, |
|
"learning_rate": 4.536e-05, |
|
"loss": 0.0925, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 12.22, |
|
"learning_rate": 4.533e-05, |
|
"loss": 0.3587, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 12.25, |
|
"learning_rate": 4.53e-05, |
|
"loss": 0.4169, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 12.28, |
|
"learning_rate": 4.527e-05, |
|
"loss": 0.1362, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 12.3, |
|
"learning_rate": 4.524e-05, |
|
"loss": 0.8605, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 12.32, |
|
"learning_rate": 4.5209999999999996e-05, |
|
"loss": 0.2946, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 12.35, |
|
"learning_rate": 4.5180000000000004e-05, |
|
"loss": 0.5967, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 12.38, |
|
"learning_rate": 4.515e-05, |
|
"loss": 0.0964, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 12.4, |
|
"learning_rate": 4.512e-05, |
|
"loss": 0.2239, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 12.43, |
|
"learning_rate": 4.509e-05, |
|
"loss": 0.1887, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 12.45, |
|
"learning_rate": 4.506e-05, |
|
"loss": 0.2777, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 12.47, |
|
"learning_rate": 4.503e-05, |
|
"loss": 0.2711, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.2906, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"eval_loss": 0.19325587153434753, |
|
"eval_mean_accuracy": 0.9273299026552342, |
|
"eval_mean_iou": 0.5883427062855762, |
|
"eval_overall_accuracy": 0.9495417611260688, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.96756973091831, |
|
0.8870900743921584 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.939462057439274, |
|
0.8255660614174546 |
|
], |
|
"eval_runtime": 48.3895, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.207, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 12.53, |
|
"learning_rate": 4.4970000000000005e-05, |
|
"loss": 0.1969, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 12.55, |
|
"learning_rate": 4.494e-05, |
|
"loss": 0.1268, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 12.57, |
|
"learning_rate": 4.491e-05, |
|
"loss": 0.1312, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 4.4880000000000004e-05, |
|
"loss": 0.9626, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 12.62, |
|
"learning_rate": 4.4850000000000006e-05, |
|
"loss": 0.2582, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 12.65, |
|
"learning_rate": 4.482e-05, |
|
"loss": 0.2529, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 12.68, |
|
"learning_rate": 4.479e-05, |
|
"loss": 0.1435, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 12.7, |
|
"learning_rate": 4.476e-05, |
|
"loss": 0.2151, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 12.72, |
|
"learning_rate": 4.473000000000001e-05, |
|
"loss": 0.3817, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 12.75, |
|
"learning_rate": 4.47e-05, |
|
"loss": 0.6182, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 12.78, |
|
"learning_rate": 4.4670000000000004e-05, |
|
"loss": 0.1625, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 4.464e-05, |
|
"loss": 0.2269, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 12.82, |
|
"learning_rate": 4.461e-05, |
|
"loss": 0.1072, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 12.85, |
|
"learning_rate": 4.4580000000000003e-05, |
|
"loss": 0.1614, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 12.88, |
|
"learning_rate": 4.4550000000000005e-05, |
|
"loss": 0.2633, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 4.452e-05, |
|
"loss": 0.0781, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 12.93, |
|
"learning_rate": 4.449e-05, |
|
"loss": 0.2842, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 12.95, |
|
"learning_rate": 4.446e-05, |
|
"loss": 0.1952, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 12.97, |
|
"learning_rate": 4.4430000000000007e-05, |
|
"loss": 0.2178, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 4.44e-05, |
|
"loss": 0.143, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"eval_loss": 0.16903528571128845, |
|
"eval_mean_accuracy": 0.9235514768344331, |
|
"eval_mean_iou": 0.5737554191549884, |
|
"eval_overall_accuracy": 0.9139972318762819, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9062426516046949, |
|
0.9408603020641712 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8926643773418198, |
|
0.8286018801231455 |
|
], |
|
"eval_runtime": 48.337, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 13.03, |
|
"learning_rate": 4.4370000000000004e-05, |
|
"loss": 0.2364, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 13.05, |
|
"learning_rate": 4.434e-05, |
|
"loss": 0.146, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 13.07, |
|
"learning_rate": 4.431e-05, |
|
"loss": 0.5023, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 13.1, |
|
"learning_rate": 4.428e-05, |
|
"loss": 0.2974, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 13.12, |
|
"learning_rate": 4.4250000000000005e-05, |
|
"loss": 0.1238, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 13.15, |
|
"learning_rate": 4.422e-05, |
|
"loss": 0.096, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"learning_rate": 4.419e-05, |
|
"loss": 0.2125, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 13.2, |
|
"learning_rate": 4.416e-05, |
|
"loss": 0.167, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 13.22, |
|
"learning_rate": 4.4130000000000006e-05, |
|
"loss": 0.0964, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 13.25, |
|
"learning_rate": 4.41e-05, |
|
"loss": 0.4862, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 13.28, |
|
"learning_rate": 4.407e-05, |
|
"loss": 0.197, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 13.3, |
|
"learning_rate": 4.404e-05, |
|
"loss": 0.2174, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 13.32, |
|
"learning_rate": 4.401e-05, |
|
"loss": 0.165, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 13.35, |
|
"learning_rate": 4.398e-05, |
|
"loss": 0.2082, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 13.38, |
|
"learning_rate": 4.3950000000000004e-05, |
|
"loss": 0.2521, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 13.4, |
|
"learning_rate": 4.392e-05, |
|
"loss": 0.2875, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 13.43, |
|
"learning_rate": 4.389e-05, |
|
"loss": 0.2588, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 13.45, |
|
"learning_rate": 4.386e-05, |
|
"loss": 0.4512, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 13.47, |
|
"learning_rate": 4.3830000000000006e-05, |
|
"loss": 0.093, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 4.38e-05, |
|
"loss": 0.226, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"eval_loss": 0.1501137614250183, |
|
"eval_mean_accuracy": 0.9168172988787836, |
|
"eval_mean_iou": 0.5812350863104522, |
|
"eval_overall_accuracy": 0.9313205625832813, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9430919503893449, |
|
0.8905426473682223 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9173144580162539, |
|
0.8263908009151028 |
|
], |
|
"eval_runtime": 48.653, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.206, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 13.53, |
|
"learning_rate": 4.377e-05, |
|
"loss": 0.1305, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"learning_rate": 4.374e-05, |
|
"loss": 0.0774, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 13.57, |
|
"learning_rate": 4.371e-05, |
|
"loss": 0.6389, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 13.6, |
|
"learning_rate": 4.368e-05, |
|
"loss": 0.2436, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 13.62, |
|
"learning_rate": 4.3650000000000004e-05, |
|
"loss": 0.331, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 13.65, |
|
"learning_rate": 4.362e-05, |
|
"loss": 0.2941, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 13.68, |
|
"learning_rate": 4.359e-05, |
|
"loss": 1.1999, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 13.7, |
|
"learning_rate": 4.356e-05, |
|
"loss": 0.3663, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 13.72, |
|
"learning_rate": 4.3530000000000005e-05, |
|
"loss": 0.7002, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 13.75, |
|
"learning_rate": 4.35e-05, |
|
"loss": 0.2695, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 13.78, |
|
"learning_rate": 4.347e-05, |
|
"loss": 0.0617, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 13.8, |
|
"learning_rate": 4.344e-05, |
|
"loss": 0.1818, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 13.82, |
|
"learning_rate": 4.3410000000000006e-05, |
|
"loss": 0.2036, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 13.85, |
|
"learning_rate": 4.338e-05, |
|
"loss": 0.1806, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 13.88, |
|
"learning_rate": 4.335e-05, |
|
"loss": 0.2204, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 13.9, |
|
"learning_rate": 4.332e-05, |
|
"loss": 0.0898, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 13.93, |
|
"learning_rate": 4.329e-05, |
|
"loss": 0.1312, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 13.95, |
|
"learning_rate": 4.326e-05, |
|
"loss": 0.2342, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 13.97, |
|
"learning_rate": 4.3230000000000005e-05, |
|
"loss": 0.3738, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 4.32e-05, |
|
"loss": 0.4826, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"eval_loss": 0.22021767497062683, |
|
"eval_mean_accuracy": 0.9476397824599877, |
|
"eval_mean_iou": 0.590360191297902, |
|
"eval_overall_accuracy": 0.9489838665757923, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9500747751942572, |
|
0.9452047897257182 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9366055231315997, |
|
0.8344750507621063 |
|
], |
|
"eval_runtime": 48.3176, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 14.03, |
|
"learning_rate": 4.317e-05, |
|
"loss": 0.659, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 14.05, |
|
"learning_rate": 4.314e-05, |
|
"loss": 0.7442, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 14.07, |
|
"learning_rate": 4.3110000000000006e-05, |
|
"loss": 0.2401, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 14.1, |
|
"learning_rate": 4.308e-05, |
|
"loss": 0.1112, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 14.12, |
|
"learning_rate": 4.305e-05, |
|
"loss": 1.1314, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 14.15, |
|
"learning_rate": 4.302e-05, |
|
"loss": 0.2401, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 14.18, |
|
"learning_rate": 4.299e-05, |
|
"loss": 0.1054, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 14.2, |
|
"learning_rate": 4.296e-05, |
|
"loss": 0.0728, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 14.22, |
|
"learning_rate": 4.2930000000000004e-05, |
|
"loss": 0.2716, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 14.25, |
|
"learning_rate": 4.29e-05, |
|
"loss": 0.1054, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 14.28, |
|
"learning_rate": 4.287e-05, |
|
"loss": 0.2411, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 4.2839999999999996e-05, |
|
"loss": 0.2392, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 14.32, |
|
"learning_rate": 4.2810000000000005e-05, |
|
"loss": 0.331, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 14.35, |
|
"learning_rate": 4.278e-05, |
|
"loss": 0.1585, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 14.38, |
|
"learning_rate": 4.275e-05, |
|
"loss": 0.1978, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 14.4, |
|
"learning_rate": 4.272e-05, |
|
"loss": 0.1201, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 14.43, |
|
"learning_rate": 4.269e-05, |
|
"loss": 0.1685, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 14.45, |
|
"learning_rate": 4.266e-05, |
|
"loss": 0.2495, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 14.47, |
|
"learning_rate": 4.2630000000000004e-05, |
|
"loss": 0.1228, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 4.26e-05, |
|
"loss": 0.0776, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"eval_loss": 0.20236675441265106, |
|
"eval_mean_accuracy": 0.8875189182338821, |
|
"eval_mean_iou": 0.5520159873990785, |
|
"eval_overall_accuracy": 0.8935424410726399, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.8984313560602167, |
|
0.8766064804075475 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.87157656518941, |
|
0.7844713970078256 |
|
], |
|
"eval_runtime": 48.5407, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 14.53, |
|
"learning_rate": 4.257e-05, |
|
"loss": 0.1925, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 14.55, |
|
"learning_rate": 4.2539999999999996e-05, |
|
"loss": 0.7366, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 14.57, |
|
"learning_rate": 4.2510000000000005e-05, |
|
"loss": 0.1727, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 14.6, |
|
"learning_rate": 4.248e-05, |
|
"loss": 0.2376, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 14.62, |
|
"learning_rate": 4.245e-05, |
|
"loss": 0.1822, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 14.65, |
|
"learning_rate": 4.242e-05, |
|
"loss": 0.1792, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 14.68, |
|
"learning_rate": 4.239e-05, |
|
"loss": 0.6131, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 14.7, |
|
"learning_rate": 4.236e-05, |
|
"loss": 1.037, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 14.72, |
|
"learning_rate": 4.233e-05, |
|
"loss": 0.4163, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 14.75, |
|
"learning_rate": 4.23e-05, |
|
"loss": 0.2271, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 14.78, |
|
"learning_rate": 4.227e-05, |
|
"loss": 0.2849, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 14.8, |
|
"learning_rate": 4.2239999999999995e-05, |
|
"loss": 0.26, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 14.82, |
|
"learning_rate": 4.2210000000000004e-05, |
|
"loss": 0.08, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 14.85, |
|
"learning_rate": 4.218e-05, |
|
"loss": 1.0104, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 14.88, |
|
"learning_rate": 4.215e-05, |
|
"loss": 0.3434, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 14.9, |
|
"learning_rate": 4.2119999999999997e-05, |
|
"loss": 0.2438, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 14.93, |
|
"learning_rate": 4.209e-05, |
|
"loss": 0.3231, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 14.95, |
|
"learning_rate": 4.206e-05, |
|
"loss": 0.2779, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 14.97, |
|
"learning_rate": 4.203e-05, |
|
"loss": 0.1847, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 4.2e-05, |
|
"loss": 0.2478, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_loss": 0.31428685784339905, |
|
"eval_mean_accuracy": 0.9244168410195777, |
|
"eval_mean_iou": 0.5165786682460257, |
|
"eval_overall_accuracy": 0.8922635131324752, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.8661666773080515, |
|
0.9826670047311039 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8628620362091799, |
|
0.6868739685288975 |
|
], |
|
"eval_runtime": 49.0378, |
|
"eval_samples_per_second": 0.408, |
|
"eval_steps_per_second": 0.204, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 15.03, |
|
"learning_rate": 4.197e-05, |
|
"loss": 0.0973, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 15.05, |
|
"learning_rate": 4.194e-05, |
|
"loss": 0.125, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 15.07, |
|
"learning_rate": 4.1910000000000004e-05, |
|
"loss": 0.0719, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 15.1, |
|
"learning_rate": 4.188e-05, |
|
"loss": 0.535, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 15.12, |
|
"learning_rate": 4.185e-05, |
|
"loss": 0.6612, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 15.15, |
|
"learning_rate": 4.1819999999999996e-05, |
|
"loss": 0.0919, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 15.18, |
|
"learning_rate": 4.1790000000000005e-05, |
|
"loss": 0.2264, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 15.2, |
|
"learning_rate": 4.176e-05, |
|
"loss": 0.124, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 15.22, |
|
"learning_rate": 4.173e-05, |
|
"loss": 1.0385, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 15.25, |
|
"learning_rate": 4.17e-05, |
|
"loss": 0.08, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 15.28, |
|
"learning_rate": 4.167e-05, |
|
"loss": 0.1723, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 15.3, |
|
"learning_rate": 4.164e-05, |
|
"loss": 0.2141, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 15.32, |
|
"learning_rate": 4.161e-05, |
|
"loss": 0.0843, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 15.35, |
|
"learning_rate": 4.158e-05, |
|
"loss": 0.3209, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 15.38, |
|
"learning_rate": 4.155e-05, |
|
"loss": 0.1918, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 15.4, |
|
"learning_rate": 4.1519999999999995e-05, |
|
"loss": 0.2061, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 15.43, |
|
"learning_rate": 4.1490000000000004e-05, |
|
"loss": 0.1689, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 15.45, |
|
"learning_rate": 4.146e-05, |
|
"loss": 0.0737, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 15.47, |
|
"learning_rate": 4.143e-05, |
|
"loss": 0.0635, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 4.14e-05, |
|
"loss": 0.2655, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"eval_loss": 0.19317014515399933, |
|
"eval_mean_accuracy": 0.8589319719879445, |
|
"eval_mean_iou": 0.5429208488927418, |
|
"eval_overall_accuracy": 0.8903078611168848, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9157736987532968, |
|
0.8020902452225921 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8719395715980975, |
|
0.7568229750801281 |
|
], |
|
"eval_runtime": 48.5099, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 15.53, |
|
"learning_rate": 4.137e-05, |
|
"loss": 1.1966, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 15.55, |
|
"learning_rate": 4.134e-05, |
|
"loss": 0.41, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 15.57, |
|
"learning_rate": 4.131e-05, |
|
"loss": 0.3119, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 15.6, |
|
"learning_rate": 4.128e-05, |
|
"loss": 0.4695, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 15.62, |
|
"learning_rate": 4.125e-05, |
|
"loss": 0.2726, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 15.65, |
|
"learning_rate": 4.122e-05, |
|
"loss": 0.4765, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 15.68, |
|
"learning_rate": 4.1190000000000004e-05, |
|
"loss": 0.1646, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"learning_rate": 4.1160000000000006e-05, |
|
"loss": 0.1446, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 15.72, |
|
"learning_rate": 4.113e-05, |
|
"loss": 0.1289, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 15.75, |
|
"learning_rate": 4.11e-05, |
|
"loss": 0.2353, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 15.78, |
|
"learning_rate": 4.107e-05, |
|
"loss": 0.109, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 15.8, |
|
"learning_rate": 4.104000000000001e-05, |
|
"loss": 0.167, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 15.82, |
|
"learning_rate": 4.101e-05, |
|
"loss": 0.307, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 15.85, |
|
"learning_rate": 4.0980000000000004e-05, |
|
"loss": 0.1727, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 15.88, |
|
"learning_rate": 4.095e-05, |
|
"loss": 0.1382, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 15.9, |
|
"learning_rate": 4.092e-05, |
|
"loss": 0.3089, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 15.93, |
|
"learning_rate": 4.089e-05, |
|
"loss": 0.138, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 15.95, |
|
"learning_rate": 4.0860000000000005e-05, |
|
"loss": 0.4216, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 15.97, |
|
"learning_rate": 4.083e-05, |
|
"loss": 0.1724, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 4.08e-05, |
|
"loss": 0.176, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_loss": 0.1851038783788681, |
|
"eval_mean_accuracy": 0.9435937525042573, |
|
"eval_mean_iou": 0.5721625613288966, |
|
"eval_overall_accuracy": 0.9333205239133342, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9249823898058648, |
|
0.9622051152026498 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9165148934878891, |
|
0.799972790498801 |
|
], |
|
"eval_runtime": 48.6944, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.205, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 16.02, |
|
"learning_rate": 4.077e-05, |
|
"loss": 0.1323, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 16.05, |
|
"learning_rate": 4.0740000000000006e-05, |
|
"loss": 0.1133, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 16.07, |
|
"learning_rate": 4.071e-05, |
|
"loss": 0.1772, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 16.1, |
|
"learning_rate": 4.0680000000000004e-05, |
|
"loss": 0.1973, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 16.12, |
|
"learning_rate": 4.065e-05, |
|
"loss": 0.1182, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 16.15, |
|
"learning_rate": 4.062e-05, |
|
"loss": 0.1707, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 16.18, |
|
"learning_rate": 4.059e-05, |
|
"loss": 0.1126, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 16.2, |
|
"learning_rate": 4.0560000000000005e-05, |
|
"loss": 0.1887, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 16.23, |
|
"learning_rate": 4.053e-05, |
|
"loss": 0.0731, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 16.25, |
|
"learning_rate": 4.05e-05, |
|
"loss": 0.2288, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 16.27, |
|
"learning_rate": 4.047e-05, |
|
"loss": 0.0975, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 16.3, |
|
"learning_rate": 4.0440000000000006e-05, |
|
"loss": 0.6953, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 16.32, |
|
"learning_rate": 4.041e-05, |
|
"loss": 0.3601, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 16.35, |
|
"learning_rate": 4.038e-05, |
|
"loss": 0.3311, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 16.38, |
|
"learning_rate": 4.035e-05, |
|
"loss": 0.1751, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 16.4, |
|
"learning_rate": 4.032e-05, |
|
"loss": 0.0658, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 16.43, |
|
"learning_rate": 4.029e-05, |
|
"loss": 0.0907, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 16.45, |
|
"learning_rate": 4.0260000000000004e-05, |
|
"loss": 0.0836, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 16.48, |
|
"learning_rate": 4.023e-05, |
|
"loss": 0.2174, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 4.02e-05, |
|
"loss": 1.2637, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"eval_loss": 0.17593476176261902, |
|
"eval_mean_accuracy": 0.9422583491499954, |
|
"eval_mean_iou": 0.5795477089728069, |
|
"eval_overall_accuracy": 0.9374328311073945, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9335162613337237, |
|
0.9510004369662672 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9226226113759224, |
|
0.8160205155424984 |
|
], |
|
"eval_runtime": 48.2239, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.207, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 16.52, |
|
"learning_rate": 4.017e-05, |
|
"loss": 0.527, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 16.55, |
|
"learning_rate": 4.0140000000000005e-05, |
|
"loss": 0.098, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 16.57, |
|
"learning_rate": 4.011e-05, |
|
"loss": 0.4374, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 16.6, |
|
"learning_rate": 4.008e-05, |
|
"loss": 0.138, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 16.62, |
|
"learning_rate": 4.005e-05, |
|
"loss": 0.1325, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 16.65, |
|
"learning_rate": 4.0020000000000006e-05, |
|
"loss": 0.0577, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 16.68, |
|
"learning_rate": 3.999e-05, |
|
"loss": 0.338, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 16.7, |
|
"learning_rate": 3.9960000000000004e-05, |
|
"loss": 0.1206, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 16.73, |
|
"learning_rate": 3.993e-05, |
|
"loss": 0.2343, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 16.75, |
|
"learning_rate": 3.99e-05, |
|
"loss": 0.1541, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 16.77, |
|
"learning_rate": 3.987e-05, |
|
"loss": 0.2382, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 16.8, |
|
"learning_rate": 3.9840000000000005e-05, |
|
"loss": 0.0979, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 16.82, |
|
"learning_rate": 3.981e-05, |
|
"loss": 0.2911, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 16.85, |
|
"learning_rate": 3.978e-05, |
|
"loss": 0.1072, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 16.88, |
|
"learning_rate": 3.975e-05, |
|
"loss": 0.0793, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 16.9, |
|
"learning_rate": 3.9720000000000006e-05, |
|
"loss": 0.1476, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 16.93, |
|
"learning_rate": 3.969e-05, |
|
"loss": 0.2238, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 16.95, |
|
"learning_rate": 3.966e-05, |
|
"loss": 0.2254, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 16.98, |
|
"learning_rate": 3.963e-05, |
|
"loss": 0.2471, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 3.96e-05, |
|
"loss": 0.3748, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"eval_loss": 0.1429952085018158, |
|
"eval_mean_accuracy": 0.9260606241904736, |
|
"eval_mean_iou": 0.5748151114628661, |
|
"eval_overall_accuracy": 0.9240236241152235, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9223703191532987, |
|
0.9297509292276487 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9080772684274999, |
|
0.8163680659610985 |
|
], |
|
"eval_runtime": 47.4552, |
|
"eval_samples_per_second": 0.421, |
|
"eval_steps_per_second": 0.211, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 17.02, |
|
"learning_rate": 3.957e-05, |
|
"loss": 0.3839, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 17.05, |
|
"learning_rate": 3.9540000000000004e-05, |
|
"loss": 0.0602, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 17.07, |
|
"learning_rate": 3.951e-05, |
|
"loss": 0.0754, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 17.1, |
|
"learning_rate": 3.948e-05, |
|
"loss": 0.1139, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 17.12, |
|
"learning_rate": 3.945e-05, |
|
"loss": 0.2653, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 17.15, |
|
"learning_rate": 3.9420000000000005e-05, |
|
"loss": 0.2246, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 17.18, |
|
"learning_rate": 3.939e-05, |
|
"loss": 0.0618, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 17.2, |
|
"learning_rate": 3.936e-05, |
|
"loss": 0.0635, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 17.23, |
|
"learning_rate": 3.933e-05, |
|
"loss": 0.0675, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 17.25, |
|
"learning_rate": 3.93e-05, |
|
"loss": 0.0761, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 17.27, |
|
"learning_rate": 3.927e-05, |
|
"loss": 0.0886, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 17.3, |
|
"learning_rate": 3.9240000000000004e-05, |
|
"loss": 0.1016, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 17.32, |
|
"learning_rate": 3.921e-05, |
|
"loss": 0.4037, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 17.35, |
|
"learning_rate": 3.918e-05, |
|
"loss": 0.2332, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 17.38, |
|
"learning_rate": 3.9149999999999996e-05, |
|
"loss": 0.1594, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 17.4, |
|
"learning_rate": 3.9120000000000005e-05, |
|
"loss": 0.4753, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 17.43, |
|
"learning_rate": 3.909e-05, |
|
"loss": 0.382, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 17.45, |
|
"learning_rate": 3.906e-05, |
|
"loss": 0.5817, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 17.48, |
|
"learning_rate": 3.903e-05, |
|
"loss": 0.1298, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 3.9e-05, |
|
"loss": 0.9875, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"eval_loss": 0.16197028756141663, |
|
"eval_mean_accuracy": 0.9293318312587986, |
|
"eval_mean_iou": 0.563817892491426, |
|
"eval_overall_accuracy": 0.9149937886397362, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9033565004067086, |
|
0.9553071621108887 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8939333699071477, |
|
0.7975203075671302 |
|
], |
|
"eval_runtime": 48.691, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.205, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 17.52, |
|
"learning_rate": 3.897e-05, |
|
"loss": 0.1113, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 17.55, |
|
"learning_rate": 3.894e-05, |
|
"loss": 0.2021, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 17.57, |
|
"learning_rate": 3.891e-05, |
|
"loss": 0.1874, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 17.6, |
|
"learning_rate": 3.888e-05, |
|
"loss": 0.5318, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 17.62, |
|
"learning_rate": 3.8849999999999996e-05, |
|
"loss": 0.1391, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 17.65, |
|
"learning_rate": 3.8820000000000004e-05, |
|
"loss": 0.3109, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 17.68, |
|
"learning_rate": 3.879e-05, |
|
"loss": 0.7604, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 17.7, |
|
"learning_rate": 3.876e-05, |
|
"loss": 0.1211, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 17.73, |
|
"learning_rate": 3.873e-05, |
|
"loss": 0.0583, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 17.75, |
|
"learning_rate": 3.87e-05, |
|
"loss": 0.1001, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 17.77, |
|
"learning_rate": 3.867e-05, |
|
"loss": 0.2483, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 17.8, |
|
"learning_rate": 3.864e-05, |
|
"loss": 0.3419, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 17.82, |
|
"learning_rate": 3.861e-05, |
|
"loss": 0.1669, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 17.85, |
|
"learning_rate": 3.858e-05, |
|
"loss": 0.2238, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 17.88, |
|
"learning_rate": 3.855e-05, |
|
"loss": 0.1803, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 17.9, |
|
"learning_rate": 3.8520000000000004e-05, |
|
"loss": 0.6072, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 17.93, |
|
"learning_rate": 3.849e-05, |
|
"loss": 0.1341, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 17.95, |
|
"learning_rate": 3.846e-05, |
|
"loss": 0.0749, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 17.98, |
|
"learning_rate": 3.8429999999999996e-05, |
|
"loss": 0.1464, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 3.8400000000000005e-05, |
|
"loss": 0.16, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"eval_loss": 0.15552404522895813, |
|
"eval_mean_accuracy": 0.9247679225016672, |
|
"eval_mean_iou": 0.5778008229677836, |
|
"eval_overall_accuracy": 0.9264860135634839, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9278804801185191, |
|
0.9216553648848154 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9099117121849809, |
|
0.8234907567183699 |
|
], |
|
"eval_runtime": 48.7573, |
|
"eval_samples_per_second": 0.41, |
|
"eval_steps_per_second": 0.205, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 18.02, |
|
"learning_rate": 3.837e-05, |
|
"loss": 0.1312, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 18.05, |
|
"learning_rate": 3.834e-05, |
|
"loss": 0.2353, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 18.07, |
|
"learning_rate": 3.831e-05, |
|
"loss": 0.1771, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 18.1, |
|
"learning_rate": 3.828e-05, |
|
"loss": 0.1728, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 18.12, |
|
"learning_rate": 3.825e-05, |
|
"loss": 0.263, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 18.15, |
|
"learning_rate": 3.822e-05, |
|
"loss": 0.0744, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 18.18, |
|
"learning_rate": 3.819e-05, |
|
"loss": 0.0975, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 18.2, |
|
"learning_rate": 3.816e-05, |
|
"loss": 0.1179, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 18.23, |
|
"learning_rate": 3.8129999999999996e-05, |
|
"loss": 0.3915, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 18.25, |
|
"learning_rate": 3.8100000000000005e-05, |
|
"loss": 0.5075, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 18.27, |
|
"learning_rate": 3.807e-05, |
|
"loss": 0.2501, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 18.3, |
|
"learning_rate": 3.804e-05, |
|
"loss": 0.081, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 18.32, |
|
"learning_rate": 3.801e-05, |
|
"loss": 0.125, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 18.35, |
|
"learning_rate": 3.798e-05, |
|
"loss": 0.1165, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 18.38, |
|
"learning_rate": 3.795e-05, |
|
"loss": 0.1679, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 18.4, |
|
"learning_rate": 3.792e-05, |
|
"loss": 0.7162, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 18.43, |
|
"learning_rate": 3.789e-05, |
|
"loss": 0.3411, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 18.45, |
|
"learning_rate": 3.786e-05, |
|
"loss": 0.2077, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 18.48, |
|
"learning_rate": 3.7829999999999995e-05, |
|
"loss": 0.2529, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 3.7800000000000004e-05, |
|
"loss": 0.3697, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"eval_loss": 0.15720753371715546, |
|
"eval_mean_accuracy": 0.9430400754892173, |
|
"eval_mean_iou": 0.5705525377970841, |
|
"eval_overall_accuracy": 0.929726635698484, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9189209531981514, |
|
0.9671591977802833 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9117028284079924, |
|
0.79995478498326 |
|
], |
|
"eval_runtime": 49.0295, |
|
"eval_samples_per_second": 0.408, |
|
"eval_steps_per_second": 0.204, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 18.52, |
|
"learning_rate": 3.777e-05, |
|
"loss": 0.2686, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 18.55, |
|
"learning_rate": 3.774e-05, |
|
"loss": 0.269, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 18.57, |
|
"learning_rate": 3.7709999999999996e-05, |
|
"loss": 0.4568, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 18.6, |
|
"learning_rate": 3.768e-05, |
|
"loss": 0.1015, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 18.62, |
|
"learning_rate": 3.765e-05, |
|
"loss": 0.0878, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 18.65, |
|
"learning_rate": 3.762e-05, |
|
"loss": 0.1061, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 18.68, |
|
"learning_rate": 3.759e-05, |
|
"loss": 0.1726, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 18.7, |
|
"learning_rate": 3.756e-05, |
|
"loss": 0.152, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 18.73, |
|
"learning_rate": 3.7529999999999995e-05, |
|
"loss": 0.0627, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 18.75, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0597, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 18.77, |
|
"learning_rate": 3.7470000000000005e-05, |
|
"loss": 0.2041, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 18.8, |
|
"learning_rate": 3.744e-05, |
|
"loss": 0.1792, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 18.82, |
|
"learning_rate": 3.741e-05, |
|
"loss": 0.1539, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 18.85, |
|
"learning_rate": 3.738e-05, |
|
"loss": 0.1403, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 18.88, |
|
"learning_rate": 3.735000000000001e-05, |
|
"loss": 0.2269, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 18.9, |
|
"learning_rate": 3.732e-05, |
|
"loss": 0.0968, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 18.93, |
|
"learning_rate": 3.7290000000000004e-05, |
|
"loss": 0.1496, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 18.95, |
|
"learning_rate": 3.726e-05, |
|
"loss": 0.3167, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 18.98, |
|
"learning_rate": 3.723e-05, |
|
"loss": 0.1403, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 3.72e-05, |
|
"loss": 0.1338, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"eval_loss": 0.14783106744289398, |
|
"eval_mean_accuracy": 0.9454575459882153, |
|
"eval_mean_iou": 0.5758130853178719, |
|
"eval_overall_accuracy": 0.9243466792988494, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9072123153525076, |
|
0.9837027766239231 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9046942157145423, |
|
0.8227450402390731 |
|
], |
|
"eval_runtime": 48.5789, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 19.02, |
|
"learning_rate": 3.7170000000000005e-05, |
|
"loss": 0.1798, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 19.05, |
|
"learning_rate": 3.714e-05, |
|
"loss": 0.0672, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 19.07, |
|
"learning_rate": 3.711e-05, |
|
"loss": 1.237, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 19.1, |
|
"learning_rate": 3.708e-05, |
|
"loss": 0.2717, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 19.12, |
|
"learning_rate": 3.7050000000000006e-05, |
|
"loss": 0.2913, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 19.15, |
|
"learning_rate": 3.702e-05, |
|
"loss": 0.1676, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 19.18, |
|
"learning_rate": 3.699e-05, |
|
"loss": 0.1547, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 19.2, |
|
"learning_rate": 3.696e-05, |
|
"loss": 0.1109, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 19.23, |
|
"learning_rate": 3.693e-05, |
|
"loss": 0.1951, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 19.25, |
|
"learning_rate": 3.69e-05, |
|
"loss": 0.077, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 19.27, |
|
"learning_rate": 3.6870000000000004e-05, |
|
"loss": 0.1245, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 19.3, |
|
"learning_rate": 3.684e-05, |
|
"loss": 0.0799, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 19.32, |
|
"learning_rate": 3.681e-05, |
|
"loss": 0.2119, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 19.35, |
|
"learning_rate": 3.6780000000000004e-05, |
|
"loss": 0.3093, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 19.38, |
|
"learning_rate": 3.6750000000000006e-05, |
|
"loss": 0.0728, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 19.4, |
|
"learning_rate": 3.672e-05, |
|
"loss": 0.1206, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 19.43, |
|
"learning_rate": 3.669e-05, |
|
"loss": 0.1638, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 19.45, |
|
"learning_rate": 3.666e-05, |
|
"loss": 0.3086, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 19.48, |
|
"learning_rate": 3.663000000000001e-05, |
|
"loss": 0.712, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 3.66e-05, |
|
"loss": 0.7522, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"eval_loss": 0.30090320110321045, |
|
"eval_mean_accuracy": 0.9207649216566469, |
|
"eval_mean_iou": 0.5125294777072046, |
|
"eval_overall_accuracy": 0.8864320045372738, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.8585661330920594, |
|
0.9829637102212344 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8555010673222496, |
|
0.6820873657993641 |
|
], |
|
"eval_runtime": 47.7776, |
|
"eval_samples_per_second": 0.419, |
|
"eval_steps_per_second": 0.209, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 19.52, |
|
"learning_rate": 3.6570000000000004e-05, |
|
"loss": 0.1087, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 19.55, |
|
"learning_rate": 3.654e-05, |
|
"loss": 0.3194, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 19.57, |
|
"learning_rate": 3.651e-05, |
|
"loss": 0.1143, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 19.6, |
|
"learning_rate": 3.648e-05, |
|
"loss": 0.1599, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 19.62, |
|
"learning_rate": 3.6450000000000005e-05, |
|
"loss": 0.1602, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 19.65, |
|
"learning_rate": 3.642e-05, |
|
"loss": 0.1478, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 19.68, |
|
"learning_rate": 3.639e-05, |
|
"loss": 0.2748, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 19.7, |
|
"learning_rate": 3.636e-05, |
|
"loss": 0.1033, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 19.73, |
|
"learning_rate": 3.6330000000000006e-05, |
|
"loss": 0.0757, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 19.75, |
|
"learning_rate": 3.63e-05, |
|
"loss": 0.7663, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 19.77, |
|
"learning_rate": 3.6270000000000003e-05, |
|
"loss": 0.0485, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 19.8, |
|
"learning_rate": 3.624e-05, |
|
"loss": 0.0576, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 19.82, |
|
"learning_rate": 3.621e-05, |
|
"loss": 0.1733, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 19.85, |
|
"learning_rate": 3.618e-05, |
|
"loss": 0.1801, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 19.88, |
|
"learning_rate": 3.6150000000000005e-05, |
|
"loss": 0.6399, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 19.9, |
|
"learning_rate": 3.612e-05, |
|
"loss": 0.2138, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 19.93, |
|
"learning_rate": 3.609e-05, |
|
"loss": 1.1095, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 19.95, |
|
"learning_rate": 3.606e-05, |
|
"loss": 0.2221, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 19.98, |
|
"learning_rate": 3.6030000000000006e-05, |
|
"loss": 0.1944, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 3.6e-05, |
|
"loss": 0.1449, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_loss": 0.161110058426857, |
|
"eval_mean_accuracy": 0.9330079294552827, |
|
"eval_mean_iou": 0.5745221298756912, |
|
"eval_overall_accuracy": 0.9217948655977649, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9126939262609133, |
|
0.9533219326496519 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9032388150253235, |
|
0.8203275746017501 |
|
], |
|
"eval_runtime": 48.6183, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.206, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 20.02, |
|
"learning_rate": 3.597e-05, |
|
"loss": 0.1831, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 20.05, |
|
"learning_rate": 3.594e-05, |
|
"loss": 0.2915, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 20.07, |
|
"learning_rate": 3.591e-05, |
|
"loss": 0.2098, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 20.1, |
|
"learning_rate": 3.588e-05, |
|
"loss": 0.1522, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 20.12, |
|
"learning_rate": 3.5850000000000004e-05, |
|
"loss": 0.1788, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 20.15, |
|
"learning_rate": 3.582e-05, |
|
"loss": 0.0554, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 20.18, |
|
"learning_rate": 3.579e-05, |
|
"loss": 0.0983, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 20.2, |
|
"learning_rate": 3.5759999999999996e-05, |
|
"loss": 0.1853, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 20.23, |
|
"learning_rate": 3.5730000000000005e-05, |
|
"loss": 0.23, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 20.25, |
|
"learning_rate": 3.57e-05, |
|
"loss": 0.1719, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 20.27, |
|
"learning_rate": 3.567e-05, |
|
"loss": 0.0485, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 20.3, |
|
"learning_rate": 3.564e-05, |
|
"loss": 0.1964, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 20.32, |
|
"learning_rate": 3.561e-05, |
|
"loss": 0.1697, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 20.35, |
|
"learning_rate": 3.558e-05, |
|
"loss": 0.0638, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 20.38, |
|
"learning_rate": 3.5550000000000004e-05, |
|
"loss": 0.1374, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 20.4, |
|
"learning_rate": 3.552e-05, |
|
"loss": 0.045, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 20.43, |
|
"learning_rate": 3.549e-05, |
|
"loss": 0.1264, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 20.45, |
|
"learning_rate": 3.5459999999999996e-05, |
|
"loss": 0.2464, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 20.48, |
|
"learning_rate": 3.5430000000000005e-05, |
|
"loss": 0.234, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 3.54e-05, |
|
"loss": 0.2661, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"eval_loss": 0.15575893223285675, |
|
"eval_mean_accuracy": 0.9424208546546318, |
|
"eval_mean_iou": 0.5733499196680856, |
|
"eval_overall_accuracy": 0.9244240191931837, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9098171187178844, |
|
0.975024590591379 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9057178734574919, |
|
0.8143318855467647 |
|
], |
|
"eval_runtime": 48.847, |
|
"eval_samples_per_second": 0.409, |
|
"eval_steps_per_second": 0.205, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 20.52, |
|
"learning_rate": 3.537e-05, |
|
"loss": 1.111, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 20.55, |
|
"learning_rate": 3.534e-05, |
|
"loss": 0.1015, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 20.57, |
|
"learning_rate": 3.531e-05, |
|
"loss": 0.1468, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 20.6, |
|
"learning_rate": 3.528e-05, |
|
"loss": 0.1679, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 20.62, |
|
"learning_rate": 3.525e-05, |
|
"loss": 0.1482, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 20.65, |
|
"learning_rate": 3.522e-05, |
|
"loss": 0.1424, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 20.68, |
|
"learning_rate": 3.519e-05, |
|
"loss": 0.0609, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 20.7, |
|
"learning_rate": 3.516e-05, |
|
"loss": 0.1189, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 20.73, |
|
"learning_rate": 3.5130000000000004e-05, |
|
"loss": 0.0904, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 20.75, |
|
"learning_rate": 3.51e-05, |
|
"loss": 0.1175, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 20.77, |
|
"learning_rate": 3.507e-05, |
|
"loss": 0.1293, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 20.8, |
|
"learning_rate": 3.5039999999999997e-05, |
|
"loss": 0.0566, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 20.82, |
|
"learning_rate": 3.5010000000000005e-05, |
|
"loss": 0.0742, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 20.85, |
|
"learning_rate": 3.498e-05, |
|
"loss": 0.1171, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 20.88, |
|
"learning_rate": 3.495e-05, |
|
"loss": 0.193, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 20.9, |
|
"learning_rate": 3.492e-05, |
|
"loss": 0.154, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 20.93, |
|
"learning_rate": 3.489e-05, |
|
"loss": 0.1993, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 20.95, |
|
"learning_rate": 3.486e-05, |
|
"loss": 0.1253, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 20.98, |
|
"learning_rate": 3.4830000000000004e-05, |
|
"loss": 0.0621, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 3.48e-05, |
|
"loss": 0.1951, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"eval_loss": 0.2067786008119583, |
|
"eval_mean_accuracy": 0.954513781309211, |
|
"eval_mean_iou": 0.5791708070121998, |
|
"eval_overall_accuracy": 0.9421984992838004, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9322029587202515, |
|
0.9768246038981707 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9270619743496028, |
|
0.8104504466869968 |
|
], |
|
"eval_runtime": 48.2927, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 21.02, |
|
"learning_rate": 3.477e-05, |
|
"loss": 0.1603, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 21.05, |
|
"learning_rate": 3.4739999999999996e-05, |
|
"loss": 0.0336, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 21.07, |
|
"learning_rate": 3.4710000000000005e-05, |
|
"loss": 0.2433, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 21.1, |
|
"learning_rate": 3.468e-05, |
|
"loss": 0.0526, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 21.12, |
|
"learning_rate": 3.465e-05, |
|
"loss": 0.1717, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 21.15, |
|
"learning_rate": 3.462e-05, |
|
"loss": 0.1157, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 21.18, |
|
"learning_rate": 3.459e-05, |
|
"loss": 0.5927, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 21.2, |
|
"learning_rate": 3.456e-05, |
|
"loss": 0.1884, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 21.23, |
|
"learning_rate": 3.453e-05, |
|
"loss": 0.1366, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 21.25, |
|
"learning_rate": 3.45e-05, |
|
"loss": 0.0697, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 21.27, |
|
"learning_rate": 3.447e-05, |
|
"loss": 0.0812, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 21.3, |
|
"learning_rate": 3.4439999999999996e-05, |
|
"loss": 0.1395, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 21.32, |
|
"learning_rate": 3.4410000000000004e-05, |
|
"loss": 0.1977, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 21.35, |
|
"learning_rate": 3.438e-05, |
|
"loss": 0.1142, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 21.38, |
|
"learning_rate": 3.435e-05, |
|
"loss": 0.1353, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 21.4, |
|
"learning_rate": 3.432e-05, |
|
"loss": 0.5259, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 21.43, |
|
"learning_rate": 3.429e-05, |
|
"loss": 0.3162, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 21.45, |
|
"learning_rate": 3.426e-05, |
|
"loss": 0.3085, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 21.48, |
|
"learning_rate": 3.423e-05, |
|
"loss": 0.0997, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 3.42e-05, |
|
"loss": 0.1015, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"eval_loss": 0.1526772677898407, |
|
"eval_mean_accuracy": 0.9402454093097647, |
|
"eval_mean_iou": 0.5674436765724752, |
|
"eval_overall_accuracy": 0.9198090026859501, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9032220555937088, |
|
0.9772687630258206 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9001446953405203, |
|
0.802186334376905 |
|
], |
|
"eval_runtime": 47.7529, |
|
"eval_samples_per_second": 0.419, |
|
"eval_steps_per_second": 0.209, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 21.52, |
|
"learning_rate": 3.417e-05, |
|
"loss": 0.1098, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 21.55, |
|
"learning_rate": 3.4139999999999995e-05, |
|
"loss": 0.0643, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 21.57, |
|
"learning_rate": 3.4110000000000004e-05, |
|
"loss": 0.1593, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 21.6, |
|
"learning_rate": 3.408e-05, |
|
"loss": 0.9459, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 21.62, |
|
"learning_rate": 3.405e-05, |
|
"loss": 0.0629, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 21.65, |
|
"learning_rate": 3.4019999999999996e-05, |
|
"loss": 0.1155, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 21.68, |
|
"learning_rate": 3.399e-05, |
|
"loss": 0.2053, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 21.7, |
|
"learning_rate": 3.396e-05, |
|
"loss": 0.1684, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 21.73, |
|
"learning_rate": 3.393e-05, |
|
"loss": 0.2342, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 21.75, |
|
"learning_rate": 3.39e-05, |
|
"loss": 0.0851, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 21.77, |
|
"learning_rate": 3.387e-05, |
|
"loss": 0.114, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 21.8, |
|
"learning_rate": 3.3839999999999994e-05, |
|
"loss": 0.0905, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 21.82, |
|
"learning_rate": 3.381e-05, |
|
"loss": 0.1668, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 21.85, |
|
"learning_rate": 3.378e-05, |
|
"loss": 0.0753, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 21.88, |
|
"learning_rate": 3.375e-05, |
|
"loss": 0.24, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 21.9, |
|
"learning_rate": 3.372e-05, |
|
"loss": 0.0768, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 21.93, |
|
"learning_rate": 3.369e-05, |
|
"loss": 0.1481, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 21.95, |
|
"learning_rate": 3.3660000000000006e-05, |
|
"loss": 0.0601, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 21.98, |
|
"learning_rate": 3.363e-05, |
|
"loss": 0.1339, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 3.3600000000000004e-05, |
|
"loss": 0.293, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"eval_loss": 0.14977200329303741, |
|
"eval_mean_accuracy": 0.9319219947774842, |
|
"eval_mean_iou": 0.5899828627725737, |
|
"eval_overall_accuracy": 0.9393002673060098, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9452887474882439, |
|
0.9185552420667246 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9254936739096737, |
|
0.8444549144080475 |
|
], |
|
"eval_runtime": 47.9376, |
|
"eval_samples_per_second": 0.417, |
|
"eval_steps_per_second": 0.209, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 22.02, |
|
"learning_rate": 3.357e-05, |
|
"loss": 0.161, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 22.05, |
|
"learning_rate": 3.354e-05, |
|
"loss": 0.0778, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 22.07, |
|
"learning_rate": 3.351e-05, |
|
"loss": 0.1908, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 22.1, |
|
"learning_rate": 3.3480000000000005e-05, |
|
"loss": 0.2216, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 22.12, |
|
"learning_rate": 3.345e-05, |
|
"loss": 0.5083, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 22.15, |
|
"learning_rate": 3.342e-05, |
|
"loss": 0.1677, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 22.18, |
|
"learning_rate": 3.3390000000000004e-05, |
|
"loss": 0.0513, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 22.2, |
|
"learning_rate": 3.3360000000000006e-05, |
|
"loss": 0.2139, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 22.23, |
|
"learning_rate": 3.333e-05, |
|
"loss": 0.152, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 22.25, |
|
"learning_rate": 3.33e-05, |
|
"loss": 0.2707, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 22.27, |
|
"learning_rate": 3.327e-05, |
|
"loss": 0.1546, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 22.3, |
|
"learning_rate": 3.324000000000001e-05, |
|
"loss": 0.0585, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 22.32, |
|
"learning_rate": 3.321e-05, |
|
"loss": 0.425, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 22.35, |
|
"learning_rate": 3.3180000000000004e-05, |
|
"loss": 0.1308, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 22.38, |
|
"learning_rate": 3.315e-05, |
|
"loss": 0.1866, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 22.4, |
|
"learning_rate": 3.312e-05, |
|
"loss": 0.05, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 22.43, |
|
"learning_rate": 3.309e-05, |
|
"loss": 0.1705, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 22.45, |
|
"learning_rate": 3.3060000000000005e-05, |
|
"loss": 0.1573, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 22.48, |
|
"learning_rate": 3.303e-05, |
|
"loss": 0.081, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 3.3e-05, |
|
"loss": 0.1246, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"eval_loss": 0.18260009586811066, |
|
"eval_mean_accuracy": 0.9329330838917222, |
|
"eval_mean_iou": 0.5442009612433875, |
|
"eval_overall_accuracy": 0.909885730306121, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.8911796416293051, |
|
0.9746865261541394 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8858857774156546, |
|
0.7467171063145076 |
|
], |
|
"eval_runtime": 48.5884, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 22.52, |
|
"learning_rate": 3.297e-05, |
|
"loss": 0.1514, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 22.55, |
|
"learning_rate": 3.2940000000000006e-05, |
|
"loss": 0.2313, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 22.57, |
|
"learning_rate": 3.291e-05, |
|
"loss": 0.1051, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 22.6, |
|
"learning_rate": 3.2880000000000004e-05, |
|
"loss": 0.045, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 22.62, |
|
"learning_rate": 3.285e-05, |
|
"loss": 0.1758, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 22.65, |
|
"learning_rate": 3.282e-05, |
|
"loss": 0.0755, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 22.68, |
|
"learning_rate": 3.279e-05, |
|
"loss": 0.8215, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 22.7, |
|
"learning_rate": 3.2760000000000005e-05, |
|
"loss": 0.039, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 22.73, |
|
"learning_rate": 3.273e-05, |
|
"loss": 0.1883, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 22.75, |
|
"learning_rate": 3.27e-05, |
|
"loss": 0.1038, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 22.77, |
|
"learning_rate": 3.267e-05, |
|
"loss": 0.5631, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 22.8, |
|
"learning_rate": 3.2640000000000006e-05, |
|
"loss": 0.3108, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 22.82, |
|
"learning_rate": 3.261e-05, |
|
"loss": 0.1469, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 22.85, |
|
"learning_rate": 3.258e-05, |
|
"loss": 0.0762, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 22.88, |
|
"learning_rate": 3.255e-05, |
|
"loss": 0.1664, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 22.9, |
|
"learning_rate": 3.252e-05, |
|
"loss": 0.1231, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 22.93, |
|
"learning_rate": 3.249e-05, |
|
"loss": 0.5659, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 22.95, |
|
"learning_rate": 3.2460000000000004e-05, |
|
"loss": 0.159, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 22.98, |
|
"learning_rate": 3.243e-05, |
|
"loss": 0.0483, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 3.24e-05, |
|
"loss": 0.1243, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"eval_loss": 0.16654913127422333, |
|
"eval_mean_accuracy": 0.9321520220158931, |
|
"eval_mean_iou": 0.562641953931424, |
|
"eval_overall_accuracy": 0.9077226301364566, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.8878948278197982, |
|
0.976409216211988 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8845905410374336, |
|
0.8033353207568384 |
|
], |
|
"eval_runtime": 48.2046, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.207, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 23.02, |
|
"learning_rate": 3.237e-05, |
|
"loss": 0.0707, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 23.05, |
|
"learning_rate": 3.2340000000000005e-05, |
|
"loss": 0.0467, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 23.07, |
|
"learning_rate": 3.231e-05, |
|
"loss": 0.2368, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 23.1, |
|
"learning_rate": 3.228e-05, |
|
"loss": 0.1229, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 23.12, |
|
"learning_rate": 3.225e-05, |
|
"loss": 0.0503, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 23.15, |
|
"learning_rate": 3.222e-05, |
|
"loss": 0.0769, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 23.18, |
|
"learning_rate": 3.219e-05, |
|
"loss": 0.0734, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 23.2, |
|
"learning_rate": 3.2160000000000004e-05, |
|
"loss": 0.1102, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 23.23, |
|
"learning_rate": 3.213e-05, |
|
"loss": 0.2293, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 23.25, |
|
"learning_rate": 3.21e-05, |
|
"loss": 1.006, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 23.27, |
|
"learning_rate": 3.2069999999999996e-05, |
|
"loss": 0.182, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 23.3, |
|
"learning_rate": 3.2040000000000005e-05, |
|
"loss": 0.1221, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 23.32, |
|
"learning_rate": 3.201e-05, |
|
"loss": 0.0659, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 23.35, |
|
"learning_rate": 3.198e-05, |
|
"loss": 0.1934, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 23.38, |
|
"learning_rate": 3.195e-05, |
|
"loss": 0.3662, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 23.4, |
|
"learning_rate": 3.192e-05, |
|
"loss": 0.1094, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 23.43, |
|
"learning_rate": 3.189e-05, |
|
"loss": 0.3466, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 23.45, |
|
"learning_rate": 3.186e-05, |
|
"loss": 0.1565, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 23.48, |
|
"learning_rate": 3.183e-05, |
|
"loss": 0.0585, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 3.18e-05, |
|
"loss": 0.2825, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"eval_loss": 0.256104052066803, |
|
"eval_mean_accuracy": 0.8992670882287088, |
|
"eval_mean_iou": 0.4865682121604448, |
|
"eval_overall_accuracy": 0.8505583779246162, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.8110245784843657, |
|
0.9875095979730519 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8092426358962087, |
|
0.6504620005851257 |
|
], |
|
"eval_runtime": 48.9285, |
|
"eval_samples_per_second": 0.409, |
|
"eval_steps_per_second": 0.204, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 23.52, |
|
"learning_rate": 3.1769999999999996e-05, |
|
"loss": 0.1726, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 23.55, |
|
"learning_rate": 3.1740000000000004e-05, |
|
"loss": 0.0788, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 23.57, |
|
"learning_rate": 3.171e-05, |
|
"loss": 0.2621, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 23.6, |
|
"learning_rate": 3.168e-05, |
|
"loss": 0.0422, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 23.62, |
|
"learning_rate": 3.165e-05, |
|
"loss": 0.1793, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 23.65, |
|
"learning_rate": 3.1620000000000006e-05, |
|
"loss": 0.0958, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 23.68, |
|
"learning_rate": 3.159e-05, |
|
"loss": 0.1503, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 23.7, |
|
"learning_rate": 3.156e-05, |
|
"loss": 0.1169, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 23.73, |
|
"learning_rate": 3.153e-05, |
|
"loss": 0.2745, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 23.75, |
|
"learning_rate": 3.15e-05, |
|
"loss": 0.2336, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 23.77, |
|
"learning_rate": 3.147e-05, |
|
"loss": 0.1181, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 23.8, |
|
"learning_rate": 3.1440000000000004e-05, |
|
"loss": 0.0454, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 23.82, |
|
"learning_rate": 3.141e-05, |
|
"loss": 0.2745, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 23.85, |
|
"learning_rate": 3.138e-05, |
|
"loss": 0.1712, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 23.88, |
|
"learning_rate": 3.1349999999999996e-05, |
|
"loss": 0.1363, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 23.9, |
|
"learning_rate": 3.1320000000000005e-05, |
|
"loss": 0.2775, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 23.93, |
|
"learning_rate": 3.129e-05, |
|
"loss": 0.2212, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 23.95, |
|
"learning_rate": 3.126e-05, |
|
"loss": 0.0593, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 23.98, |
|
"learning_rate": 3.123e-05, |
|
"loss": 0.0591, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 3.12e-05, |
|
"loss": 0.1744, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"eval_loss": 0.16813531517982483, |
|
"eval_mean_accuracy": 0.9478273629765915, |
|
"eval_mean_iou": 0.5834502854952942, |
|
"eval_overall_accuracy": 0.9403922905015331, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9343577093718417, |
|
0.9612970165813414 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9259582204169315, |
|
0.8243926360689513 |
|
], |
|
"eval_runtime": 47.5753, |
|
"eval_samples_per_second": 0.42, |
|
"eval_steps_per_second": 0.21, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 24.02, |
|
"learning_rate": 3.117e-05, |
|
"loss": 0.1811, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 24.05, |
|
"learning_rate": 3.1140000000000003e-05, |
|
"loss": 0.0841, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 24.07, |
|
"learning_rate": 3.111e-05, |
|
"loss": 0.0302, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 24.1, |
|
"learning_rate": 3.108e-05, |
|
"loss": 0.2162, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 24.12, |
|
"learning_rate": 3.1049999999999996e-05, |
|
"loss": 0.1507, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 24.15, |
|
"learning_rate": 3.1020000000000005e-05, |
|
"loss": 0.0737, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 24.18, |
|
"learning_rate": 3.099e-05, |
|
"loss": 0.2242, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 24.2, |
|
"learning_rate": 3.096e-05, |
|
"loss": 0.2893, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 24.23, |
|
"learning_rate": 3.093e-05, |
|
"loss": 0.0528, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 24.25, |
|
"learning_rate": 3.09e-05, |
|
"loss": 0.1138, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 24.27, |
|
"learning_rate": 3.087e-05, |
|
"loss": 0.0636, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 24.3, |
|
"learning_rate": 3.084e-05, |
|
"loss": 0.1288, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 24.32, |
|
"learning_rate": 3.081e-05, |
|
"loss": 0.1742, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 24.35, |
|
"learning_rate": 3.078e-05, |
|
"loss": 0.1587, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 24.38, |
|
"learning_rate": 3.0749999999999995e-05, |
|
"loss": 0.1458, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 24.4, |
|
"learning_rate": 3.0720000000000004e-05, |
|
"loss": 0.0764, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 24.43, |
|
"learning_rate": 3.069e-05, |
|
"loss": 0.194, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 24.45, |
|
"learning_rate": 3.066e-05, |
|
"loss": 0.0569, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 24.48, |
|
"learning_rate": 3.0629999999999996e-05, |
|
"loss": 0.0761, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 3.06e-05, |
|
"loss": 0.5815, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"eval_loss": 0.14417901635169983, |
|
"eval_mean_accuracy": 0.9263959317675383, |
|
"eval_mean_iou": 0.5814876190615167, |
|
"eval_overall_accuracy": 0.9300380093355698, |
|
"eval_per_category_accuracy": [ |
|
NaN, |
|
0.9329940548399873, |
|
0.9197978086950892 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9139827267771568, |
|
0.8304801304073932 |
|
], |
|
"eval_runtime": 48.6861, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.205, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 24.52, |
|
"learning_rate": 3.057e-05, |
|
"loss": 0.7866, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 24.55, |
|
"learning_rate": 3.054e-05, |
|
"loss": 0.1626, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 24.57, |
|
"learning_rate": 3.0509999999999998e-05, |
|
"loss": 0.149, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 24.6, |
|
"learning_rate": 3.048e-05, |
|
"loss": 0.233, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 24.62, |
|
"learning_rate": 3.0449999999999998e-05, |
|
"loss": 0.1141, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 24.65, |
|
"learning_rate": 3.042e-05, |
|
"loss": 0.1139, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 24.68, |
|
"learning_rate": 3.039e-05, |
|
"loss": 0.0739, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 24.7, |
|
"learning_rate": 3.036e-05, |
|
"loss": 0.1264, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 24.73, |
|
"learning_rate": 3.033e-05, |
|
"loss": 0.1145, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 24.75, |
|
"learning_rate": 3.03e-05, |
|
"loss": 0.9447, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 24.77, |
|
"learning_rate": 3.0269999999999996e-05, |
|
"loss": 0.1732, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 24.8, |
|
"learning_rate": 3.0240000000000002e-05, |
|
"loss": 0.1101, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 24.82, |
|
"learning_rate": 3.0209999999999997e-05, |
|
"loss": 0.144, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 24.85, |
|
"learning_rate": 3.0180000000000002e-05, |
|
"loss": 0.377, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 24.88, |
|
"learning_rate": 3.0149999999999998e-05, |
|
"loss": 0.1945, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 24.9, |
|
"learning_rate": 3.012e-05, |
|
"loss": 0.057, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 24.93, |
|
"learning_rate": 3.0089999999999998e-05, |
|
"loss": 0.1585, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 24.95, |
|
"learning_rate": 3.006e-05, |
|
"loss": 0.2699, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 24.98, |
|
"learning_rate": 3.003e-05, |
|
"loss": 0.173, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 3e-05, |
|
"loss": 0.1304, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_loss": 0.1669236570596695, |
|
"eval_mean_accuracy": 0.9456798873018607, |
|
"eval_mean_iou": 0.5816811024664563, |
|
"eval_overall_accuracy": 0.9389155818940862, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.933425420243859, |
|
0.9579343543598624 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9242141792859011, |
|
0.8208291281134678 |
|
], |
|
"eval_runtime": 47.8829, |
|
"eval_samples_per_second": 0.418, |
|
"eval_steps_per_second": 0.209, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 25.02, |
|
"learning_rate": 2.997e-05, |
|
"loss": 0.1638, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 25.05, |
|
"learning_rate": 2.994e-05, |
|
"loss": 0.2992, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 25.07, |
|
"learning_rate": 2.991e-05, |
|
"loss": 0.2946, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 25.1, |
|
"learning_rate": 2.9880000000000002e-05, |
|
"loss": 0.2507, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 25.12, |
|
"learning_rate": 2.985e-05, |
|
"loss": 0.139, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 25.15, |
|
"learning_rate": 2.982e-05, |
|
"loss": 0.1165, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 25.18, |
|
"learning_rate": 2.979e-05, |
|
"loss": 0.1621, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 25.2, |
|
"learning_rate": 2.976e-05, |
|
"loss": 0.091, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 25.23, |
|
"learning_rate": 2.973e-05, |
|
"loss": 0.0911, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 25.25, |
|
"learning_rate": 2.97e-05, |
|
"loss": 0.0695, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 25.27, |
|
"learning_rate": 2.967e-05, |
|
"loss": 0.0873, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 25.3, |
|
"learning_rate": 2.964e-05, |
|
"loss": 0.0346, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 25.32, |
|
"learning_rate": 2.961e-05, |
|
"loss": 0.1843, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 25.35, |
|
"learning_rate": 2.958e-05, |
|
"loss": 0.0884, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 25.38, |
|
"learning_rate": 2.955e-05, |
|
"loss": 0.2015, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 25.4, |
|
"learning_rate": 2.9520000000000002e-05, |
|
"loss": 0.1084, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 25.43, |
|
"learning_rate": 2.949e-05, |
|
"loss": 0.0809, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 25.45, |
|
"learning_rate": 2.946e-05, |
|
"loss": 0.0906, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 25.48, |
|
"learning_rate": 2.943e-05, |
|
"loss": 0.2197, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 2.94e-05, |
|
"loss": 0.065, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"eval_loss": 0.1728467345237732, |
|
"eval_mean_accuracy": 0.9262948263485297, |
|
"eval_mean_iou": 0.566378377855929, |
|
"eval_overall_accuracy": 0.9134864663241153, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9030907253323616, |
|
0.9494989273646978 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8919851355550018, |
|
0.8071499980127854 |
|
], |
|
"eval_runtime": 48.3367, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 25.52, |
|
"learning_rate": 2.9370000000000002e-05, |
|
"loss": 0.1145, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 25.55, |
|
"learning_rate": 2.934e-05, |
|
"loss": 0.1756, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 25.57, |
|
"learning_rate": 2.931e-05, |
|
"loss": 0.0636, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 25.6, |
|
"learning_rate": 2.928e-05, |
|
"loss": 0.1047, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 25.62, |
|
"learning_rate": 2.925e-05, |
|
"loss": 0.0842, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 25.65, |
|
"learning_rate": 2.922e-05, |
|
"loss": 0.8233, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 25.68, |
|
"learning_rate": 2.919e-05, |
|
"loss": 0.0665, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 25.7, |
|
"learning_rate": 2.916e-05, |
|
"loss": 0.1181, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 25.73, |
|
"learning_rate": 2.913e-05, |
|
"loss": 0.1675, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 25.75, |
|
"learning_rate": 2.91e-05, |
|
"loss": 0.0643, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 25.77, |
|
"learning_rate": 2.907e-05, |
|
"loss": 0.1912, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 25.8, |
|
"learning_rate": 2.904e-05, |
|
"loss": 0.1314, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 25.82, |
|
"learning_rate": 2.901e-05, |
|
"loss": 0.1107, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 25.85, |
|
"learning_rate": 2.898e-05, |
|
"loss": 0.1791, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 25.88, |
|
"learning_rate": 2.895e-05, |
|
"loss": 0.0753, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 25.9, |
|
"learning_rate": 2.892e-05, |
|
"loss": 0.1201, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 25.93, |
|
"learning_rate": 2.889e-05, |
|
"loss": 0.1003, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 25.95, |
|
"learning_rate": 2.8859999999999998e-05, |
|
"loss": 0.0946, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 25.98, |
|
"learning_rate": 2.883e-05, |
|
"loss": 0.139, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 2.88e-05, |
|
"loss": 0.2734, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"eval_loss": 0.16094741225242615, |
|
"eval_mean_accuracy": 0.9361853274982819, |
|
"eval_mean_iou": 0.5693030045598508, |
|
"eval_overall_accuracy": 0.9267023235804505, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9190055651847111, |
|
0.9533650898118528 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9087456004057097, |
|
0.7991634132738428 |
|
], |
|
"eval_runtime": 48.458, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.206, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 26.02, |
|
"learning_rate": 2.877e-05, |
|
"loss": 0.124, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 26.05, |
|
"learning_rate": 2.874e-05, |
|
"loss": 0.1015, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 26.07, |
|
"learning_rate": 2.871e-05, |
|
"loss": 0.1019, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 26.1, |
|
"learning_rate": 2.868e-05, |
|
"loss": 0.0594, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 26.12, |
|
"learning_rate": 2.865e-05, |
|
"loss": 0.1333, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 26.15, |
|
"learning_rate": 2.862e-05, |
|
"loss": 0.109, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 26.18, |
|
"learning_rate": 2.859e-05, |
|
"loss": 0.0929, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 26.2, |
|
"learning_rate": 2.856e-05, |
|
"loss": 0.3913, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 26.23, |
|
"learning_rate": 2.853e-05, |
|
"loss": 0.1095, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 26.25, |
|
"learning_rate": 2.8499999999999998e-05, |
|
"loss": 0.1029, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 26.27, |
|
"learning_rate": 2.847e-05, |
|
"loss": 0.113, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 26.3, |
|
"learning_rate": 2.844e-05, |
|
"loss": 0.0627, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 26.32, |
|
"learning_rate": 2.841e-05, |
|
"loss": 0.1225, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 26.35, |
|
"learning_rate": 2.838e-05, |
|
"loss": 0.3177, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 26.38, |
|
"learning_rate": 2.8349999999999998e-05, |
|
"loss": 0.164, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 26.4, |
|
"learning_rate": 2.832e-05, |
|
"loss": 0.0786, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 26.43, |
|
"learning_rate": 2.829e-05, |
|
"loss": 0.0325, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 26.45, |
|
"learning_rate": 2.826e-05, |
|
"loss": 0.2409, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 26.48, |
|
"learning_rate": 2.823e-05, |
|
"loss": 0.2664, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 2.8199999999999998e-05, |
|
"loss": 0.1941, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"eval_loss": 0.1887330263853073, |
|
"eval_mean_accuracy": 0.9286438591581025, |
|
"eval_mean_iou": 0.5598981742260135, |
|
"eval_overall_accuracy": 0.9052537956970016, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.8862695509491336, |
|
0.9710181673670715 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8815020187313487, |
|
0.7981925039466916 |
|
], |
|
"eval_runtime": 48.4501, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.206, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 26.52, |
|
"learning_rate": 2.817e-05, |
|
"loss": 0.5032, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 26.55, |
|
"learning_rate": 2.8139999999999998e-05, |
|
"loss": 0.0904, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 26.57, |
|
"learning_rate": 2.8110000000000004e-05, |
|
"loss": 0.0895, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 26.6, |
|
"learning_rate": 2.8080000000000002e-05, |
|
"loss": 0.1405, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 26.62, |
|
"learning_rate": 2.805e-05, |
|
"loss": 0.1647, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 26.65, |
|
"learning_rate": 2.8020000000000003e-05, |
|
"loss": 0.1523, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 26.68, |
|
"learning_rate": 2.799e-05, |
|
"loss": 0.0459, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 26.7, |
|
"learning_rate": 2.7960000000000003e-05, |
|
"loss": 0.0915, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 26.73, |
|
"learning_rate": 2.7930000000000002e-05, |
|
"loss": 0.2523, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 26.75, |
|
"learning_rate": 2.79e-05, |
|
"loss": 0.0447, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 26.77, |
|
"learning_rate": 2.7870000000000003e-05, |
|
"loss": 0.186, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 26.8, |
|
"learning_rate": 2.784e-05, |
|
"loss": 0.1905, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 26.82, |
|
"learning_rate": 2.7810000000000003e-05, |
|
"loss": 0.2621, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 26.85, |
|
"learning_rate": 2.778e-05, |
|
"loss": 0.0945, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 26.88, |
|
"learning_rate": 2.7750000000000004e-05, |
|
"loss": 0.1728, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 26.9, |
|
"learning_rate": 2.7720000000000002e-05, |
|
"loss": 0.0536, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 26.93, |
|
"learning_rate": 2.769e-05, |
|
"loss": 0.1973, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 26.95, |
|
"learning_rate": 2.7660000000000003e-05, |
|
"loss": 0.2486, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 26.98, |
|
"learning_rate": 2.763e-05, |
|
"loss": 0.0485, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 2.7600000000000003e-05, |
|
"loss": 0.2251, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"eval_loss": 0.15258844196796417, |
|
"eval_mean_accuracy": 0.9275157112111172, |
|
"eval_mean_iou": 0.5864343359742814, |
|
"eval_overall_accuracy": 0.9324786469385486, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9365067500120688, |
|
0.9185246724101657 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9172787790258706, |
|
0.8420242288969736 |
|
], |
|
"eval_runtime": 50.2818, |
|
"eval_samples_per_second": 0.398, |
|
"eval_steps_per_second": 0.199, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 27.02, |
|
"learning_rate": 2.7570000000000002e-05, |
|
"loss": 0.0773, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 27.05, |
|
"learning_rate": 2.754e-05, |
|
"loss": 0.0622, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 27.07, |
|
"learning_rate": 2.7510000000000003e-05, |
|
"loss": 0.3959, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 27.1, |
|
"learning_rate": 2.748e-05, |
|
"loss": 0.2353, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 27.12, |
|
"learning_rate": 2.7450000000000003e-05, |
|
"loss": 0.101, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 27.15, |
|
"learning_rate": 2.7420000000000002e-05, |
|
"loss": 0.1155, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 27.18, |
|
"learning_rate": 2.739e-05, |
|
"loss": 0.0893, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 27.2, |
|
"learning_rate": 2.7360000000000002e-05, |
|
"loss": 0.0436, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 27.23, |
|
"learning_rate": 2.733e-05, |
|
"loss": 0.1927, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 27.25, |
|
"learning_rate": 2.7300000000000003e-05, |
|
"loss": 0.1195, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 27.27, |
|
"learning_rate": 2.727e-05, |
|
"loss": 0.1076, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 27.3, |
|
"learning_rate": 2.724e-05, |
|
"loss": 0.2382, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 27.32, |
|
"learning_rate": 2.7210000000000002e-05, |
|
"loss": 0.1401, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 27.35, |
|
"learning_rate": 2.718e-05, |
|
"loss": 0.1697, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 27.38, |
|
"learning_rate": 2.7150000000000003e-05, |
|
"loss": 0.2192, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 27.4, |
|
"learning_rate": 2.712e-05, |
|
"loss": 0.1683, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 27.43, |
|
"learning_rate": 2.709e-05, |
|
"loss": 0.1108, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 27.45, |
|
"learning_rate": 2.7060000000000002e-05, |
|
"loss": 0.0669, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 27.48, |
|
"learning_rate": 2.703e-05, |
|
"loss": 0.2028, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 2.7000000000000002e-05, |
|
"loss": 0.0538, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"eval_loss": 0.15644869208335876, |
|
"eval_mean_accuracy": 0.9438489255368437, |
|
"eval_mean_iou": 0.5867915271754648, |
|
"eval_overall_accuracy": 0.9422081667705922, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.940876465980531, |
|
0.9468213850931565 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9294100936925702, |
|
0.8309644878338242 |
|
], |
|
"eval_runtime": 48.8799, |
|
"eval_samples_per_second": 0.409, |
|
"eval_steps_per_second": 0.205, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 27.52, |
|
"learning_rate": 2.697e-05, |
|
"loss": 0.2306, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 27.55, |
|
"learning_rate": 2.6940000000000003e-05, |
|
"loss": 0.0404, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 27.57, |
|
"learning_rate": 2.691e-05, |
|
"loss": 0.1266, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 27.6, |
|
"learning_rate": 2.688e-05, |
|
"loss": 0.1199, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 27.62, |
|
"learning_rate": 2.6850000000000002e-05, |
|
"loss": 0.0922, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 27.65, |
|
"learning_rate": 2.682e-05, |
|
"loss": 0.3366, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 27.68, |
|
"learning_rate": 2.6790000000000003e-05, |
|
"loss": 0.0693, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 27.7, |
|
"learning_rate": 2.676e-05, |
|
"loss": 0.2045, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 27.73, |
|
"learning_rate": 2.673e-05, |
|
"loss": 0.1242, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 27.75, |
|
"learning_rate": 2.6700000000000002e-05, |
|
"loss": 0.2138, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 27.77, |
|
"learning_rate": 2.667e-05, |
|
"loss": 0.0715, |
|
"step": 1111 |
|
}, |
|
{ |
|
"epoch": 27.8, |
|
"learning_rate": 2.6640000000000002e-05, |
|
"loss": 0.0564, |
|
"step": 1112 |
|
}, |
|
{ |
|
"epoch": 27.82, |
|
"learning_rate": 2.661e-05, |
|
"loss": 0.1358, |
|
"step": 1113 |
|
}, |
|
{ |
|
"epoch": 27.85, |
|
"learning_rate": 2.658e-05, |
|
"loss": 0.2516, |
|
"step": 1114 |
|
}, |
|
{ |
|
"epoch": 27.88, |
|
"learning_rate": 2.655e-05, |
|
"loss": 0.1498, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 27.9, |
|
"learning_rate": 2.652e-05, |
|
"loss": 0.2401, |
|
"step": 1116 |
|
}, |
|
{ |
|
"epoch": 27.93, |
|
"learning_rate": 2.6490000000000002e-05, |
|
"loss": 0.137, |
|
"step": 1117 |
|
}, |
|
{ |
|
"epoch": 27.95, |
|
"learning_rate": 2.646e-05, |
|
"loss": 0.1819, |
|
"step": 1118 |
|
}, |
|
{ |
|
"epoch": 27.98, |
|
"learning_rate": 2.643e-05, |
|
"loss": 0.0706, |
|
"step": 1119 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 2.64e-05, |
|
"loss": 0.1654, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"eval_loss": 0.1621515303850174, |
|
"eval_mean_accuracy": 0.9355972044866241, |
|
"eval_mean_iou": 0.5692378473169154, |
|
"eval_overall_accuracy": 0.9203213794859153, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9079229522212203, |
|
0.963271456752028 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9000634499807797, |
|
0.8076500919699665 |
|
], |
|
"eval_runtime": 48.0753, |
|
"eval_samples_per_second": 0.416, |
|
"eval_steps_per_second": 0.208, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 28.02, |
|
"learning_rate": 2.637e-05, |
|
"loss": 0.2287, |
|
"step": 1121 |
|
}, |
|
{ |
|
"epoch": 28.05, |
|
"learning_rate": 2.6340000000000002e-05, |
|
"loss": 0.1468, |
|
"step": 1122 |
|
}, |
|
{ |
|
"epoch": 28.07, |
|
"learning_rate": 2.631e-05, |
|
"loss": 0.1446, |
|
"step": 1123 |
|
}, |
|
{ |
|
"epoch": 28.1, |
|
"learning_rate": 2.628e-05, |
|
"loss": 0.092, |
|
"step": 1124 |
|
}, |
|
{ |
|
"epoch": 28.12, |
|
"learning_rate": 2.625e-05, |
|
"loss": 0.2824, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 28.15, |
|
"learning_rate": 2.622e-05, |
|
"loss": 0.1425, |
|
"step": 1126 |
|
}, |
|
{ |
|
"epoch": 28.18, |
|
"learning_rate": 2.619e-05, |
|
"loss": 0.0563, |
|
"step": 1127 |
|
}, |
|
{ |
|
"epoch": 28.2, |
|
"learning_rate": 2.616e-05, |
|
"loss": 0.1111, |
|
"step": 1128 |
|
}, |
|
{ |
|
"epoch": 28.23, |
|
"learning_rate": 2.6130000000000002e-05, |
|
"loss": 0.1351, |
|
"step": 1129 |
|
}, |
|
{ |
|
"epoch": 28.25, |
|
"learning_rate": 2.61e-05, |
|
"loss": 0.1631, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 28.27, |
|
"learning_rate": 2.607e-05, |
|
"loss": 0.2917, |
|
"step": 1131 |
|
}, |
|
{ |
|
"epoch": 28.3, |
|
"learning_rate": 2.604e-05, |
|
"loss": 0.1254, |
|
"step": 1132 |
|
}, |
|
{ |
|
"epoch": 28.32, |
|
"learning_rate": 2.601e-05, |
|
"loss": 0.0973, |
|
"step": 1133 |
|
}, |
|
{ |
|
"epoch": 28.35, |
|
"learning_rate": 2.5980000000000002e-05, |
|
"loss": 0.0614, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 28.38, |
|
"learning_rate": 2.595e-05, |
|
"loss": 0.251, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 28.4, |
|
"learning_rate": 2.592e-05, |
|
"loss": 0.0512, |
|
"step": 1136 |
|
}, |
|
{ |
|
"epoch": 28.43, |
|
"learning_rate": 2.589e-05, |
|
"loss": 0.1044, |
|
"step": 1137 |
|
}, |
|
{ |
|
"epoch": 28.45, |
|
"learning_rate": 2.586e-05, |
|
"loss": 0.1466, |
|
"step": 1138 |
|
}, |
|
{ |
|
"epoch": 28.48, |
|
"learning_rate": 2.5830000000000002e-05, |
|
"loss": 0.1983, |
|
"step": 1139 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 2.58e-05, |
|
"loss": 0.1489, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"eval_loss": 0.15565548837184906, |
|
"eval_mean_accuracy": 0.9209178913121763, |
|
"eval_mean_iou": 0.5668986820631488, |
|
"eval_overall_accuracy": 0.9245754764862553, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9275441085400488, |
|
0.9142916740843039 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9072242990796557, |
|
0.7934717471097905 |
|
], |
|
"eval_runtime": 48.5499, |
|
"eval_samples_per_second": 0.412, |
|
"eval_steps_per_second": 0.206, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 28.52, |
|
"learning_rate": 2.577e-05, |
|
"loss": 0.0775, |
|
"step": 1141 |
|
}, |
|
{ |
|
"epoch": 28.55, |
|
"learning_rate": 2.574e-05, |
|
"loss": 0.1085, |
|
"step": 1142 |
|
}, |
|
{ |
|
"epoch": 28.57, |
|
"learning_rate": 2.571e-05, |
|
"loss": 0.0434, |
|
"step": 1143 |
|
}, |
|
{ |
|
"epoch": 28.6, |
|
"learning_rate": 2.568e-05, |
|
"loss": 0.0625, |
|
"step": 1144 |
|
}, |
|
{ |
|
"epoch": 28.62, |
|
"learning_rate": 2.565e-05, |
|
"loss": 0.1077, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 28.65, |
|
"learning_rate": 2.562e-05, |
|
"loss": 0.0601, |
|
"step": 1146 |
|
}, |
|
{ |
|
"epoch": 28.68, |
|
"learning_rate": 2.559e-05, |
|
"loss": 0.0678, |
|
"step": 1147 |
|
}, |
|
{ |
|
"epoch": 28.7, |
|
"learning_rate": 2.556e-05, |
|
"loss": 0.1832, |
|
"step": 1148 |
|
}, |
|
{ |
|
"epoch": 28.73, |
|
"learning_rate": 2.553e-05, |
|
"loss": 0.1015, |
|
"step": 1149 |
|
}, |
|
{ |
|
"epoch": 28.75, |
|
"learning_rate": 2.55e-05, |
|
"loss": 0.2185, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 28.77, |
|
"learning_rate": 2.547e-05, |
|
"loss": 0.1194, |
|
"step": 1151 |
|
}, |
|
{ |
|
"epoch": 28.8, |
|
"learning_rate": 2.544e-05, |
|
"loss": 0.0534, |
|
"step": 1152 |
|
}, |
|
{ |
|
"epoch": 28.82, |
|
"learning_rate": 2.541e-05, |
|
"loss": 0.144, |
|
"step": 1153 |
|
}, |
|
{ |
|
"epoch": 28.85, |
|
"learning_rate": 2.538e-05, |
|
"loss": 0.039, |
|
"step": 1154 |
|
}, |
|
{ |
|
"epoch": 28.88, |
|
"learning_rate": 2.535e-05, |
|
"loss": 0.0868, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 28.9, |
|
"learning_rate": 2.5319999999999998e-05, |
|
"loss": 1.4289, |
|
"step": 1156 |
|
}, |
|
{ |
|
"epoch": 28.93, |
|
"learning_rate": 2.529e-05, |
|
"loss": 0.1481, |
|
"step": 1157 |
|
}, |
|
{ |
|
"epoch": 28.95, |
|
"learning_rate": 2.526e-05, |
|
"loss": 0.2125, |
|
"step": 1158 |
|
}, |
|
{ |
|
"epoch": 28.98, |
|
"learning_rate": 2.523e-05, |
|
"loss": 0.0757, |
|
"step": 1159 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 2.52e-05, |
|
"loss": 0.1042, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"eval_loss": 0.15596379339694977, |
|
"eval_mean_accuracy": 0.9337504296205468, |
|
"eval_mean_iou": 0.5750669188738581, |
|
"eval_overall_accuracy": 0.9230653344869868, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9143929141873538, |
|
0.9531079450537396 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9047500915523738, |
|
0.8204506650692003 |
|
], |
|
"eval_runtime": 49.0758, |
|
"eval_samples_per_second": 0.408, |
|
"eval_steps_per_second": 0.204, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 29.02, |
|
"learning_rate": 2.517e-05, |
|
"loss": 0.1404, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 29.05, |
|
"learning_rate": 2.514e-05, |
|
"loss": 0.1236, |
|
"step": 1162 |
|
}, |
|
{ |
|
"epoch": 29.07, |
|
"learning_rate": 2.511e-05, |
|
"loss": 0.1096, |
|
"step": 1163 |
|
}, |
|
{ |
|
"epoch": 29.1, |
|
"learning_rate": 2.508e-05, |
|
"loss": 0.0628, |
|
"step": 1164 |
|
}, |
|
{ |
|
"epoch": 29.12, |
|
"learning_rate": 2.505e-05, |
|
"loss": 0.0571, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 29.15, |
|
"learning_rate": 2.502e-05, |
|
"loss": 0.0366, |
|
"step": 1166 |
|
}, |
|
{ |
|
"epoch": 29.18, |
|
"learning_rate": 2.499e-05, |
|
"loss": 0.2344, |
|
"step": 1167 |
|
}, |
|
{ |
|
"epoch": 29.2, |
|
"learning_rate": 2.4959999999999998e-05, |
|
"loss": 0.1239, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 29.23, |
|
"learning_rate": 2.493e-05, |
|
"loss": 0.0831, |
|
"step": 1169 |
|
}, |
|
{ |
|
"epoch": 29.25, |
|
"learning_rate": 2.49e-05, |
|
"loss": 0.2479, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 29.27, |
|
"learning_rate": 2.487e-05, |
|
"loss": 0.1274, |
|
"step": 1171 |
|
}, |
|
{ |
|
"epoch": 29.3, |
|
"learning_rate": 2.484e-05, |
|
"loss": 0.1368, |
|
"step": 1172 |
|
}, |
|
{ |
|
"epoch": 29.32, |
|
"learning_rate": 2.4809999999999998e-05, |
|
"loss": 0.0378, |
|
"step": 1173 |
|
}, |
|
{ |
|
"epoch": 29.35, |
|
"learning_rate": 2.478e-05, |
|
"loss": 0.0683, |
|
"step": 1174 |
|
}, |
|
{ |
|
"epoch": 29.38, |
|
"learning_rate": 2.475e-05, |
|
"loss": 0.1213, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 29.4, |
|
"learning_rate": 2.472e-05, |
|
"loss": 0.0405, |
|
"step": 1176 |
|
}, |
|
{ |
|
"epoch": 29.43, |
|
"learning_rate": 2.469e-05, |
|
"loss": 0.1525, |
|
"step": 1177 |
|
}, |
|
{ |
|
"epoch": 29.45, |
|
"learning_rate": 2.4659999999999998e-05, |
|
"loss": 0.2468, |
|
"step": 1178 |
|
}, |
|
{ |
|
"epoch": 29.48, |
|
"learning_rate": 2.463e-05, |
|
"loss": 0.0625, |
|
"step": 1179 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 2.4599999999999998e-05, |
|
"loss": 0.2673, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"eval_loss": 0.14658869802951813, |
|
"eval_mean_accuracy": 0.9183105779793636, |
|
"eval_mean_iou": 0.5774838423825798, |
|
"eval_overall_accuracy": 0.9306454497556542, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9406568900890294, |
|
0.8959642658696977 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.915568638090893, |
|
0.8168828890568463 |
|
], |
|
"eval_runtime": 48.0493, |
|
"eval_samples_per_second": 0.416, |
|
"eval_steps_per_second": 0.208, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 29.52, |
|
"learning_rate": 2.457e-05, |
|
"loss": 0.0919, |
|
"step": 1181 |
|
}, |
|
{ |
|
"epoch": 29.55, |
|
"learning_rate": 2.454e-05, |
|
"loss": 0.1101, |
|
"step": 1182 |
|
}, |
|
{ |
|
"epoch": 29.57, |
|
"learning_rate": 2.4509999999999997e-05, |
|
"loss": 0.1841, |
|
"step": 1183 |
|
}, |
|
{ |
|
"epoch": 29.6, |
|
"learning_rate": 2.448e-05, |
|
"loss": 0.0482, |
|
"step": 1184 |
|
}, |
|
{ |
|
"epoch": 29.62, |
|
"learning_rate": 2.4449999999999998e-05, |
|
"loss": 0.047, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 29.65, |
|
"learning_rate": 2.442e-05, |
|
"loss": 0.0609, |
|
"step": 1186 |
|
}, |
|
{ |
|
"epoch": 29.68, |
|
"learning_rate": 2.439e-05, |
|
"loss": 0.038, |
|
"step": 1187 |
|
}, |
|
{ |
|
"epoch": 29.7, |
|
"learning_rate": 2.4360000000000004e-05, |
|
"loss": 0.1407, |
|
"step": 1188 |
|
}, |
|
{ |
|
"epoch": 29.73, |
|
"learning_rate": 2.4330000000000003e-05, |
|
"loss": 0.0914, |
|
"step": 1189 |
|
}, |
|
{ |
|
"epoch": 29.75, |
|
"learning_rate": 2.43e-05, |
|
"loss": 0.2476, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 29.77, |
|
"learning_rate": 2.4270000000000003e-05, |
|
"loss": 0.0884, |
|
"step": 1191 |
|
}, |
|
{ |
|
"epoch": 29.8, |
|
"learning_rate": 2.4240000000000002e-05, |
|
"loss": 0.172, |
|
"step": 1192 |
|
}, |
|
{ |
|
"epoch": 29.82, |
|
"learning_rate": 2.4210000000000004e-05, |
|
"loss": 0.0579, |
|
"step": 1193 |
|
}, |
|
{ |
|
"epoch": 29.85, |
|
"learning_rate": 2.4180000000000002e-05, |
|
"loss": 0.1086, |
|
"step": 1194 |
|
}, |
|
{ |
|
"epoch": 29.88, |
|
"learning_rate": 2.415e-05, |
|
"loss": 0.0723, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 29.9, |
|
"learning_rate": 2.4120000000000003e-05, |
|
"loss": 0.0915, |
|
"step": 1196 |
|
}, |
|
{ |
|
"epoch": 29.93, |
|
"learning_rate": 2.409e-05, |
|
"loss": 0.1965, |
|
"step": 1197 |
|
}, |
|
{ |
|
"epoch": 29.95, |
|
"learning_rate": 2.4060000000000003e-05, |
|
"loss": 0.222, |
|
"step": 1198 |
|
}, |
|
{ |
|
"epoch": 29.98, |
|
"learning_rate": 2.4030000000000002e-05, |
|
"loss": 0.1071, |
|
"step": 1199 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 2.4e-05, |
|
"loss": 0.2407, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_loss": 0.13716813921928406, |
|
"eval_mean_accuracy": 0.9278948485402951, |
|
"eval_mean_iou": 0.5774629255691858, |
|
"eval_overall_accuracy": 0.9275224487099545, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9272201951681884, |
|
0.9285695019124017 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9111556483707967, |
|
0.8212331283367605 |
|
], |
|
"eval_runtime": 48.011, |
|
"eval_samples_per_second": 0.417, |
|
"eval_steps_per_second": 0.208, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 30.02, |
|
"learning_rate": 2.3970000000000003e-05, |
|
"loss": 0.1321, |
|
"step": 1201 |
|
}, |
|
{ |
|
"epoch": 30.05, |
|
"learning_rate": 2.394e-05, |
|
"loss": 0.0936, |
|
"step": 1202 |
|
}, |
|
{ |
|
"epoch": 30.07, |
|
"learning_rate": 2.3910000000000003e-05, |
|
"loss": 0.059, |
|
"step": 1203 |
|
}, |
|
{ |
|
"epoch": 30.1, |
|
"learning_rate": 2.3880000000000002e-05, |
|
"loss": 0.1303, |
|
"step": 1204 |
|
}, |
|
{ |
|
"epoch": 30.12, |
|
"learning_rate": 2.385e-05, |
|
"loss": 0.1556, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 30.15, |
|
"learning_rate": 2.3820000000000002e-05, |
|
"loss": 0.9338, |
|
"step": 1206 |
|
}, |
|
{ |
|
"epoch": 30.18, |
|
"learning_rate": 2.379e-05, |
|
"loss": 0.2618, |
|
"step": 1207 |
|
}, |
|
{ |
|
"epoch": 30.2, |
|
"learning_rate": 2.3760000000000003e-05, |
|
"loss": 0.3154, |
|
"step": 1208 |
|
}, |
|
{ |
|
"epoch": 30.23, |
|
"learning_rate": 2.373e-05, |
|
"loss": 0.1943, |
|
"step": 1209 |
|
}, |
|
{ |
|
"epoch": 30.25, |
|
"learning_rate": 2.37e-05, |
|
"loss": 0.0261, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 30.27, |
|
"learning_rate": 2.3670000000000002e-05, |
|
"loss": 0.1178, |
|
"step": 1211 |
|
}, |
|
{ |
|
"epoch": 30.3, |
|
"learning_rate": 2.364e-05, |
|
"loss": 0.0772, |
|
"step": 1212 |
|
}, |
|
{ |
|
"epoch": 30.32, |
|
"learning_rate": 2.3610000000000003e-05, |
|
"loss": 0.2255, |
|
"step": 1213 |
|
}, |
|
{ |
|
"epoch": 30.35, |
|
"learning_rate": 2.358e-05, |
|
"loss": 0.1229, |
|
"step": 1214 |
|
}, |
|
{ |
|
"epoch": 30.38, |
|
"learning_rate": 2.3550000000000003e-05, |
|
"loss": 0.0847, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 30.4, |
|
"learning_rate": 2.3520000000000002e-05, |
|
"loss": 0.1285, |
|
"step": 1216 |
|
}, |
|
{ |
|
"epoch": 30.43, |
|
"learning_rate": 2.349e-05, |
|
"loss": 0.1394, |
|
"step": 1217 |
|
}, |
|
{ |
|
"epoch": 30.45, |
|
"learning_rate": 2.3460000000000002e-05, |
|
"loss": 0.1079, |
|
"step": 1218 |
|
}, |
|
{ |
|
"epoch": 30.48, |
|
"learning_rate": 2.343e-05, |
|
"loss": 0.08, |
|
"step": 1219 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 2.3400000000000003e-05, |
|
"loss": 0.156, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"eval_loss": 0.14172598719596863, |
|
"eval_mean_accuracy": 0.9438137205101318, |
|
"eval_mean_iou": 0.5857059186137794, |
|
"eval_overall_accuracy": 0.9402666131732398, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9373876490377853, |
|
0.9502397919824782 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9253977014462942, |
|
0.831720054395044 |
|
], |
|
"eval_runtime": 48.2687, |
|
"eval_samples_per_second": 0.414, |
|
"eval_steps_per_second": 0.207, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 30.52, |
|
"learning_rate": 2.337e-05, |
|
"loss": 0.1637, |
|
"step": 1221 |
|
}, |
|
{ |
|
"epoch": 30.55, |
|
"learning_rate": 2.334e-05, |
|
"loss": 0.0296, |
|
"step": 1222 |
|
}, |
|
{ |
|
"epoch": 30.57, |
|
"learning_rate": 2.3310000000000002e-05, |
|
"loss": 0.0958, |
|
"step": 1223 |
|
}, |
|
{ |
|
"epoch": 30.6, |
|
"learning_rate": 2.328e-05, |
|
"loss": 0.3513, |
|
"step": 1224 |
|
}, |
|
{ |
|
"epoch": 30.62, |
|
"learning_rate": 2.3250000000000003e-05, |
|
"loss": 0.0307, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 30.65, |
|
"learning_rate": 2.322e-05, |
|
"loss": 0.0679, |
|
"step": 1226 |
|
}, |
|
{ |
|
"epoch": 30.68, |
|
"learning_rate": 2.319e-05, |
|
"loss": 0.0917, |
|
"step": 1227 |
|
}, |
|
{ |
|
"epoch": 30.7, |
|
"learning_rate": 2.3160000000000002e-05, |
|
"loss": 0.0461, |
|
"step": 1228 |
|
}, |
|
{ |
|
"epoch": 30.73, |
|
"learning_rate": 2.313e-05, |
|
"loss": 0.0671, |
|
"step": 1229 |
|
}, |
|
{ |
|
"epoch": 30.75, |
|
"learning_rate": 2.3100000000000002e-05, |
|
"loss": 0.1705, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 30.77, |
|
"learning_rate": 2.307e-05, |
|
"loss": 0.102, |
|
"step": 1231 |
|
}, |
|
{ |
|
"epoch": 30.8, |
|
"learning_rate": 2.304e-05, |
|
"loss": 0.0605, |
|
"step": 1232 |
|
}, |
|
{ |
|
"epoch": 30.82, |
|
"learning_rate": 2.301e-05, |
|
"loss": 0.0652, |
|
"step": 1233 |
|
}, |
|
{ |
|
"epoch": 30.85, |
|
"learning_rate": 2.298e-05, |
|
"loss": 0.1386, |
|
"step": 1234 |
|
}, |
|
{ |
|
"epoch": 30.88, |
|
"learning_rate": 2.2950000000000002e-05, |
|
"loss": 0.1284, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 30.9, |
|
"learning_rate": 2.292e-05, |
|
"loss": 0.2326, |
|
"step": 1236 |
|
}, |
|
{ |
|
"epoch": 30.93, |
|
"learning_rate": 2.289e-05, |
|
"loss": 0.0967, |
|
"step": 1237 |
|
}, |
|
{ |
|
"epoch": 30.95, |
|
"learning_rate": 2.286e-05, |
|
"loss": 0.0923, |
|
"step": 1238 |
|
}, |
|
{ |
|
"epoch": 30.98, |
|
"learning_rate": 2.283e-05, |
|
"loss": 0.0836, |
|
"step": 1239 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 2.2800000000000002e-05, |
|
"loss": 0.2055, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"eval_loss": 0.13539931178092957, |
|
"eval_mean_accuracy": 0.9464552948800316, |
|
"eval_mean_iou": 0.587195085273711, |
|
"eval_overall_accuracy": 0.9394420571122895, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9337498527076614, |
|
0.9591607370524018 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9252739587919888, |
|
0.8363112970291441 |
|
], |
|
"eval_runtime": 48.8168, |
|
"eval_samples_per_second": 0.41, |
|
"eval_steps_per_second": 0.205, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 31.02, |
|
"learning_rate": 2.277e-05, |
|
"loss": 0.0609, |
|
"step": 1241 |
|
}, |
|
{ |
|
"epoch": 31.05, |
|
"learning_rate": 2.274e-05, |
|
"loss": 0.0757, |
|
"step": 1242 |
|
}, |
|
{ |
|
"epoch": 31.07, |
|
"learning_rate": 2.271e-05, |
|
"loss": 0.0706, |
|
"step": 1243 |
|
}, |
|
{ |
|
"epoch": 31.1, |
|
"learning_rate": 2.268e-05, |
|
"loss": 0.0559, |
|
"step": 1244 |
|
}, |
|
{ |
|
"epoch": 31.12, |
|
"learning_rate": 2.265e-05, |
|
"loss": 0.1757, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 31.15, |
|
"learning_rate": 2.262e-05, |
|
"loss": 0.0531, |
|
"step": 1246 |
|
}, |
|
{ |
|
"epoch": 31.18, |
|
"learning_rate": 2.2590000000000002e-05, |
|
"loss": 0.19, |
|
"step": 1247 |
|
}, |
|
{ |
|
"epoch": 31.2, |
|
"learning_rate": 2.256e-05, |
|
"loss": 0.0815, |
|
"step": 1248 |
|
}, |
|
{ |
|
"epoch": 31.23, |
|
"learning_rate": 2.253e-05, |
|
"loss": 0.2507, |
|
"step": 1249 |
|
}, |
|
{ |
|
"epoch": 31.25, |
|
"learning_rate": 2.25e-05, |
|
"loss": 0.0954, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 31.27, |
|
"learning_rate": 2.247e-05, |
|
"loss": 0.366, |
|
"step": 1251 |
|
}, |
|
{ |
|
"epoch": 31.3, |
|
"learning_rate": 2.2440000000000002e-05, |
|
"loss": 0.2605, |
|
"step": 1252 |
|
}, |
|
{ |
|
"epoch": 31.32, |
|
"learning_rate": 2.241e-05, |
|
"loss": 0.2055, |
|
"step": 1253 |
|
}, |
|
{ |
|
"epoch": 31.35, |
|
"learning_rate": 2.238e-05, |
|
"loss": 0.1824, |
|
"step": 1254 |
|
}, |
|
{ |
|
"epoch": 31.38, |
|
"learning_rate": 2.235e-05, |
|
"loss": 0.0568, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 31.4, |
|
"learning_rate": 2.232e-05, |
|
"loss": 0.0994, |
|
"step": 1256 |
|
}, |
|
{ |
|
"epoch": 31.43, |
|
"learning_rate": 2.2290000000000002e-05, |
|
"loss": 0.0737, |
|
"step": 1257 |
|
}, |
|
{ |
|
"epoch": 31.45, |
|
"learning_rate": 2.226e-05, |
|
"loss": 0.1399, |
|
"step": 1258 |
|
}, |
|
{ |
|
"epoch": 31.48, |
|
"learning_rate": 2.223e-05, |
|
"loss": 0.182, |
|
"step": 1259 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 2.22e-05, |
|
"loss": 0.1991, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"eval_loss": 0.19345512986183167, |
|
"eval_mean_accuracy": 0.9347760583245306, |
|
"eval_mean_iou": 0.5441659817453899, |
|
"eval_overall_accuracy": 0.9125732916342403, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.894552701068966, |
|
0.9749994155800952 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.8895013422759901, |
|
0.7429966029601797 |
|
], |
|
"eval_runtime": 48.2394, |
|
"eval_samples_per_second": 0.415, |
|
"eval_steps_per_second": 0.207, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 31.52, |
|
"learning_rate": 2.217e-05, |
|
"loss": 0.1023, |
|
"step": 1261 |
|
}, |
|
{ |
|
"epoch": 31.55, |
|
"learning_rate": 2.214e-05, |
|
"loss": 0.0606, |
|
"step": 1262 |
|
}, |
|
{ |
|
"epoch": 31.57, |
|
"learning_rate": 2.211e-05, |
|
"loss": 0.1546, |
|
"step": 1263 |
|
}, |
|
{ |
|
"epoch": 31.6, |
|
"learning_rate": 2.208e-05, |
|
"loss": 0.0425, |
|
"step": 1264 |
|
}, |
|
{ |
|
"epoch": 31.62, |
|
"learning_rate": 2.205e-05, |
|
"loss": 0.1535, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 31.65, |
|
"learning_rate": 2.202e-05, |
|
"loss": 0.1127, |
|
"step": 1266 |
|
}, |
|
{ |
|
"epoch": 31.68, |
|
"learning_rate": 2.199e-05, |
|
"loss": 0.2647, |
|
"step": 1267 |
|
}, |
|
{ |
|
"epoch": 31.7, |
|
"learning_rate": 2.196e-05, |
|
"loss": 0.2571, |
|
"step": 1268 |
|
}, |
|
{ |
|
"epoch": 31.73, |
|
"learning_rate": 2.193e-05, |
|
"loss": 0.1019, |
|
"step": 1269 |
|
}, |
|
{ |
|
"epoch": 31.75, |
|
"learning_rate": 2.19e-05, |
|
"loss": 0.0443, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 31.77, |
|
"learning_rate": 2.187e-05, |
|
"loss": 0.1379, |
|
"step": 1271 |
|
}, |
|
{ |
|
"epoch": 31.8, |
|
"learning_rate": 2.184e-05, |
|
"loss": 0.2044, |
|
"step": 1272 |
|
}, |
|
{ |
|
"epoch": 31.82, |
|
"learning_rate": 2.181e-05, |
|
"loss": 0.0275, |
|
"step": 1273 |
|
}, |
|
{ |
|
"epoch": 31.85, |
|
"learning_rate": 2.178e-05, |
|
"loss": 0.0409, |
|
"step": 1274 |
|
}, |
|
{ |
|
"epoch": 31.88, |
|
"learning_rate": 2.175e-05, |
|
"loss": 0.068, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 31.9, |
|
"learning_rate": 2.172e-05, |
|
"loss": 0.0255, |
|
"step": 1276 |
|
}, |
|
{ |
|
"epoch": 31.93, |
|
"learning_rate": 2.169e-05, |
|
"loss": 0.1377, |
|
"step": 1277 |
|
}, |
|
{ |
|
"epoch": 31.95, |
|
"learning_rate": 2.166e-05, |
|
"loss": 0.1298, |
|
"step": 1278 |
|
}, |
|
{ |
|
"epoch": 31.98, |
|
"learning_rate": 2.163e-05, |
|
"loss": 0.1004, |
|
"step": 1279 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 2.16e-05, |
|
"loss": 0.0246, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"eval_loss": 0.13773247599601746, |
|
"eval_mean_accuracy": 0.9310309811807711, |
|
"eval_mean_iou": 0.5852262909654686, |
|
"eval_overall_accuracy": 0.9354260219741974, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9389932004146506, |
|
0.9230687619468915 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9211641944119524, |
|
0.8345146784844534 |
|
], |
|
"eval_runtime": 48.3932, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.207, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 32.02, |
|
"learning_rate": 2.157e-05, |
|
"loss": 0.1107, |
|
"step": 1281 |
|
}, |
|
{ |
|
"epoch": 32.05, |
|
"learning_rate": 2.154e-05, |
|
"loss": 0.1925, |
|
"step": 1282 |
|
}, |
|
{ |
|
"epoch": 32.08, |
|
"learning_rate": 2.151e-05, |
|
"loss": 0.1466, |
|
"step": 1283 |
|
}, |
|
{ |
|
"epoch": 32.1, |
|
"learning_rate": 2.148e-05, |
|
"loss": 0.0485, |
|
"step": 1284 |
|
}, |
|
{ |
|
"epoch": 32.12, |
|
"learning_rate": 2.145e-05, |
|
"loss": 0.1816, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 32.15, |
|
"learning_rate": 2.1419999999999998e-05, |
|
"loss": 0.1174, |
|
"step": 1286 |
|
}, |
|
{ |
|
"epoch": 32.17, |
|
"learning_rate": 2.139e-05, |
|
"loss": 0.4293, |
|
"step": 1287 |
|
}, |
|
{ |
|
"epoch": 32.2, |
|
"learning_rate": 2.136e-05, |
|
"loss": 0.1377, |
|
"step": 1288 |
|
}, |
|
{ |
|
"epoch": 32.23, |
|
"learning_rate": 2.133e-05, |
|
"loss": 0.0382, |
|
"step": 1289 |
|
}, |
|
{ |
|
"epoch": 32.25, |
|
"learning_rate": 2.13e-05, |
|
"loss": 0.0932, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 32.27, |
|
"learning_rate": 2.1269999999999998e-05, |
|
"loss": 0.1005, |
|
"step": 1291 |
|
}, |
|
{ |
|
"epoch": 32.3, |
|
"learning_rate": 2.124e-05, |
|
"loss": 0.1093, |
|
"step": 1292 |
|
}, |
|
{ |
|
"epoch": 32.33, |
|
"learning_rate": 2.121e-05, |
|
"loss": 0.229, |
|
"step": 1293 |
|
}, |
|
{ |
|
"epoch": 32.35, |
|
"learning_rate": 2.118e-05, |
|
"loss": 0.1214, |
|
"step": 1294 |
|
}, |
|
{ |
|
"epoch": 32.38, |
|
"learning_rate": 2.115e-05, |
|
"loss": 0.0534, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 32.4, |
|
"learning_rate": 2.1119999999999998e-05, |
|
"loss": 0.0326, |
|
"step": 1296 |
|
}, |
|
{ |
|
"epoch": 32.42, |
|
"learning_rate": 2.109e-05, |
|
"loss": 0.0355, |
|
"step": 1297 |
|
}, |
|
{ |
|
"epoch": 32.45, |
|
"learning_rate": 2.1059999999999998e-05, |
|
"loss": 0.1252, |
|
"step": 1298 |
|
}, |
|
{ |
|
"epoch": 32.48, |
|
"learning_rate": 2.103e-05, |
|
"loss": 0.1066, |
|
"step": 1299 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 2.1e-05, |
|
"loss": 0.1121, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"eval_loss": 0.15950095653533936, |
|
"eval_mean_accuracy": 0.9427759583816913, |
|
"eval_mean_iou": 0.5842666722586219, |
|
"eval_overall_accuracy": 0.935530350269159, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.929649545457141, |
|
0.9559023713062414 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9197659954949357, |
|
0.8330340212809302 |
|
], |
|
"eval_runtime": 49.099, |
|
"eval_samples_per_second": 0.407, |
|
"eval_steps_per_second": 0.204, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 32.52, |
|
"learning_rate": 2.097e-05, |
|
"loss": 0.1008, |
|
"step": 1301 |
|
}, |
|
{ |
|
"epoch": 32.55, |
|
"learning_rate": 2.094e-05, |
|
"loss": 0.1884, |
|
"step": 1302 |
|
}, |
|
{ |
|
"epoch": 32.58, |
|
"learning_rate": 2.0909999999999998e-05, |
|
"loss": 0.0282, |
|
"step": 1303 |
|
}, |
|
{ |
|
"epoch": 32.6, |
|
"learning_rate": 2.088e-05, |
|
"loss": 0.0394, |
|
"step": 1304 |
|
}, |
|
{ |
|
"epoch": 32.62, |
|
"learning_rate": 2.085e-05, |
|
"loss": 0.055, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 32.65, |
|
"learning_rate": 2.082e-05, |
|
"loss": 0.0894, |
|
"step": 1306 |
|
}, |
|
{ |
|
"epoch": 32.67, |
|
"learning_rate": 2.079e-05, |
|
"loss": 0.0912, |
|
"step": 1307 |
|
}, |
|
{ |
|
"epoch": 32.7, |
|
"learning_rate": 2.0759999999999998e-05, |
|
"loss": 0.1385, |
|
"step": 1308 |
|
}, |
|
{ |
|
"epoch": 32.73, |
|
"learning_rate": 2.073e-05, |
|
"loss": 0.0427, |
|
"step": 1309 |
|
}, |
|
{ |
|
"epoch": 32.75, |
|
"learning_rate": 2.07e-05, |
|
"loss": 0.1541, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 32.77, |
|
"learning_rate": 2.067e-05, |
|
"loss": 0.151, |
|
"step": 1311 |
|
}, |
|
{ |
|
"epoch": 32.8, |
|
"learning_rate": 2.064e-05, |
|
"loss": 0.067, |
|
"step": 1312 |
|
}, |
|
{ |
|
"epoch": 32.83, |
|
"learning_rate": 2.061e-05, |
|
"loss": 0.1293, |
|
"step": 1313 |
|
}, |
|
{ |
|
"epoch": 32.85, |
|
"learning_rate": 2.0580000000000003e-05, |
|
"loss": 0.0954, |
|
"step": 1314 |
|
}, |
|
{ |
|
"epoch": 32.88, |
|
"learning_rate": 2.055e-05, |
|
"loss": 0.2732, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 32.9, |
|
"learning_rate": 2.0520000000000003e-05, |
|
"loss": 0.098, |
|
"step": 1316 |
|
}, |
|
{ |
|
"epoch": 32.92, |
|
"learning_rate": 2.0490000000000002e-05, |
|
"loss": 0.065, |
|
"step": 1317 |
|
}, |
|
{ |
|
"epoch": 32.95, |
|
"learning_rate": 2.046e-05, |
|
"loss": 0.0508, |
|
"step": 1318 |
|
}, |
|
{ |
|
"epoch": 32.98, |
|
"learning_rate": 2.0430000000000003e-05, |
|
"loss": 0.0714, |
|
"step": 1319 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 2.04e-05, |
|
"loss": 0.1412, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"eval_loss": 0.15888680517673492, |
|
"eval_mean_accuracy": 0.9359989685387133, |
|
"eval_mean_iou": 0.5682434711799925, |
|
"eval_overall_accuracy": 0.9230065239423366, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9124613730708597, |
|
0.9595365640065671 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9037129387329121, |
|
0.8010174748070655 |
|
], |
|
"eval_runtime": 48.665, |
|
"eval_samples_per_second": 0.411, |
|
"eval_steps_per_second": 0.205, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 33.02, |
|
"learning_rate": 2.0370000000000003e-05, |
|
"loss": 0.0426, |
|
"step": 1321 |
|
}, |
|
{ |
|
"epoch": 33.05, |
|
"learning_rate": 2.0340000000000002e-05, |
|
"loss": 0.1127, |
|
"step": 1322 |
|
}, |
|
{ |
|
"epoch": 33.08, |
|
"learning_rate": 2.031e-05, |
|
"loss": 0.0802, |
|
"step": 1323 |
|
}, |
|
{ |
|
"epoch": 33.1, |
|
"learning_rate": 2.0280000000000002e-05, |
|
"loss": 0.1685, |
|
"step": 1324 |
|
}, |
|
{ |
|
"epoch": 33.12, |
|
"learning_rate": 2.025e-05, |
|
"loss": 0.1417, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 33.15, |
|
"learning_rate": 2.0220000000000003e-05, |
|
"loss": 0.0906, |
|
"step": 1326 |
|
}, |
|
{ |
|
"epoch": 33.17, |
|
"learning_rate": 2.019e-05, |
|
"loss": 0.1718, |
|
"step": 1327 |
|
}, |
|
{ |
|
"epoch": 33.2, |
|
"learning_rate": 2.016e-05, |
|
"loss": 0.1187, |
|
"step": 1328 |
|
}, |
|
{ |
|
"epoch": 33.23, |
|
"learning_rate": 2.0130000000000002e-05, |
|
"loss": 0.2979, |
|
"step": 1329 |
|
}, |
|
{ |
|
"epoch": 33.25, |
|
"learning_rate": 2.01e-05, |
|
"loss": 0.1852, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 33.27, |
|
"learning_rate": 2.0070000000000003e-05, |
|
"loss": 0.0292, |
|
"step": 1331 |
|
}, |
|
{ |
|
"epoch": 33.3, |
|
"learning_rate": 2.004e-05, |
|
"loss": 0.0987, |
|
"step": 1332 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 2.0010000000000003e-05, |
|
"loss": 0.097, |
|
"step": 1333 |
|
}, |
|
{ |
|
"epoch": 33.35, |
|
"learning_rate": 1.9980000000000002e-05, |
|
"loss": 0.0652, |
|
"step": 1334 |
|
}, |
|
{ |
|
"epoch": 33.38, |
|
"learning_rate": 1.995e-05, |
|
"loss": 0.1437, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 33.4, |
|
"learning_rate": 1.9920000000000002e-05, |
|
"loss": 0.2821, |
|
"step": 1336 |
|
}, |
|
{ |
|
"epoch": 33.42, |
|
"learning_rate": 1.989e-05, |
|
"loss": 0.134, |
|
"step": 1337 |
|
}, |
|
{ |
|
"epoch": 33.45, |
|
"learning_rate": 1.9860000000000003e-05, |
|
"loss": 0.1025, |
|
"step": 1338 |
|
}, |
|
{ |
|
"epoch": 33.48, |
|
"learning_rate": 1.983e-05, |
|
"loss": 0.0547, |
|
"step": 1339 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 1.98e-05, |
|
"loss": 0.1543, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"eval_loss": 0.15789881348609924, |
|
"eval_mean_accuracy": 0.9333310262617378, |
|
"eval_mean_iou": 0.578741288722326, |
|
"eval_overall_accuracy": 0.9306966068732608, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9285584141948806, |
|
0.938103638328595 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9141493842516836, |
|
0.8220744819152943 |
|
], |
|
"eval_runtime": 48.3937, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.207, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 33.52, |
|
"learning_rate": 1.9770000000000002e-05, |
|
"loss": 0.0497, |
|
"step": 1341 |
|
}, |
|
{ |
|
"epoch": 33.55, |
|
"learning_rate": 1.974e-05, |
|
"loss": 0.0928, |
|
"step": 1342 |
|
}, |
|
{ |
|
"epoch": 33.58, |
|
"learning_rate": 1.9710000000000003e-05, |
|
"loss": 0.0698, |
|
"step": 1343 |
|
}, |
|
{ |
|
"epoch": 33.6, |
|
"learning_rate": 1.968e-05, |
|
"loss": 0.1718, |
|
"step": 1344 |
|
}, |
|
{ |
|
"epoch": 33.62, |
|
"learning_rate": 1.965e-05, |
|
"loss": 0.1448, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 33.65, |
|
"learning_rate": 1.9620000000000002e-05, |
|
"loss": 0.0541, |
|
"step": 1346 |
|
}, |
|
{ |
|
"epoch": 33.67, |
|
"learning_rate": 1.959e-05, |
|
"loss": 0.1672, |
|
"step": 1347 |
|
}, |
|
{ |
|
"epoch": 33.7, |
|
"learning_rate": 1.9560000000000002e-05, |
|
"loss": 0.1103, |
|
"step": 1348 |
|
}, |
|
{ |
|
"epoch": 33.73, |
|
"learning_rate": 1.953e-05, |
|
"loss": 0.1526, |
|
"step": 1349 |
|
}, |
|
{ |
|
"epoch": 33.75, |
|
"learning_rate": 1.95e-05, |
|
"loss": 0.0793, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 33.77, |
|
"learning_rate": 1.947e-05, |
|
"loss": 0.1565, |
|
"step": 1351 |
|
}, |
|
{ |
|
"epoch": 33.8, |
|
"learning_rate": 1.944e-05, |
|
"loss": 0.1673, |
|
"step": 1352 |
|
}, |
|
{ |
|
"epoch": 33.83, |
|
"learning_rate": 1.9410000000000002e-05, |
|
"loss": 0.0978, |
|
"step": 1353 |
|
}, |
|
{ |
|
"epoch": 33.85, |
|
"learning_rate": 1.938e-05, |
|
"loss": 0.0977, |
|
"step": 1354 |
|
}, |
|
{ |
|
"epoch": 33.88, |
|
"learning_rate": 1.935e-05, |
|
"loss": 0.065, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 33.9, |
|
"learning_rate": 1.932e-05, |
|
"loss": 0.1086, |
|
"step": 1356 |
|
}, |
|
{ |
|
"epoch": 33.92, |
|
"learning_rate": 1.929e-05, |
|
"loss": 0.1271, |
|
"step": 1357 |
|
}, |
|
{ |
|
"epoch": 33.95, |
|
"learning_rate": 1.9260000000000002e-05, |
|
"loss": 0.1243, |
|
"step": 1358 |
|
}, |
|
{ |
|
"epoch": 33.98, |
|
"learning_rate": 1.923e-05, |
|
"loss": 0.2135, |
|
"step": 1359 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 1.9200000000000003e-05, |
|
"loss": 0.0749, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"eval_loss": 0.22385883331298828, |
|
"eval_mean_accuracy": 0.9420340200496059, |
|
"eval_mean_iou": 0.565621990022045, |
|
"eval_overall_accuracy": 0.9319807713687711, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.9238211811314232, |
|
0.9602468589677886 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9151379118844872, |
|
0.7817280581816478 |
|
], |
|
"eval_runtime": 47.8116, |
|
"eval_samples_per_second": 0.418, |
|
"eval_steps_per_second": 0.209, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 34.02, |
|
"learning_rate": 1.917e-05, |
|
"loss": 0.0622, |
|
"step": 1361 |
|
}, |
|
{ |
|
"epoch": 34.05, |
|
"learning_rate": 1.914e-05, |
|
"loss": 0.0533, |
|
"step": 1362 |
|
}, |
|
{ |
|
"epoch": 34.08, |
|
"learning_rate": 1.911e-05, |
|
"loss": 0.0981, |
|
"step": 1363 |
|
}, |
|
{ |
|
"epoch": 34.1, |
|
"learning_rate": 1.908e-05, |
|
"loss": 0.1633, |
|
"step": 1364 |
|
}, |
|
{ |
|
"epoch": 34.12, |
|
"learning_rate": 1.9050000000000002e-05, |
|
"loss": 0.1357, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 34.15, |
|
"learning_rate": 1.902e-05, |
|
"loss": 0.0699, |
|
"step": 1366 |
|
}, |
|
{ |
|
"epoch": 34.17, |
|
"learning_rate": 1.899e-05, |
|
"loss": 0.0902, |
|
"step": 1367 |
|
}, |
|
{ |
|
"epoch": 34.2, |
|
"learning_rate": 1.896e-05, |
|
"loss": 0.1042, |
|
"step": 1368 |
|
}, |
|
{ |
|
"epoch": 34.23, |
|
"learning_rate": 1.893e-05, |
|
"loss": 0.0877, |
|
"step": 1369 |
|
}, |
|
{ |
|
"epoch": 34.25, |
|
"learning_rate": 1.8900000000000002e-05, |
|
"loss": 0.12, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 34.27, |
|
"learning_rate": 1.887e-05, |
|
"loss": 0.0861, |
|
"step": 1371 |
|
}, |
|
{ |
|
"epoch": 34.3, |
|
"learning_rate": 1.884e-05, |
|
"loss": 0.1076, |
|
"step": 1372 |
|
}, |
|
{ |
|
"epoch": 34.33, |
|
"learning_rate": 1.881e-05, |
|
"loss": 0.1013, |
|
"step": 1373 |
|
}, |
|
{ |
|
"epoch": 34.35, |
|
"learning_rate": 1.878e-05, |
|
"loss": 0.091, |
|
"step": 1374 |
|
}, |
|
{ |
|
"epoch": 34.38, |
|
"learning_rate": 1.8750000000000002e-05, |
|
"loss": 0.0832, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 34.4, |
|
"learning_rate": 1.872e-05, |
|
"loss": 0.1889, |
|
"step": 1376 |
|
}, |
|
{ |
|
"epoch": 34.42, |
|
"learning_rate": 1.869e-05, |
|
"loss": 0.0417, |
|
"step": 1377 |
|
}, |
|
{ |
|
"epoch": 34.45, |
|
"learning_rate": 1.866e-05, |
|
"loss": 0.1096, |
|
"step": 1378 |
|
}, |
|
{ |
|
"epoch": 34.48, |
|
"learning_rate": 1.863e-05, |
|
"loss": 0.0575, |
|
"step": 1379 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 1.86e-05, |
|
"loss": 0.0426, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"eval_loss": 0.1333775818347931, |
|
"eval_mean_accuracy": 0.9364623083249072, |
|
"eval_mean_iou": 0.5894436338021803, |
|
"eval_overall_accuracy": 0.9347106279516045, |
|
"eval_per_category_accuracy": [ |
|
null,
|
0.933288899063091, |
|
0.9396357175867234 |
|
], |
|
"eval_per_category_iou": [ |
|
0.0, |
|
0.9206012726121113, |
|
0.8477296287944296 |
|
], |
|
"eval_runtime": 48.4729, |
|
"eval_samples_per_second": 0.413, |
|
"eval_steps_per_second": 0.206, |
|
"step": 1380 |
|
} |
|
], |
|
"max_steps": 2000, |
|
"num_train_epochs": 50, |
|
"total_flos": 4.83805042311168e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|