{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.398431658744812,
"min": 1.398431658744812,
"max": 1.4240044355392456,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 67099.546875,
"min": 67099.546875,
"max": 78344.109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.85042735042735,
"min": 86.02409638554217,
"max": 369.65441176470586,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49538.0,
"min": 48886.0,
"max": 50273.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999968.0,
"min": 49897.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999968.0,
"min": 49897.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.363431692123413,
"min": 0.12198834121227264,
"max": 2.4276421070098877,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1106.0860595703125,
"min": 16.468425750732422,
"max": 1397.197021484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.578485259388247,
"min": 1.8796319449389423,
"max": 3.9106340042309466,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1674.7311013936996,
"min": 253.7503125667572,
"max": 2191.1711953878403,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.578485259388247,
"min": 1.8796319449389423,
"max": 3.9106340042309466,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1674.7311013936996,
"min": 253.7503125667572,
"max": 2191.1711953878403,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017450851236935707,
"min": 0.01250910960904245,
"max": 0.020396836054715095,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05235255371080712,
"min": 0.0250182192180849,
"max": 0.05606057652476011,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0461119541277488,
"min": 0.021757023719449837,
"max": 0.060382589263220626,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1383358623832464,
"min": 0.043514047438899674,
"max": 0.1607322908937931,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.099648966816663e-06,
"min": 3.099648966816663e-06,
"max": 0.00029529165156945,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.298946900449988e-06,
"min": 9.298946900449988e-06,
"max": 0.0008439732186756,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10103318333333332,
"min": 0.10103318333333332,
"max": 0.19843054999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30309954999999994,
"min": 0.20725859999999996,
"max": 0.5813244000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.155584833333326e-05,
"min": 6.155584833333326e-05,
"max": 0.004921684444999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018466754499999979,
"min": 0.00018466754499999979,
"max": 0.01406808756,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708417218",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708419581"
},
"total": 2362.432552364,
"count": 1,
"self": 0.4270175410001684,
"children": {
"run_training.setup": {
"total": 0.053749740000000656,
"count": 1,
"self": 0.053749740000000656
},
"TrainerController.start_learning": {
"total": 2361.951785083,
"count": 1,
"self": 4.455048316977809,
"children": {
"TrainerController._reset_env": {
"total": 4.203484107999998,
"count": 1,
"self": 4.203484107999998
},
"TrainerController.advance": {
"total": 2353.165035757022,
"count": 232187,
"self": 4.561431756958427,
"children": {
"env_step": {
"total": 1860.9512419150612,
"count": 232187,
"self": 1539.8694124681351,
"children": {
"SubprocessEnvManager._take_step": {
"total": 318.30668188497197,
"count": 232187,
"self": 16.268355266973686,
"children": {
"TorchPolicy.evaluate": {
"total": 302.0383266179983,
"count": 222896,
"self": 302.0383266179983
}
}
},
"workers": {
"total": 2.7751475619542134,
"count": 232187,
"self": 0.0,
"children": {
"worker_root": {
"total": 2354.919834876975,
"count": 232187,
"is_parallel": true,
"self": 1097.4163015280276,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009693439999978182,
"count": 1,
"is_parallel": true,
"self": 0.00030220200005715014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000667141999940668,
"count": 2,
"is_parallel": true,
"self": 0.000667141999940668
}
}
},
"UnityEnvironment.step": {
"total": 0.02961237799996752,
"count": 1,
"is_parallel": true,
"self": 0.0003619499999558684,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024220400001695452,
"count": 1,
"is_parallel": true,
"self": 0.00024220400001695452
},
"communicator.exchange": {
"total": 0.0282655980000186,
"count": 1,
"is_parallel": true,
"self": 0.0282655980000186
},
"steps_from_proto": {
"total": 0.0007426259999760987,
"count": 1,
"is_parallel": true,
"self": 0.000200315999961731,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005423100000143677,
"count": 2,
"is_parallel": true,
"self": 0.0005423100000143677
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1257.5035333489475,
"count": 232186,
"is_parallel": true,
"self": 40.56701634484284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.65228899707972,
"count": 232186,
"is_parallel": true,
"self": 82.65228899707972
},
"communicator.exchange": {
"total": 1042.7148564710242,
"count": 232186,
"is_parallel": true,
"self": 1042.7148564710242
},
"steps_from_proto": {
"total": 91.56937153600086,
"count": 232186,
"is_parallel": true,
"self": 31.870957882966366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.69841365303449,
"count": 464372,
"is_parallel": true,
"self": 59.69841365303449
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 487.6523620850023,
"count": 232187,
"self": 6.570943276044602,
"children": {
"process_trajectory": {
"total": 148.42332261195918,
"count": 232187,
"self": 147.18992148095924,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2334011309999369,
"count": 10,
"self": 1.2334011309999369
}
}
},
"_update_policy": {
"total": 332.65809619699854,
"count": 97,
"self": 268.64470969799623,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.01338649900231,
"count": 2910,
"self": 64.01338649900231
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.40000063565094e-07,
"count": 1,
"self": 9.40000063565094e-07
},
"TrainerController._save_models": {
"total": 0.12821596100002353,
"count": 1,
"self": 0.003180402999987564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12503555800003596,
"count": 1,
"self": 0.12503555800003596
}
}
}
}
}
}
}