ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36755046248435974,
"min": 0.35287874937057495,
"max": 1.4512220621109009,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11067.6796875,
"min": 10541.1943359375,
"max": 44024.2734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6756559610366821,
"min": -0.10665462911128998,
"max": 0.6876348853111267,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 193.9132537841797,
"min": -25.703765869140625,
"max": 200.10174560546875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014521007426083088,
"min": -0.03204679861664772,
"max": 0.28865253925323486,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.167529106140137,
"min": -8.65263557434082,
"max": 69.85391235351562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06820287926060152,
"min": 0.06528136473137973,
"max": 0.07377216319478716,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0230431889090228,
"min": 0.5124505107072262,
"max": 1.0679575363950184,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01657163596307262,
"min": 0.000674331314588271,
"max": 0.01657163596307262,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24857453944608932,
"min": 0.007417644460470982,
"max": 0.24857453944608932,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.506097498000003e-06,
"min": 7.506097498000003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011259146247000003,
"min": 0.00011259146247000003,
"max": 0.0035072978309007995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250200000000001,
"min": 0.10250200000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.53753,
"min": 1.3886848,
"max": 2.5690992,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002599498,
"min": 0.0002599498,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003899247,
"min": 0.003899247,
"max": 0.11693301008000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013412834145128727,
"min": 0.013065861538052559,
"max": 0.5309029817581177,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20119251310825348,
"min": 0.18292206525802612,
"max": 3.716320753097534,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 287.43809523809523,
"min": 272.4954128440367,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30181.0,
"min": 15984.0,
"max": 33192.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6744628466310956,
"min": -1.0000000521540642,
"max": 1.7177744864248763,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 175.81859889626503,
"min": -30.99840161204338,
"max": 184.2959993928671,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6744628466310956,
"min": -1.0000000521540642,
"max": 1.7177744864248763,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 175.81859889626503,
"min": -30.99840161204338,
"max": 184.2959993928671,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03998106409923085,
"min": 0.03762781582177305,
"max": 11.067019287496805,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.198011730419239,
"min": 3.9282846567657543,
"max": 177.07230859994888,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673392468",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673394627"
},
"total": 2159.0256026630004,
"count": 1,
"self": 0.47493200400003843,
"children": {
"run_training.setup": {
"total": 0.10920643600002222,
"count": 1,
"self": 0.10920643600002222
},
"TrainerController.start_learning": {
"total": 2158.4414642230004,
"count": 1,
"self": 1.2843140039908576,
"children": {
"TrainerController._reset_env": {
"total": 6.226269484000113,
"count": 1,
"self": 6.226269484000113
},
"TrainerController.advance": {
"total": 2150.8383515620094,
"count": 64072,
"self": 1.3886537209814378,
"children": {
"env_step": {
"total": 1476.5336190150024,
"count": 64072,
"self": 1369.6151597531095,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.09519231798186,
"count": 64072,
"self": 4.474340670990159,
"children": {
"TorchPolicy.evaluate": {
"total": 101.6208516469917,
"count": 62557,
"self": 34.35173498897666,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.26911665801504,
"count": 62557,
"self": 67.26911665801504
}
}
}
}
},
"workers": {
"total": 0.8232669439109941,
"count": 64072,
"self": 0.0,
"children": {
"worker_root": {
"total": 2153.8866914300684,
"count": 64072,
"is_parallel": true,
"self": 887.4396241640948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00180794099992454,
"count": 1,
"is_parallel": true,
"self": 0.0006408079996162996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011671330003082403,
"count": 8,
"is_parallel": true,
"self": 0.0011671330003082403
}
}
},
"UnityEnvironment.step": {
"total": 0.04441180300000269,
"count": 1,
"is_parallel": true,
"self": 0.0005322220001744427,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004944590000377502,
"count": 1,
"is_parallel": true,
"self": 0.0004944590000377502
},
"communicator.exchange": {
"total": 0.0416750869999305,
"count": 1,
"is_parallel": true,
"self": 0.0416750869999305
},
"steps_from_proto": {
"total": 0.0017100349998599995,
"count": 1,
"is_parallel": true,
"self": 0.0004311819993745303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012788530004854692,
"count": 8,
"is_parallel": true,
"self": 0.0012788530004854692
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.4470672659736,
"count": 64071,
"is_parallel": true,
"self": 28.510432608087058,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.215833447909063,
"count": 64071,
"is_parallel": true,
"self": 24.215833447909063
},
"communicator.exchange": {
"total": 1105.7518920369307,
"count": 64071,
"is_parallel": true,
"self": 1105.7518920369307
},
"steps_from_proto": {
"total": 107.96890917304677,
"count": 64071,
"is_parallel": true,
"self": 23.60536206608913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.36354710695764,
"count": 512568,
"is_parallel": true,
"self": 84.36354710695764
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 672.9160788260256,
"count": 64072,
"self": 2.3950769599964588,
"children": {
"process_trajectory": {
"total": 154.1069868980362,
"count": 64072,
"self": 153.90240413503625,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20458276299996214,
"count": 2,
"self": 0.20458276299996214
}
}
},
"_update_policy": {
"total": 516.4140149679929,
"count": 449,
"self": 202.3677294590184,
"children": {
"TorchPPOOptimizer.update": {
"total": 314.0462855089745,
"count": 22770,
"self": 314.0462855089745
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4689999261463527e-06,
"count": 1,
"self": 1.4689999261463527e-06
},
"TrainerController._save_models": {
"total": 0.09252770400007648,
"count": 1,
"self": 0.0013933690001977084,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09113433499987877,
"count": 1,
"self": 0.09113433499987877
}
}
}
}
}
}
}