# ppo-HalfCheetah-v3 / config.yml
# Source: Hugging Face model repo (user: gretab5802), commit b8a713d ("Initial commit", verified)
!!python/object/apply:collections.OrderedDict
- - - batch_size
- 64
- - clip_range
- 0.1
- - ent_coef
- 0.000401762
- - gae_lambda
- 0.92
- - gamma
- 0.98
- - learning_rate
- 2.0633e-05
- - max_grad_norm
- 0.8
- - n_envs
- 1
- - n_epochs
- 20
- - n_steps
- 512
- - n_timesteps
- 1000000.0
- - normalize
- '{''norm_obs'': True, ''norm_reward'': False}'
- - policy
- MlpPolicy
- - policy_kwargs
- dict(log_std_init=-2, ortho_init=False, activation_fn=nn.ReLU, net_arch=dict(pi=[256,
256], vf=[256, 256]) )
- - vf_coef
- 0.58096