From a3a06923aac840883ddc003517f126580fd9d647 Mon Sep 17 00:00:00 2001 From: chy <308604256@qq.com> Date: Sun, 18 Apr 2021 17:25:10 +0800 Subject: [PATCH 1/7] add trpo benchmark --- examples/mujoco/mujoco_trpo.py | 172 ++++++++++++++++++++++++++++++ tianshou/policy/modelfree/trpo.py | 3 +- 2 files changed, 173 insertions(+), 2 deletions(-) create mode 100644 examples/mujoco/mujoco_trpo.py diff --git a/examples/mujoco/mujoco_trpo.py b/examples/mujoco/mujoco_trpo.py new file mode 100644 index 000000000..81e7b84b6 --- /dev/null +++ b/examples/mujoco/mujoco_trpo.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python3 +import free_mjc +import os +import gym +import torch +import pprint +import datetime +import argparse +import numpy as np +from torch import nn +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.tensorboard import SummaryWriter +from torch.distributions import Independent, Normal + +from tianshou.policy import TRPOPolicy +from tianshou.utils import BasicLogger +from tianshou.env import SubprocVectorEnv +from tianshou.utils.net.common import Net +from tianshou.trainer import onpolicy_trainer +from tianshou.utils.net.continuous import ActorProb, Critic +from tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--task', type=str, default='HalfCheetah-v3') + parser.add_argument('--seed', type=int, default=0) + parser.add_argument('--buffer-size', type=int, default=4096) + parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])# baselines 32 32 + parser.add_argument('--lr', type=float, default=1e-3) + parser.add_argument('--gamma', type=float, default=0.99) + parser.add_argument('--epoch', type=int, default=100) + parser.add_argument('--step-per-epoch', type=int, default=30000) + parser.add_argument('--step-per-collect', type=int, default=1024) + parser.add_argument('--repeat-per-collect', type=int, default=1) + # batch-size >> step-per-collect means caculating all data in one singe forward. + parser.add_argument('--batch-size', type=int, default=99999) + parser.add_argument('--training-num', type=int, default=16) + parser.add_argument('--test-num', type=int, default=10) + # trpo special + parser.add_argument('--rew-norm', type=int, default=True) + parser.add_argument('--gae-lambda', type=float, default=0.95) + # TODO tanh support + parser.add_argument('--bound-action-method', type=str, default="clip") + parser.add_argument('--lr-decay', type=int, default=True) + parser.add_argument('--logdir', type=str, default='log') + parser.add_argument('--render', type=float, default=0.) 
+ parser.add_argument('--norm-adv', type=int, default=1) + parser.add_argument('--optim-critic-iters', type=int, default=20) + parser.add_argument('--max-kl', type=float, default=0.01) + parser.add_argument('--backtrack-coeff', type=float, default=0.8) + parser.add_argument('--max-backtracks', type=int, default=10) + parser.add_argument( + '--device', type=str, + default='cuda' if torch.cuda.is_available() else 'cpu') + parser.add_argument('--resume-path', type=str, default=None) + parser.add_argument('--watch', default=False, action='store_true', + help='watch the play of pre-trained policy only') + return parser.parse_args() + + +def test_trpo(args=get_args()): + env = gym.make(args.task) + args.state_shape = env.observation_space.shape or env.observation_space.n + args.action_shape = env.action_space.shape or env.action_space.n + args.max_action = env.action_space.high[0] + print("Observations shape:", args.state_shape) + print("Actions shape:", args.action_shape) + print("Action range:", np.min(env.action_space.low), + np.max(env.action_space.high)) + # train_envs = gym.make(args.task) + train_envs = SubprocVectorEnv( + [lambda: gym.make(args.task) for _ in range(args.training_num)], + norm_obs=True) + # test_envs = gym.make(args.task) + test_envs = SubprocVectorEnv( + [lambda: gym.make(args.task) for _ in range(args.test_num)], + norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False) + + # seed + np.random.seed(args.seed) + torch.manual_seed(args.seed) + train_envs.seed(args.seed) + test_envs.seed(args.seed) + # model + net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes, + activation=nn.Tanh, device=args.device) + actor = ActorProb(net_a, args.action_shape, max_action=args.max_action, + unbounded=True, device=args.device).to(args.device) + net_c = Net(args.state_shape, hidden_sizes=args.hidden_sizes, + activation=nn.Tanh, device=args.device) + critic = Critic(net_c, device=args.device).to(args.device) + torch.nn.init.constant_(actor.sigma_param, -0.5) + for m in list(actor.modules()) + list(critic.modules()): + if isinstance(m, torch.nn.Linear): + # orthogonal initialization + torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2)) + torch.nn.init.zeros_(m.bias) + # do last policy layer scaling, this will make initial actions have (close to) + # 0 mean and std, and will help boost performances, + # see https://arxiv.org/abs/2006.05990, Fig.24 for details + for m in actor.mu.modules(): + if isinstance(m, torch.nn.Linear): + torch.nn.init.zeros_(m.bias) + m.weight.data.copy_(0.01 * m.weight.data) + + optim = torch.optim.Adam(critic.parameters(), lr=args.lr) + lr_scheduler = None + if args.lr_decay: + # decay learning rate to 0 linearly + max_update_num = np.ceil( + args.step_per_epoch / args.step_per_collect) * args.epoch + + lr_scheduler = LambdaLR( + optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num) + + def dist(*logits): + return Independent(Normal(*logits), 1) + + policy = TRPOPolicy(actor, critic, optim, dist, discount_factor=args.gamma, + gae_lambda=args.gae_lambda, + reward_normalization=args.rew_norm, action_scaling=True, + action_bound_method=args.bound_action_method, + lr_scheduler=lr_scheduler, action_space=env.action_space, + advantage_normalization=args.norm_adv, + optim_critic_iters=args.optim_critic_iters, + max_kl=args.max_kl, + backtrack_coeff=args.backtrack_coeff, + max_backtracks=args.max_backtracks) + + # load a previous policy + if args.resume_path: + policy.load_state_dict(torch.load(args.resume_path, map_location=args.device)) + 
print("Loaded agent from: ", args.resume_path) + + # collector + if args.training_num > 1: + buffer = VectorReplayBuffer(args.buffer_size, len(train_envs)) + else: + buffer = ReplayBuffer(args.buffer_size) + train_collector = Collector(policy, train_envs, buffer, exploration_noise=True) + test_collector = Collector(policy, test_envs) + # log + t0 = datetime.datetime.now().strftime("%m%d_%H%M%S") + log_file = f'seed_{args.seed}_{t0}-{args.task.replace("-", "_")}_trpo' + log_path = os.path.join(args.logdir, args.task, 'trpo', log_file) + writer = SummaryWriter(log_path) + writer.add_text("args", str(args)) + logger = BasicLogger(writer, update_interval=100, train_interval=100) + + def save_fn(policy): + torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth')) + + if not args.watch: + # trainer + result = onpolicy_trainer( + policy, train_collector, test_collector, args.epoch, args.step_per_epoch, + args.repeat_per_collect, args.test_num, args.batch_size, + step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger, + test_in_train=False) + pprint.pprint(result) + + # Let's watch its performance! + policy.eval() + test_envs.seed(args.seed) + test_collector.reset() + result = test_collector.collect(n_episode=args.test_num, render=args.render) + print(f'Final reward: {result["rews"].mean()}, length: {result["lens"].mean()}') + + +if __name__ == '__main__': + test_trpo() diff --git a/tianshou/policy/modelfree/trpo.py b/tianshou/policy/modelfree/trpo.py index a1529cd11..32ba13976 100644 --- a/tianshou/policy/modelfree/trpo.py +++ b/tianshou/policy/modelfree/trpo.py @@ -178,8 +178,7 @@ def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product if kl < self._delta and new_actor_loss < actor_loss: if i > 0: - warnings.warn(f"Backtracking to step {i}. 
" - "Hyperparamters aren't good enough.") + warnings.warn(f"Backtracking to step {i}.") break elif i < self._max_backtracks - 1: step_size = step_size * self._backtrack_coeff From 4ee3a462928dbbd38e17f402c6b1fd2bffb10afa Mon Sep 17 00:00:00 2001 From: chy <308604256@qq.com> Date: Sun, 18 Apr 2021 17:36:03 +0800 Subject: [PATCH 2/7] add tools --- examples/mujoco/plotter.py | 21 +------ examples/mujoco/tools.py | 118 +++++++++++++++++++++++++++++++++++-- 2 files changed, 114 insertions(+), 25 deletions(-) diff --git a/examples/mujoco/plotter.py b/examples/mujoco/plotter.py index 7454871e7..39cfbba0e 100755 --- a/examples/mujoco/plotter.py +++ b/examples/mujoco/plotter.py @@ -9,7 +9,7 @@ import matplotlib.ticker as mticker from collections import defaultdict -from tools import find_all_files +from tools import find_all_files, group_files, csv2numpy def smooth(y, radius, mode='two_sided', valid_only=False): @@ -63,25 +63,6 @@ def smooth(y, radius, mode='two_sided', valid_only=False): '#000000', # BLACK ]) - -def csv2numpy(csv_file): - csv_dict = defaultdict(list) - reader = csv.DictReader(open(csv_file)) - for row in reader: - for k, v in row.items(): - csv_dict[k].append(eval(v)) - return {k: np.array(v) for k, v in csv_dict.items()} - - -def group_files(file_list, pattern): - res = defaultdict(list) - for f in file_list: - match = re.search(pattern, f) - key = match.group() if match else '' - res[key].append(f) - return res - - def plot_ax( ax, file_lists, diff --git a/examples/mujoco/tools.py b/examples/mujoco/tools.py index c4ff6c378..726cd7427 100755 --- a/examples/mujoco/tools.py +++ b/examples/mujoco/tools.py @@ -8,6 +8,7 @@ import numpy as np from typing import Dict, List, Union from tensorboard.backend.event_processing import event_accumulator +from collections import defaultdict def find_all_files(root_dir: str, pattern: re.Pattern) -> List[str]: @@ -20,6 +21,21 @@ def find_all_files(root_dir: str, pattern: re.Pattern) -> List[str]: file_list.append(absolute_path) return file_list +def group_files(file_list, pattern): + res = defaultdict(list) + for f in file_list: + match = re.search(pattern, f) + key = match.group() if match else '' + res[key].append(f) + return res + +def csv2numpy(csv_file): + csv_dict = defaultdict(list) + reader = csv.DictReader(open(csv_file)) + for row in reader: + for k, v in row.items(): + csv_dict[k].append(eval(v)) + return {k: np.array(v) for k, v in csv_dict.items()} def convert_tfevents_to_csv( root_dir: str, refresh: bool = False @@ -85,16 +101,108 @@ def merge_csv( print(f"Output merged csv file to {output_path} with {len(content[1:])} lines.") csv.writer(open(output_path, "w")).writerows(content) +def numerical_anysis(root_dir: str, xlim: int) -> None: + file_pattern = r".*/test_rew_\d+seeds.csv$" + norm_group_pattern = r"(/|^)\w+?\-v(\d|$)" + output_group_pattern = r".*?(?=(/|^)\w+?\-v\d)" + csv_files = find_all_files(root_dir, re.compile(file_pattern)) + norm_group = group_files(csv_files, norm_group_pattern) + output_group = group_files(csv_files, output_group_pattern) + # calculate numerical outcome for each csv_file (y/std integration max_y, final_y) + results = {} + for f in csv_files: + # reader = csv.DictReader(open(f, newline='')) + # result = [] + # for row in reader: + # result.append([row['env_step'], row['rew'], row['rew:shaded']]) + # result = np.array(result).T + # iclip = np.searchsorted(result[0], xlim) + result = csv2numpy(f) + result = np.stack((result['env_step'], result['rew'] - result['rew'][0], result['rew:shaded'])) + iclip = 
np.searchsorted(result[0], xlim) + + if iclip == 0 or iclip == len(result[0]): + results[f] = None + continue + else: + results[f] = {} + result = result[:, :iclip + 1] + final_rew = np.interp(xlim, result[0], result[1]) + final_rew_std = np.interp(xlim, result[0], result[2]) + result[0, iclip] = xlim + result[1, iclip] = final_rew + result[2, iclip] = final_rew_std + results[f]['final_reward'] = final_rew.astype(float) + max_rew = np.max(result[1]) + results[f]['max_reward'] = max_rew.astype(float) + rew_integration = np.trapz(result[1], x=result[0]) + results[f]['reward_integration'] = rew_integration.astype(float) + std_integration = np.trapz(result[2], x=result[0]) + results[f]['reward_std_integration'] = std_integration.astype(float) + # calculate normalised numerical outcome for each csv_file group + for _, fs in norm_group.items(): + maxres = defaultdict(lambda: -np.inf) + # find max for each key + for f in fs: + if not results[f]: + continue + for k, v in results[f].items(): + maxres[k] = v if maxres[k] < v else maxres[k] + # add normalised numerical outcome + for f in fs: + if not results[f]: + continue + new_dict = results[f].copy() + for k, v in results[f].items(): + new_dict[k + ":normalised"] = v / maxres[k] + results[f] = new_dict + # Add all numerical results for each outcome group + output_group + group_results = {} + for g, fs in output_group.items(): + group_results[g] = defaultdict(lambda: 0) + group_n = 0 + for f in fs: + if not results[f]: + continue + group_n += 1 + for k, v in results[f].items(): + group_results[g][k] += v + for k, v in group_results[g].items(): + group_results[g][k] = v / group_n + group_results[g]['group_n'] += group_n + # print all outputs for each csv_file and each outcome group + for f, numerical_result in results.items(): + print("******* " + f + ":") + print(numerical_result) + for g, numerical_result in group_results.items(): + print("******* " + g + ":") + print(numerical_result) if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--root-dir', type=str) - parser.add_argument( + sp = parser.add_subparsers(dest='action') + + merge_parser = sp.add_parser('merge') + merge_parser.add_argument( '--refresh', action="store_true", help="Re-generate all csv files instead of using existing one.") - parser.add_argument( + merge_parser.add_argument( '--remove-zero', action="store_true", help="Remove the data point of env_step == 0.") + merge_parser.add_argument('--root-dir', type=str) + + analysis_parser = sp.add_parser('analysis') + analysis_parser.add_argument('--xlim', type=int, default=1000000, + help='x-axis limitation (default: None)') + analysis_parser.add_argument('--root-dir', type=str) args = parser.parse_args() - csv_files = convert_tfevents_to_csv(args.root_dir, args.refresh) - merge_csv(csv_files, args.root_dir, args.remove_zero) + + args.action = "analysis" + args.xlim=1000000 + args.root_dir="/home/huayu/git/tianshou/examples/mujoco/ablation_trpo_last" + if args.action == "merge": + csv_files = convert_tfevents_to_csv(args.root_dir, args.refresh) + merge_csv(csv_files, args.root_dir, args.remove_zero) + elif args.action == "analysis": + numerical_anysis(args.root_dir, args.xlim) From a0e05e44668a0f0598920fdf258fc251beb584ee Mon Sep 17 00:00:00 2001 From: chy <308604256@qq.com> Date: Sun, 18 Apr 2021 19:39:04 +0800 Subject: [PATCH 3/7] readme update --- README.md | 2 +- examples/mujoco/README.md | 27 ++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/README.md 
b/README.md index f40ee8b7d..92be4178c 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ - [Quantile Regression DQN (QRDQN)](https://arxiv.org/pdf/1710.10044.pdf) - [Policy Gradient (PG)](https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf) - [Advantage Actor-Critic (A2C)](https://openai.com/blog/baselines-acktr-a2c/) -- [Trust Region Policy Optimization](https://arxiv.org/pdf/1502.05477.pdf) +- [Trust Region Policy Optimization (TRPO)](https://arxiv.org/pdf/1502.05477.pdf) - [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf) - [Deep Deterministic Policy Gradient (DDPG)](https://arxiv.org/pdf/1509.02971.pdf) - [Twin Delayed DDPG (TD3)](https://arxiv.org/pdf/1802.09477.pdf) diff --git a/examples/mujoco/README.md b/examples/mujoco/README.md index a36d6127b..da7338e2b 100644 --- a/examples/mujoco/README.md +++ b/examples/mujoco/README.md @@ -17,7 +17,8 @@ Supported algorithms are listed below: - [Soft Actor-Critic (SAC)](https://arxiv.org/pdf/1812.05905.pdf), [commit id](https://github.com/thu-ml/tianshou/tree/e605bdea942b408126ef4fbc740359773259c9ec) - [REINFORCE algorithm](https://papers.nips.cc/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf), [commit id](https://github.com/thu-ml/tianshou/tree/e27b5a26f330de446fe15388bf81c3777f024fb9) - [Advantage Actor-Critic (A2C)](https://openai.com/blog/baselines-acktr-a2c/), [commit id](https://github.com/thu-ml/tianshou/tree/1730a9008ad6bb67cac3b21347bed33b532b17bc) -- [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf), [commit id](https://github.com/thu-ml/tianshou/tree/5d580c36624df0548818edf1f9b111b318dd7fd8) +- [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf), [commit id](https://github.com/thu-ml/tianshou/tree/6426a39796db052bafb7cabe85c764db20a722b0) +- [Trust Region Policy Optimization (TRPO)](https://arxiv.org/pdf/1502.05477.pdf), [commit id](https://github.com/thu-ml/tianshou/tree/5057b5c89e6168220272c9c28a15b758a72efc32) #### Usage @@ -239,7 +240,31 @@ For pretrained agents, detailed graphs (single agent, single game) and log detai 4. `batch-size` 128 and 64 (default) work equally well. Changing `training-num` alone slightly (maybe in range [8, 128]) won't affect performance. For bound action method, both `clip` and `tanh` work quite well. 5. In OPENAI implementations of PPO, they multiply value loss with a factor of 0.5 for no good reason (see this [issue](https://github.com/openai/baselines/issues/445#issuecomment-777988738)). We do not do so and therefore make our `vf-coef` 0.25 (half of standard 0.5). However, since value loss is only used to optimize `critic` network, setting different `vf-coef` should in theory make no difference if using Adam optimizer. 
+### TRPO
+
+|Environment| Tianshou| [ACKTR paper](https://arxiv.org/pdf/1708.05144.pdf)| [PPO paper](https://arxiv.org/pdf/1707.06347.pdf)|[baselines](http://htmlpreview.github.io/?https://github.com/openai/baselines/blob/master/benchmarks_mujoco1M.htm)|[spinningup(pytorch)](https://spinningup.openai.com/en/latest/spinningup/bench.html)|
+| :---------------: | :---------------: | :---------------: | :---------------: | :---------------: |:---------------: |
+|Ant|**2866.7±707.9** | ~0 | N | N | ~150 |
+|HalfCheetah|**4471.2±804.9** | ~400 | ~0| ~1350 | ~850 |
+|Hopper| 2046.0±1037.9| ~1400 | ~2100 | **~2200** | ~1200 |
+|Walker2d|**3826.7±782.7** | ~550 | ~1100 | ~2350 | ~600 |
+|Swimmer|40.9±19.6 | ~40 | **~121** | ~95| ~85 |
+|Humanoid|**810.1±126.1**| N | N | N | N |
+|Reacher| **-5.1±0.8** | -8 | ~-115 | **~-5** | N |
+|InvertedPendulum|**1000.0±0.0** | **~1000** | **~1000** | ~910 | N |
+|InvertedDoublePendulum|**8435.2±1073.3**| ~800 | ~200 | ~7000 | N |
+\* details[[4]](#footnote4)[[5]](#footnote5)
+
+#### Hints for TRPO
+1. We have tried `step-per-collect` in (80, 1024, 2048, 4096) and `training-num` in (4, 16, 32, 64), and found that 1024 for `step-per-collect` (the same as OpenAI Baselines) and a smaller `training-num` (below 16) are good choices. Setting `training-num` to 4 is actually better, but we still use 16 for the training speed boost it brings.
+2. Advantage normalization is a standard trick in TRPO, but we found it to be of only minor help, just as in PPO.
+3. A larger `optim-critic-iters` (larger than the 5 used in OpenAI Baselines) helps in most environments. A smaller learning rate and a learning-rate decay schedule also help a tiny bit.
+4. `gae-lambda` 0.98 and 0.95 work equally well.
+5. We use GAE returns (GAE advantage + value) as the critic's target when updating, whereas people normally use reward-to-go (lambda = 0) as the target. We found that they work equally well, although GAE returns are slightly biased mathematically.
+6. Empirically, Swimmer-v3 usually requires a larger bootstrap length and learning rate, while Humanoid-v3/InvertedPendulum-v2 are the opposite.
+7. In contrast with the claim made in the TRPO paper, we found that the backtracking in the line search is rarely triggered, at least in the MuJoCo settings, so it matters little in practice. This effectively makes TRPO the same as the TNPG algorithm (described in this [paper](http://proceedings.mlr.press/v48/duan16.html)), which also explains why the TNPG and TRPO curves look so similar in that paper. A sketch of this step-size logic follows this list.
+8. The "recompute advantage" trick, which is helpful in PPO, doesn't really help in TRPO.
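A sketch of the step-size and backtracking logic behind hint 7 (not Tianshou code: a self-contained toy with a 1-D Gaussian policy, where `s`, `Hs`, and `surrogate_loss` stand in for the conjugate-gradient direction, the Fisher-vector product, and the surrogate objective that `TRPOPolicy.learn` actually computes; only `max-kl`, `backtrack-coeff`, and `max-backtracks` correspond to the hyperparameters above):

```python
import torch
from torch.distributions import Normal, kl_divergence

delta = 0.01           # --max-kl: trust-region radius
backtrack_coeff = 0.8  # --backtrack-coeff
max_backtracks = 10    # --max-backtracks

# Toy 1-D Gaussian policy, parameters theta = (mu, log_sigma); the old policy is N(0, 1).
old_theta = torch.zeros(2)
old_dist = Normal(old_theta[0], old_theta[1].exp())

def surrogate_loss(theta: torch.Tensor) -> torch.Tensor:
    # Stand-in for -E[ratio * advantage]; lower is better, and it decreases with mu here.
    return -theta[0]

# Pretend conjugate gradient already returned the search direction s and the
# Fisher-vector product H @ s (for this Gaussian, H = diag(1, 2) at old_theta).
s = torch.tensor([1.0, 0.5])
Hs = torch.tensor([1.0, 2.0]) * s

# Initial step size saturates the quadratic KL constraint: 0.5 * beta^2 * s^T H s = delta.
beta = torch.sqrt(2 * delta / (s @ Hs))

old_loss = surrogate_loss(old_theta)
for i in range(max_backtracks):
    new_theta = old_theta + beta * s
    new_dist = Normal(new_theta[0], new_theta[1].exp())
    kl = kl_divergence(old_dist, new_dist)  # exact KL of the candidate policy
    if kl < delta and surrogate_loss(new_theta) < old_loss:
        print(f"accepted at backtrack step {i}: step size {beta.item():.4f}, kl {kl.item():.5f}")
        break
    beta = beta * backtrack_coeff  # shrink the step and try again
```

With the default `max-kl`, the very first candidate step already passes both checks here; that is the behaviour hint 7 describes, and it is why TRPO mostly reduces to TNPG with a KL-derived step size.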
## Note From d7c5bbf08086b7034ce1fd41eae50deb81d1d846 Mon Sep 17 00:00:00 2001 From: chy <308604256@qq.com> Date: Tue, 20 Apr 2021 12:14:47 +0800 Subject: [PATCH 4/7] add npg --- test/continuous/test_npg.py | 136 +++++++++++++++++++++++ test/continuous/test_trpo.py | 5 +- tianshou/policy/__init__.py | 2 + tianshou/policy/modelfree/npg.py | 177 ++++++++++++++++++++++++++++++ tianshou/policy/modelfree/trpo.py | 68 +----------- 5 files changed, 323 insertions(+), 65 deletions(-) create mode 100644 test/continuous/test_npg.py create mode 100644 tianshou/policy/modelfree/npg.py diff --git a/test/continuous/test_npg.py b/test/continuous/test_npg.py new file mode 100644 index 000000000..d5172fa8b --- /dev/null +++ b/test/continuous/test_npg.py @@ -0,0 +1,136 @@ +import os +import gym +import torch +import pprint +import argparse +import numpy as np +from torch import nn +from torch.utils.tensorboard import SummaryWriter +from torch.distributions import Independent, Normal + +from tianshou.policy import NPGPolicy +from tianshou.utils import BasicLogger +from tianshou.env import DummyVectorEnv +from tianshou.utils.net.common import Net +from tianshou.trainer import onpolicy_trainer +from tianshou.data import Collector, VectorReplayBuffer +from tianshou.utils.net.continuous import ActorProb, Critic + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--task', type=str, default='Pendulum-v0') + parser.add_argument('--seed', type=int, default=1) + parser.add_argument('--buffer-size', type=int, default=50000) + parser.add_argument('--lr', type=float, default=1e-3) + parser.add_argument('--gamma', type=float, default=0.95) + parser.add_argument('--epoch', type=int, default=5) + parser.add_argument('--step-per-epoch', type=int, default=50000) + parser.add_argument('--step-per-collect', type=int, default=2048) + parser.add_argument('--repeat-per-collect', type=int, + default=2) # theoretically it should be 1 + parser.add_argument('--batch-size', type=int, default=99999) + parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64]) + parser.add_argument('--training-num', type=int, default=16) + parser.add_argument('--test-num', type=int, default=10) + parser.add_argument('--logdir', type=str, default='log') + parser.add_argument('--render', type=float, default=0.) 
+ parser.add_argument( + '--device', type=str, + default='cuda' if torch.cuda.is_available() else 'cpu') + # npg special + parser.add_argument('--gae-lambda', type=float, default=0.95) + parser.add_argument('--rew-norm', type=int, default=1) + parser.add_argument('--norm-adv', type=int, default=1) + parser.add_argument('--optim-critic-iters', type=int, default=5) + parser.add_argument('--actor-step-size', type=float, default=0.5) + args = parser.parse_known_args()[0] + return args + + +def test_npg(args=get_args()): + env = gym.make(args.task) + if args.task == 'Pendulum-v0': + env.spec.reward_threshold = -250 + args.state_shape = env.observation_space.shape or env.observation_space.n + args.action_shape = env.action_space.shape or env.action_space.n + args.max_action = env.action_space.high[0] + # you can also use tianshou.env.SubprocVectorEnv + # train_envs = gym.make(args.task) + train_envs = DummyVectorEnv( + [lambda: gym.make(args.task) for _ in range(args.training_num)]) + # test_envs = gym.make(args.task) + test_envs = DummyVectorEnv( + [lambda: gym.make(args.task) for _ in range(args.test_num)]) + # seed + np.random.seed(args.seed) + torch.manual_seed(args.seed) + train_envs.seed(args.seed) + test_envs.seed(args.seed) + # model + net = Net(args.state_shape, hidden_sizes=args.hidden_sizes, + activation=nn.Tanh, device=args.device) + actor = ActorProb(net, args.action_shape, max_action=args.max_action, + unbounded=True, device=args.device).to(args.device) + critic = Critic(Net( + args.state_shape, hidden_sizes=args.hidden_sizes, device=args.device, + activation=nn.Tanh), device=args.device).to(args.device) + # orthogonal initialization + for m in list(actor.modules()) + list(critic.modules()): + if isinstance(m, torch.nn.Linear): + torch.nn.init.orthogonal_(m.weight) + torch.nn.init.zeros_(m.bias) + optim = torch.optim.Adam(set( + actor.parameters()).union(critic.parameters()), lr=args.lr) + + # replace DiagGuassian with Independent(Normal) which is equivalent + # pass *logits to be consistent with policy.forward + def dist(*logits): + return Independent(Normal(*logits), 1) + + policy = NPGPolicy( + actor, critic, optim, dist, + discount_factor=args.gamma, + reward_normalization=args.rew_norm, + advantage_normalization=args.norm_adv, + gae_lambda=args.gae_lambda, + action_space=env.action_space, + optim_critic_iters=args.optim_critic_iters, + actor_step_size=args.actor_step_size) + # collector + train_collector = Collector( + policy, train_envs, + VectorReplayBuffer(args.buffer_size, len(train_envs))) + test_collector = Collector(policy, test_envs) + # log + log_path = os.path.join(args.logdir, args.task, 'npg') + writer = SummaryWriter(log_path) + logger = BasicLogger(writer) + + def save_fn(policy): + torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth')) + + def stop_fn(mean_rewards): + return mean_rewards >= env.spec.reward_threshold + + # trainer + result = onpolicy_trainer( + policy, train_collector, test_collector, args.epoch, + args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size, + step_per_collect=args.step_per_collect, stop_fn=stop_fn, save_fn=save_fn, + logger=logger) + assert stop_fn(result['best_reward']) + + if __name__ == '__main__': + pprint.pprint(result) + # Let's watch its performance! 
+ env = gym.make(args.task) + policy.eval() + collector = Collector(policy, env) + result = collector.collect(n_episode=1, render=args.render) + rews, lens = result["rews"], result["lens"] + print(f"Final reward: {rews.mean()}, length: {lens.mean()}") + + +if __name__ == '__main__': + test_npg() diff --git a/test/continuous/test_trpo.py b/test/continuous/test_trpo.py index 4b2dc08dc..9db4f449c 100644 --- a/test/continuous/test_trpo.py +++ b/test/continuous/test_trpo.py @@ -27,8 +27,7 @@ def get_args(): parser.add_argument('--epoch', type=int, default=5) parser.add_argument('--step-per-epoch', type=int, default=50000) parser.add_argument('--step-per-collect', type=int, default=2048) - parser.add_argument('--repeat-per-collect', type=int, - default=2) # theoretically it should be 1 + parser.add_argument('--repeat-per-collect', type=int, default=1) parser.add_argument('--batch-size', type=int, default=99999) parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64]) parser.add_argument('--training-num', type=int, default=16) @@ -43,7 +42,7 @@ def get_args(): parser.add_argument('--rew-norm', type=int, default=1) parser.add_argument('--norm-adv', type=int, default=1) parser.add_argument('--optim-critic-iters', type=int, default=5) - parser.add_argument('--max-kl', type=float, default=0.01) + parser.add_argument('--max-kl', type=float, default=0.005) parser.add_argument('--backtrack-coeff', type=float, default=0.8) parser.add_argument('--max-backtracks', type=int, default=10) diff --git a/tianshou/policy/__init__.py b/tianshou/policy/__init__.py index d087a89b1..f0177d9cb 100644 --- a/tianshou/policy/__init__.py +++ b/tianshou/policy/__init__.py @@ -5,6 +5,7 @@ from tianshou.policy.modelfree.qrdqn import QRDQNPolicy from tianshou.policy.modelfree.pg import PGPolicy from tianshou.policy.modelfree.a2c import A2CPolicy +from tianshou.policy.modelfree.npg import NPGPolicy from tianshou.policy.modelfree.ddpg import DDPGPolicy from tianshou.policy.modelfree.ppo import PPOPolicy from tianshou.policy.modelfree.trpo import TRPOPolicy @@ -25,6 +26,7 @@ "QRDQNPolicy", "PGPolicy", "A2CPolicy", + "NPGPolicy", "DDPGPolicy", "PPOPolicy", "TRPOPolicy", diff --git a/tianshou/policy/modelfree/npg.py b/tianshou/policy/modelfree/npg.py new file mode 100644 index 000000000..8b4eed4bc --- /dev/null +++ b/tianshou/policy/modelfree/npg.py @@ -0,0 +1,177 @@ +import torch +import numpy as np +from torch import nn +import torch.nn.functional as F +from torch.distributions import kl_divergence +from typing import Any, Dict, List, Type, Callable + + +from tianshou.policy import A2CPolicy +from tianshou.data import Batch, ReplayBuffer + + +def _conjugate_gradients( + Avp: Callable[[torch.Tensor], torch.Tensor], + b: torch.Tensor, + nsteps: int = 10, + residual_tol: float = 1e-10 +) -> torch.Tensor: + x = torch.zeros_like(b) + r, p = b.clone(), b.clone() + # Note: should be 'r, p = b - A(x)', but for x=0, A(x)=0. + # Change if doing warm start. 
+ rdotr = r.dot(r) + for i in range(nsteps): + z = Avp(p) + alpha = rdotr / p.dot(z) + x += alpha * p + r -= alpha * z + new_rdotr = r.dot(r) + if new_rdotr < residual_tol: + break + p = r + new_rdotr / rdotr * p + rdotr = new_rdotr + return x + + +def _get_flat_grad(y: torch.Tensor, model: nn.Module, **kwargs: Any) -> torch.Tensor: + grads = torch.autograd.grad(y, model.parameters(), **kwargs) # type: ignore + return torch.cat([grad.reshape(-1) for grad in grads]) + + +def _set_from_flat_params(model: nn.Module, flat_params: torch.Tensor) -> nn.Module: + prev_ind = 0 + for param in model.parameters(): + flat_size = int(np.prod(list(param.size()))) + param.data.copy_( + flat_params[prev_ind:prev_ind + flat_size].view(param.size())) + prev_ind += flat_size + return model + + +class NPGPolicy(A2CPolicy): + """Implementation of Natural Policy Gradient. + https://proceedings.neurips.cc/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf + :param torch.nn.Module actor: the actor network following the rules in + :class:`~tianshou.policy.BasePolicy`. (s -> logits) + :param torch.nn.Module critic: the critic network. (s -> V(s)) + :param torch.optim.Optimizer optim: the optimizer for actor and critic network. + :param dist_fn: distribution class for computing the action. + :type dist_fn: Type[torch.distributions.Distribution] + :param bool advantage_normalization: whether to do per mini-batch advantage + normalization. Default to True. + :param int optim_critic_iters: Number of times to optimize critic network per + update. Default to 5. + :param float gae_lambda: in [0, 1], param for Generalized Advantage Estimation. + Default to 0.95. + :param bool reward_normalization: normalize estimated values to have std close to + 1. Default to False. + :param int max_batchsize: the maximum size of the batch when computing GAE, + depends on the size of available memory and the memory cost of the + model; should be as large as possible within the memory constraint. + Default to 256. + :param bool action_scaling: whether to map actions from range [-1, 1] to range + [action_spaces.low, action_spaces.high]. Default to True. + :param str action_bound_method: method to bound action to range [-1, 1], can be + either "clip" (for simply clipping the action), "tanh" (for applying tanh + squashing) for now, or empty string for no bounding. Default to "clip". + :param Optional[gym.Space] action_space: env's action space, mandatory if you want + to use option "action_scaling" or "action_bound_method". Default to None. + :param lr_scheduler: a learning rate scheduler that adjusts the learning rate in + optimizer in each policy.update(). Default to None (no lr_scheduler). 
+ """ + + def __init__( + self, + actor: torch.nn.Module, + critic: torch.nn.Module, + optim: torch.optim.Optimizer, + dist_fn: Type[torch.distributions.Distribution], + advantage_normalization: bool = True, + optim_critic_iters: int = 5, + actor_step_size: float = 0.5, # TODO + **kwargs: Any, + ) -> None: + super().__init__(actor, critic, optim, dist_fn, **kwargs) + del self._weight_vf, self._weight_ent, self._grad_norm + self._norm_adv = advantage_normalization + self._optim_critic_iters = optim_critic_iters + self._step_size = actor_step_size + # adjusts Hessian-vector product calculation for numerical stability + self._damping = 0.1 + + def process_fn( + self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray + ) -> Batch: + batch = super().process_fn(batch, buffer, indice) + old_log_prob = [] + with torch.no_grad(): + for b in batch.split(self._batch, shuffle=False, merge_last=True): + old_log_prob.append(self(b).dist.log_prob(b.act)) + batch.logp_old = torch.cat(old_log_prob, dim=0) + if self._norm_adv: + batch.adv = (batch.adv - batch.adv.mean()) / batch.adv.std() + return batch + + def learn( # type: ignore + self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any + ) -> Dict[str, List[float]]: + actor_losses, vf_losses, kls = [], [], [] + for step in range(repeat): + for b in batch.split(batch_size, merge_last=True): + # optimize actor + # direction: calculate villia gradient + dist = self(b).dist # TODO could come from batch + ratio = (dist.log_prob(b.act) - b.logp_old).exp().float() + ratio = ratio.reshape(ratio.size(0), -1).transpose(0, 1) + actor_loss = -(ratio * b.adv).mean() + flat_grads = _get_flat_grad( + actor_loss, self.actor, retain_graph=True).detach() + + # direction: calculate natural gradient + with torch.no_grad(): + old_dist = self(b).dist + + kl = kl_divergence(old_dist, dist).mean() + # calculate first order gradient of kl with respect to theta + flat_kl_grad = _get_flat_grad(kl, self.actor, create_graph=True) + + def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product + # caculate second order gradient of kl with respect to theta + kl_v = (flat_kl_grad * v).sum() + flat_kl_grad_grad = _get_flat_grad( + kl_v, self.actor, retain_graph=True).detach() + return flat_kl_grad_grad + v * self._damping + + search_direction = -_conjugate_gradients(MVP, flat_grads, nsteps=10) + + # step + with torch.no_grad(): + flat_params = torch.cat([param.data.view(-1) + for param in self.actor.parameters()]) + new_flat_params = flat_params + self._step_size * search_direction + _set_from_flat_params(self.actor, new_flat_params) + new_dist = self(b).dist + kl = kl_divergence(old_dist, new_dist).mean() + + # optimize citirc + for _ in range(self._optim_critic_iters): + value = self.critic(b.obs).flatten() + vf_loss = F.mse_loss(b.returns, value) + self.optim.zero_grad() + vf_loss.backward() + self.optim.step() + + actor_losses.append(actor_loss.item()) + vf_losses.append(vf_loss.item()) + kls.append(kl.item()) + + # update learning rate if lr_scheduler is given + if self.lr_scheduler is not None: + self.lr_scheduler.step() + + return { + "loss/actor": actor_losses, + "loss/vf": vf_losses, + "kl": kls, + } diff --git a/tianshou/policy/modelfree/trpo.py b/tianshou/policy/modelfree/trpo.py index 32ba13976..6172ead78 100644 --- a/tianshou/policy/modelfree/trpo.py +++ b/tianshou/policy/modelfree/trpo.py @@ -7,50 +7,13 @@ from typing import Any, Dict, List, Type, Callable -from tianshou.policy import A2CPolicy +from tianshou.policy import NPGPolicy from tianshou.data 
import Batch, ReplayBuffer +# TODO don't know whether this work or not in library +from .npg import _conjugate_gradients, _get_flat_grad, _set_from_flat_params -def _conjugate_gradients( - Avp: Callable[[torch.Tensor], torch.Tensor], - b: torch.Tensor, - nsteps: int = 10, - residual_tol: float = 1e-10 -) -> torch.Tensor: - x = torch.zeros_like(b) - r, p = b.clone(), b.clone() - # Note: should be 'r, p = b - A(x)', but for x=0, A(x)=0. - # Change if doing warm start. - rdotr = r.dot(r) - for i in range(nsteps): - z = Avp(p) - alpha = rdotr / p.dot(z) - x += alpha * p - r -= alpha * z - new_rdotr = r.dot(r) - if new_rdotr < residual_tol: - break - p = r + new_rdotr / rdotr * p - rdotr = new_rdotr - return x - - -def _get_flat_grad(y: torch.Tensor, model: nn.Module, **kwargs: Any) -> torch.Tensor: - grads = torch.autograd.grad(y, model.parameters(), **kwargs) # type: ignore - return torch.cat([grad.reshape(-1) for grad in grads]) - - -def _set_from_flat_params(model: nn.Module, flat_params: torch.Tensor) -> nn.Module: - prev_ind = 0 - for param in model.parameters(): - flat_size = int(np.prod(list(param.size()))) - param.data.copy_( - flat_params[prev_ind:prev_ind + flat_size].view(param.size())) - prev_ind += flat_size - return model - - -class TRPOPolicy(A2CPolicy): +class TRPOPolicy(NPGPolicy): """Implementation of Trust Region Policy Optimization. arXiv:1502.05477. :param torch.nn.Module actor: the actor network following the rules in @@ -94,35 +57,16 @@ def __init__( critic: torch.nn.Module, optim: torch.optim.Optimizer, dist_fn: Type[torch.distributions.Distribution], - advantage_normalization: bool = True, - optim_critic_iters: int = 5, max_kl: float = 0.01, backtrack_coeff: float = 0.8, max_backtracks: int = 10, **kwargs: Any, ) -> None: super().__init__(actor, critic, optim, dist_fn, **kwargs) - del self._weight_vf, self._weight_ent, self._grad_norm - self._norm_adv = advantage_normalization - self._optim_critic_iters = optim_critic_iters + del self._step_size self._max_backtracks = max_backtracks self._delta = max_kl self._backtrack_coeff = backtrack_coeff - # adjusts Hessian-vector product calculation for numerical stability - self.__damping = 0.1 - - def process_fn( - self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray - ) -> Batch: - batch = super().process_fn(batch, buffer, indice) - old_log_prob = [] - with torch.no_grad(): - for b in batch.split(self._batch, shuffle=False, merge_last=True): - old_log_prob.append(self(b).dist.log_prob(b.act)) - batch.logp_old = torch.cat(old_log_prob, dim=0) - if self._norm_adv: - batch.adv = (batch.adv - batch.adv.mean()) / batch.adv.std() - return batch def learn( # type: ignore self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any @@ -152,7 +96,7 @@ def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product kl_v = (flat_kl_grad * v).sum() flat_kl_grad_grad = _get_flat_grad( kl_v, self.actor, retain_graph=True).detach() - return flat_kl_grad_grad + v * self.__damping + return flat_kl_grad_grad + v * self._damping search_direction = -_conjugate_gradients(MVP, flat_grads, nsteps=10) From cbfcab4d2fd8b9b367f61b3cb943ee2ee0ed6810 Mon Sep 17 00:00:00 2001 From: chy <308604256@qq.com> Date: Tue, 20 Apr 2021 12:24:29 +0800 Subject: [PATCH 5/7] revert --- examples/mujoco/mujoco_trpo.py | 1 + examples/mujoco/plotter.py | 21 +++++- examples/mujoco/tools.py | 118 ++------------------------------- 3 files changed, 26 insertions(+), 114 deletions(-) diff --git a/examples/mujoco/mujoco_trpo.py b/examples/mujoco/mujoco_trpo.py 
index 0527b3920..ad99069fa 100644 --- a/examples/mujoco/mujoco_trpo.py +++ b/examples/mujoco/mujoco_trpo.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 + import os import gym import torch diff --git a/examples/mujoco/plotter.py b/examples/mujoco/plotter.py index 39cfbba0e..7454871e7 100755 --- a/examples/mujoco/plotter.py +++ b/examples/mujoco/plotter.py @@ -9,7 +9,7 @@ import matplotlib.ticker as mticker from collections import defaultdict -from tools import find_all_files, group_files, csv2numpy +from tools import find_all_files def smooth(y, radius, mode='two_sided', valid_only=False): @@ -63,6 +63,25 @@ def smooth(y, radius, mode='two_sided', valid_only=False): '#000000', # BLACK ]) + +def csv2numpy(csv_file): + csv_dict = defaultdict(list) + reader = csv.DictReader(open(csv_file)) + for row in reader: + for k, v in row.items(): + csv_dict[k].append(eval(v)) + return {k: np.array(v) for k, v in csv_dict.items()} + + +def group_files(file_list, pattern): + res = defaultdict(list) + for f in file_list: + match = re.search(pattern, f) + key = match.group() if match else '' + res[key].append(f) + return res + + def plot_ax( ax, file_lists, diff --git a/examples/mujoco/tools.py b/examples/mujoco/tools.py index 726cd7427..c4ff6c378 100755 --- a/examples/mujoco/tools.py +++ b/examples/mujoco/tools.py @@ -8,7 +8,6 @@ import numpy as np from typing import Dict, List, Union from tensorboard.backend.event_processing import event_accumulator -from collections import defaultdict def find_all_files(root_dir: str, pattern: re.Pattern) -> List[str]: @@ -21,21 +20,6 @@ def find_all_files(root_dir: str, pattern: re.Pattern) -> List[str]: file_list.append(absolute_path) return file_list -def group_files(file_list, pattern): - res = defaultdict(list) - for f in file_list: - match = re.search(pattern, f) - key = match.group() if match else '' - res[key].append(f) - return res - -def csv2numpy(csv_file): - csv_dict = defaultdict(list) - reader = csv.DictReader(open(csv_file)) - for row in reader: - for k, v in row.items(): - csv_dict[k].append(eval(v)) - return {k: np.array(v) for k, v in csv_dict.items()} def convert_tfevents_to_csv( root_dir: str, refresh: bool = False @@ -101,108 +85,16 @@ def merge_csv( print(f"Output merged csv file to {output_path} with {len(content[1:])} lines.") csv.writer(open(output_path, "w")).writerows(content) -def numerical_anysis(root_dir: str, xlim: int) -> None: - file_pattern = r".*/test_rew_\d+seeds.csv$" - norm_group_pattern = r"(/|^)\w+?\-v(\d|$)" - output_group_pattern = r".*?(?=(/|^)\w+?\-v\d)" - csv_files = find_all_files(root_dir, re.compile(file_pattern)) - norm_group = group_files(csv_files, norm_group_pattern) - output_group = group_files(csv_files, output_group_pattern) - # calculate numerical outcome for each csv_file (y/std integration max_y, final_y) - results = {} - for f in csv_files: - # reader = csv.DictReader(open(f, newline='')) - # result = [] - # for row in reader: - # result.append([row['env_step'], row['rew'], row['rew:shaded']]) - # result = np.array(result).T - # iclip = np.searchsorted(result[0], xlim) - result = csv2numpy(f) - result = np.stack((result['env_step'], result['rew'] - result['rew'][0], result['rew:shaded'])) - iclip = np.searchsorted(result[0], xlim) - - if iclip == 0 or iclip == len(result[0]): - results[f] = None - continue - else: - results[f] = {} - result = result[:, :iclip + 1] - final_rew = np.interp(xlim, result[0], result[1]) - final_rew_std = np.interp(xlim, result[0], result[2]) - result[0, iclip] = xlim - result[1, iclip] = 
final_rew - result[2, iclip] = final_rew_std - results[f]['final_reward'] = final_rew.astype(float) - max_rew = np.max(result[1]) - results[f]['max_reward'] = max_rew.astype(float) - rew_integration = np.trapz(result[1], x=result[0]) - results[f]['reward_integration'] = rew_integration.astype(float) - std_integration = np.trapz(result[2], x=result[0]) - results[f]['reward_std_integration'] = std_integration.astype(float) - # calculate normalised numerical outcome for each csv_file group - for _, fs in norm_group.items(): - maxres = defaultdict(lambda: -np.inf) - # find max for each key - for f in fs: - if not results[f]: - continue - for k, v in results[f].items(): - maxres[k] = v if maxres[k] < v else maxres[k] - # add normalised numerical outcome - for f in fs: - if not results[f]: - continue - new_dict = results[f].copy() - for k, v in results[f].items(): - new_dict[k + ":normalised"] = v / maxres[k] - results[f] = new_dict - # Add all numerical results for each outcome group - output_group - group_results = {} - for g, fs in output_group.items(): - group_results[g] = defaultdict(lambda: 0) - group_n = 0 - for f in fs: - if not results[f]: - continue - group_n += 1 - for k, v in results[f].items(): - group_results[g][k] += v - for k, v in group_results[g].items(): - group_results[g][k] = v / group_n - group_results[g]['group_n'] += group_n - # print all outputs for each csv_file and each outcome group - for f, numerical_result in results.items(): - print("******* " + f + ":") - print(numerical_result) - for g, numerical_result in group_results.items(): - print("******* " + g + ":") - print(numerical_result) if __name__ == "__main__": parser = argparse.ArgumentParser() - sp = parser.add_subparsers(dest='action') - - merge_parser = sp.add_parser('merge') - merge_parser.add_argument( + parser.add_argument('--root-dir', type=str) + parser.add_argument( '--refresh', action="store_true", help="Re-generate all csv files instead of using existing one.") - merge_parser.add_argument( + parser.add_argument( '--remove-zero', action="store_true", help="Remove the data point of env_step == 0.") - merge_parser.add_argument('--root-dir', type=str) - - analysis_parser = sp.add_parser('analysis') - analysis_parser.add_argument('--xlim', type=int, default=1000000, - help='x-axis limitation (default: None)') - analysis_parser.add_argument('--root-dir', type=str) args = parser.parse_args() - - args.action = "analysis" - args.xlim=1000000 - args.root_dir="/home/huayu/git/tianshou/examples/mujoco/ablation_trpo_last" - if args.action == "merge": - csv_files = convert_tfevents_to_csv(args.root_dir, args.refresh) - merge_csv(csv_files, args.root_dir, args.remove_zero) - elif args.action == "analysis": - numerical_anysis(args.root_dir, args.xlim) + csv_files = convert_tfevents_to_csv(args.root_dir, args.refresh) + merge_csv(csv_files, args.root_dir, args.remove_zero) From ff5c1ed7f7b03df537100cacbb140bb342116358 Mon Sep 17 00:00:00 2001 From: Trinkle23897 Date: Wed, 21 Apr 2021 08:44:18 +0800 Subject: [PATCH 6/7] polish --- tianshou/policy/modelfree/npg.py | 113 ++++++++++++++++-------------- tianshou/policy/modelfree/trpo.py | 30 +++----- 2 files changed, 69 insertions(+), 74 deletions(-) diff --git a/tianshou/policy/modelfree/npg.py b/tianshou/policy/modelfree/npg.py index 8b4eed4bc..abb6396af 100644 --- a/tianshou/policy/modelfree/npg.py +++ b/tianshou/policy/modelfree/npg.py @@ -2,56 +2,19 @@ import numpy as np from torch import nn import torch.nn.functional as F +from typing import Any, Dict, List, Type from 
torch.distributions import kl_divergence -from typing import Any, Dict, List, Type, Callable from tianshou.policy import A2CPolicy from tianshou.data import Batch, ReplayBuffer -def _conjugate_gradients( - Avp: Callable[[torch.Tensor], torch.Tensor], - b: torch.Tensor, - nsteps: int = 10, - residual_tol: float = 1e-10 -) -> torch.Tensor: - x = torch.zeros_like(b) - r, p = b.clone(), b.clone() - # Note: should be 'r, p = b - A(x)', but for x=0, A(x)=0. - # Change if doing warm start. - rdotr = r.dot(r) - for i in range(nsteps): - z = Avp(p) - alpha = rdotr / p.dot(z) - x += alpha * p - r -= alpha * z - new_rdotr = r.dot(r) - if new_rdotr < residual_tol: - break - p = r + new_rdotr / rdotr * p - rdotr = new_rdotr - return x - - -def _get_flat_grad(y: torch.Tensor, model: nn.Module, **kwargs: Any) -> torch.Tensor: - grads = torch.autograd.grad(y, model.parameters(), **kwargs) # type: ignore - return torch.cat([grad.reshape(-1) for grad in grads]) - - -def _set_from_flat_params(model: nn.Module, flat_params: torch.Tensor) -> nn.Module: - prev_ind = 0 - for param in model.parameters(): - flat_size = int(np.prod(list(param.size()))) - param.data.copy_( - flat_params[prev_ind:prev_ind + flat_size].view(param.size())) - prev_ind += flat_size - return model +class NPGPolicy(A2CPolicy): + """Implementation of Natural Policy Gradient. + https://proceedings.neurips.cc/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf -class NPGPolicy(A2CPolicy): - """Implementation of Natural Policy Gradient. - https://proceedings.neurips.cc/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf :param torch.nn.Module actor: the actor network following the rules in :class:`~tianshou.policy.BasePolicy`. (s -> logits) :param torch.nn.Module critic: the critic network. 
(s -> V(s)) @@ -89,7 +52,7 @@ def __init__( dist_fn: Type[torch.distributions.Distribution], advantage_normalization: bool = True, optim_critic_iters: int = 5, - actor_step_size: float = 0.5, # TODO + actor_step_size: float = 0.5, **kwargs: Any, ) -> None: super().__init__(actor, critic, optim, dist_fn, **kwargs) @@ -125,7 +88,7 @@ def learn( # type: ignore ratio = (dist.log_prob(b.act) - b.logp_old).exp().float() ratio = ratio.reshape(ratio.size(0), -1).transpose(0, 1) actor_loss = -(ratio * b.adv).mean() - flat_grads = _get_flat_grad( + flat_grads = self._get_flat_grad( actor_loss, self.actor, retain_graph=True).detach() # direction: calculate natural gradient @@ -134,23 +97,16 @@ def learn( # type: ignore kl = kl_divergence(old_dist, dist).mean() # calculate first order gradient of kl with respect to theta - flat_kl_grad = _get_flat_grad(kl, self.actor, create_graph=True) - - def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product - # caculate second order gradient of kl with respect to theta - kl_v = (flat_kl_grad * v).sum() - flat_kl_grad_grad = _get_flat_grad( - kl_v, self.actor, retain_graph=True).detach() - return flat_kl_grad_grad + v * self._damping - - search_direction = -_conjugate_gradients(MVP, flat_grads, nsteps=10) + flat_kl_grad = self._get_flat_grad(kl, self.actor, create_graph=True) + search_direction = -self._conjugate_gradients( + flat_grads, flat_kl_grad, nsteps=10) # step with torch.no_grad(): flat_params = torch.cat([param.data.view(-1) for param in self.actor.parameters()]) new_flat_params = flat_params + self._step_size * search_direction - _set_from_flat_params(self.actor, new_flat_params) + self._set_from_flat_params(self.actor, new_flat_params) new_dist = self(b).dist kl = kl_divergence(old_dist, new_dist).mean() @@ -175,3 +131,52 @@ def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product "loss/vf": vf_losses, "kl": kls, } + + def _MVP(self, v: torch.Tensor, flat_kl_grad: torch.Tensor) -> torch.Tensor: + """Matrix vector product.""" + # caculate second order gradient of kl with respect to theta + kl_v = (flat_kl_grad * v).sum() + flat_kl_grad_grad = self._get_flat_grad( + kl_v, self.actor, retain_graph=True).detach() + return flat_kl_grad_grad + v * self._damping + + def _conjugate_gradients( + self, + b: torch.Tensor, + flat_kl_grad: torch.Tensor, + nsteps: int = 10, + residual_tol: float = 1e-10 + ) -> torch.Tensor: + x = torch.zeros_like(b) + r, p = b.clone(), b.clone() + # Note: should be 'r, p = b - MVP(x)', but for x=0, MVP(x)=0. + # Change if doing warm start. 
+ rdotr = r.dot(r) + for i in range(nsteps): + z = self._MVP(p, flat_kl_grad) + alpha = rdotr / p.dot(z) + x += alpha * p + r -= alpha * z + new_rdotr = r.dot(r) + if new_rdotr < residual_tol: + break + p = r + new_rdotr / rdotr * p + rdotr = new_rdotr + return x + + def _get_flat_grad( + self, y: torch.Tensor, model: nn.Module, **kwargs: Any + ) -> torch.Tensor: + grads = torch.autograd.grad(y, model.parameters(), **kwargs) # type: ignore + return torch.cat([grad.reshape(-1) for grad in grads]) + + def _set_from_flat_params( + self, model: nn.Module, flat_params: torch.Tensor + ) -> nn.Module: + prev_ind = 0 + for param in model.parameters(): + flat_size = int(np.prod(list(param.size()))) + param.data.copy_( + flat_params[prev_ind:prev_ind + flat_size].view(param.size())) + prev_ind += flat_size + return model diff --git a/tianshou/policy/modelfree/trpo.py b/tianshou/policy/modelfree/trpo.py index 6172ead78..9d456878c 100644 --- a/tianshou/policy/modelfree/trpo.py +++ b/tianshou/policy/modelfree/trpo.py @@ -1,16 +1,12 @@ import torch import warnings -import numpy as np -from torch import nn import torch.nn.functional as F +from typing import Any, Dict, List, Type from torch.distributions import kl_divergence -from typing import Any, Dict, List, Type, Callable +from tianshou.data import Batch from tianshou.policy import NPGPolicy -from tianshou.data import Batch, ReplayBuffer -# TODO don't know whether this work or not in library -from .npg import _conjugate_gradients, _get_flat_grad, _set_from_flat_params class TRPOPolicy(NPGPolicy): @@ -80,7 +76,7 @@ def learn( # type: ignore ratio = (dist.log_prob(b.act) - b.logp_old).exp().float() ratio = ratio.reshape(ratio.size(0), -1).transpose(0, 1) actor_loss = -(ratio * b.adv).mean() - flat_grads = _get_flat_grad( + flat_grads = self._get_flat_grad( actor_loss, self.actor, retain_graph=True).detach() # direction: calculate natural gradient @@ -89,20 +85,14 @@ def learn( # type: ignore kl = kl_divergence(old_dist, dist).mean() # calculate first order gradient of kl with respect to theta - flat_kl_grad = _get_flat_grad(kl, self.actor, create_graph=True) - - def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product - # caculate second order gradient of kl with respect to theta - kl_v = (flat_kl_grad * v).sum() - flat_kl_grad_grad = _get_flat_grad( - kl_v, self.actor, retain_graph=True).detach() - return flat_kl_grad_grad + v * self._damping - - search_direction = -_conjugate_gradients(MVP, flat_grads, nsteps=10) + flat_kl_grad = self._get_flat_grad(kl, self.actor, create_graph=True) + search_direction = -self._conjugate_gradients( + flat_grads, flat_kl_grad, nsteps=10) # stepsize: calculate max stepsize constrained by kl bound step_size = torch.sqrt(2 * self._delta / ( - search_direction * MVP(search_direction)).sum(0, keepdim=True)) + search_direction * self._MVP(search_direction, flat_kl_grad) + ).sum(0, keepdim=True)) # stepsize: linesearch stepsize with torch.no_grad(): @@ -110,7 +100,7 @@ def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product for param in self.actor.parameters()]) for i in range(self._max_backtracks): new_flat_params = flat_params + step_size * search_direction - _set_from_flat_params(self.actor, new_flat_params) + self._set_from_flat_params(self.actor, new_flat_params) # calculate kl and if in bound, loss actually down new_dist = self(b).dist new_dratio = ( @@ -127,7 +117,7 @@ def MVP(v: torch.Tensor) -> torch.Tensor: # matrix vector product elif i < self._max_backtracks - 1: step_size = step_size * 
self._backtrack_coeff else: - _set_from_flat_params(self.actor, new_flat_params) + self._set_from_flat_params(self.actor, new_flat_params) step_size = torch.tensor([0.0]) warnings.warn("Line search failed! It seems hyperparamters" " are poor and need to be changed.") From ae0e2892d6accf7f46449531ee128f24a0883fb9 Mon Sep 17 00:00:00 2001 From: Trinkle23897 Date: Wed, 21 Apr 2021 08:46:38 +0800 Subject: [PATCH 7/7] add docs --- README.md | 1 + docs/api/tianshou.policy.rst | 5 +++++ docs/index.rst | 1 + 3 files changed, 7 insertions(+) diff --git a/README.md b/README.md index 861788514..f6d79c56d 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ - [Categorical DQN (C51)](https://arxiv.org/pdf/1707.06887.pdf) - [Quantile Regression DQN (QRDQN)](https://arxiv.org/pdf/1710.10044.pdf) - [Policy Gradient (PG)](https://papers.nips.cc/paper/1713-policy-gradient-methods-for-reinforcement-learning-with-function-approximation.pdf) +- [Natural Policy Gradient (NPG)](https://proceedings.neurips.cc/paper/2001/file/4b86abe48d358ecf194c56c69108433e-Paper.pdf) - [Advantage Actor-Critic (A2C)](https://openai.com/blog/baselines-acktr-a2c/) - [Trust Region Policy Optimization (TRPO)](https://arxiv.org/pdf/1502.05477.pdf) - [Proximal Policy Optimization (PPO)](https://arxiv.org/pdf/1707.06347.pdf) diff --git a/docs/api/tianshou.policy.rst b/docs/api/tianshou.policy.rst index 0664e0107..aa24897c4 100644 --- a/docs/api/tianshou.policy.rst +++ b/docs/api/tianshou.policy.rst @@ -43,6 +43,11 @@ On-policy :undoc-members: :show-inheritance: +.. autoclass:: tianshou.policy.NPGPolicy + :members: + :undoc-members: + :show-inheritance: + .. autoclass:: tianshou.policy.A2CPolicy :members: :undoc-members: diff --git a/docs/index.rst b/docs/index.rst index bd19d5618..08ed3245d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,6 +15,7 @@ Welcome to Tianshou! * :class:`~tianshou.policy.C51Policy` `Categorical DQN `_ * :class:`~tianshou.policy.QRDQNPolicy` `Quantile Regression DQN `_ * :class:`~tianshou.policy.PGPolicy` `Policy Gradient `_ +* :class:`~tianshou.policy.NPGPolicy` `Natural Policy Gradient `_ * :class:`~tianshou.policy.A2CPolicy` `Advantage Actor-Critic `_ * :class:`~tianshou.policy.TRPOPolicy` `Trust Region Policy Optimization `_ * :class:`~tianshou.policy.PPOPolicy` `Proximal Policy Optimization `_
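As a compact reference for the `NPGPolicy` added in this series: the actor step in `NPGPolicy.learn` amounts to the update below, written in standard notation (g_k is the vanilla policy-gradient estimate, alpha is `actor-step-size`, and lambda = 0.1 is the damping term used in the Hessian-vector product). F^{-1} g_k is obtained with conjugate gradient on damped Fisher-vector products rather than by inverting F, and `TRPOPolicy` replaces the fixed alpha with the largest step allowed by `max-kl`, followed by the backtracking line search sketched earlier.

```latex
\[
  \theta_{k+1} \;=\; \theta_k \;+\; \alpha\, F(\theta_k)^{-1} g_k ,
  \qquad
  F(\theta_k) \;=\; \nabla^2_{\theta}\,
      \mathrm{KL}\!\left(\pi_{\theta_k} \,\middle\|\, \pi_{\theta}\right)
      \Big|_{\theta = \theta_k} \;+\; \lambda I .
\]
```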