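"""trainMazeSHELM.py

Train a SHELM PPO agent on the Memory Maze environments, then evaluate it
over a number of test episodes.

Illustrative invocation (all flags are defined in getArgs() below; the
values shown are just the defaults spelled out):

    python trainMazeSHELM.py --env 9x9 --learning_rate 1e-4 --outpath logs/
"""
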
import argparse

import gym
import numpy as np
import torch

from helm.trainers.shelm_trainer import SHELMPPO


def getArgs():
    parser = argparse.ArgumentParser()
    # Training arguments
    parser.add_argument('--adv_norm', action='store_true', help='normalize advantages within each minibatch')
    parser.add_argument('--clip_decay', type=str, default='none', help='schedule for decaying the PPO clip range')
    parser.add_argument('--clip_range', type=float, default=0.2, help='PPO clip range')
    parser.add_argument('--clip_range_vf', type=str, default=None, help='clip range for the value function (None disables clipping)')
    parser.add_argument('--end_fraction', type=float, default=1, help='fraction of training at which decay schedules end')
    parser.add_argument('--ent_coef', type=float, default=5e-2, help='entropy bonus coefficient')
    parser.add_argument('--ent_decay', type=str, default='none', help='schedule for decaying the entropy coefficient')
    parser.add_argument('--ent_decay_factor', type=float, default=0.99, help='multiplicative factor for entropy decay')
    parser.add_argument('--gamma', type=float, default=0.99, help='discount factor')
    parser.add_argument('--gae_lambda', type=float, default=0.99, help='lambda for generalized advantage estimation')
    parser.add_argument('--max_grad_norm', type=float, default=0.5, help='gradient clipping norm')
    parser.add_argument('--min_ent_coef', type=float, default=0, help='lower bound for the entropy coefficient')
    parser.add_argument('--min_lr', type=float, default=0, help='lower bound for the learning rate')
    parser.add_argument('--n_envs', type=int, default=1, help='number of parallel environments')
    parser.add_argument('--n_epochs', type=int, default=3, help='number of optimization epochs per update')
    parser.add_argument('--n_steps', type=int, default=150000000, help='total number of environment steps')
    parser.add_argument('--n_rollout_steps', type=int, default=128, help='number of rollout steps per update')
    parser.add_argument('--learning_rate', type=float, default=1e-4, help='initial learning rate')
    parser.add_argument('--lr_decay', type=str, default='none', help='schedule for decaying the learning rate')
    parser.add_argument('--seed', type=int, default=0, help='random seed for reproducibility')
    parser.add_argument('--start_fraction', type=float, default=0, help='fraction of training at which decay schedules start')
    parser.add_argument('--vf_coef', type=float, default=0.5, help='value function loss coefficient')
    # Environment arguments
    parser.add_argument('--env', type=str, default='9x9', help='size of the Memory Maze environment to train on (9x9, 11x11, 13x13, or 15x15)')
    parser.add_argument('--test_runs', type=int, default=100, help='number of evaluation episodes')
    parser.add_argument('--weights_path', type=str, default=None, help='path to pretrained weights; if given, training is skipped and the model is evaluated')
    # Logging arguments
    parser.add_argument('--outpath', type=str, default='logs/', help='directory for the tensorboard logs')
    parser.add_argument('--save_ckpt', action=argparse.BooleanOptionalAction, default=True, help='whether to save model checkpoints')
    return parser.parse_args()


if __name__ == '__main__':
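    # SHELM-specific hyperparameters handed to the trainer via its config
    # argument (the remaining PPO settings come from the CLI flags above).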
    config = {
        "n_batches": 8,
        "batch_size": 16,
        "beta": 100,
        "beta_lr": 1e-3,
        "beta_schedule": "none",
        "mem_len": 511,
        "min_ent_coef": 0,
        "model": "HELM",
        "optimizer": "AdamW",
        "epsilon": 1e-8,
        "topk": 1,
        "learning_rate": 1e-4,
    }
    args = getArgs()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Select the Memory Maze variant by grid size.
    if args.env == '9x9':
        env = gym.make('memory_maze:MemoryMaze-9x9-v0')
    elif args.env == '11x11':
        env = gym.make('memory_maze:MemoryMaze-11x11-v0')
    elif args.env == '13x13':
        env = gym.make('memory_maze:MemoryMaze-13x13-v0')
    elif args.env == '15x15':
        env = gym.make('memory_maze:MemoryMaze-15x15-v0')
    else:
        raise ValueError(f"{args.env} is not a valid environment size; choose 9x9, 11x11, 13x13, or 15x15")
    model = SHELMPPO("MlpPolicy", env, verbose=1, tensorboard_log=args.outpath, lr_decay=args.lr_decay,
                     ent_coef=args.ent_coef, ent_decay=args.ent_decay, learning_rate=args.learning_rate,
                     vf_coef=args.vf_coef, n_epochs=args.n_epochs, ent_decay_factor=args.ent_decay_factor,
                     clip_range=args.clip_range, gamma=args.gamma, gae_lambda=args.gae_lambda,
                     n_steps=args.n_rollout_steps, n_envs=args.n_envs, min_lr=args.min_lr,
                     min_ent_coef=args.min_ent_coef, start_fraction=args.start_fraction,
                     end_fraction=args.end_fraction, device=device, clip_decay=args.clip_decay,
                     config=config, clip_range_vf=args.clip_range_vf, seed=args.seed,
                     max_grad_norm=args.max_grad_norm, adv_norm=args.adv_norm,
                     save_ckpt=args.save_ckpt)
    if args.weights_path is None:
        model = model.learn(total_timesteps=args.n_steps, eval_log_path=args.outpath)
    else:
        # In SB3-style trainers load() is a classmethod that returns a new model
        # rather than mutating the instance, so rebind the result.
        model = SHELMPPO.load(args.weights_path, env=env)
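    # Evaluate the policy over args.test_runs episodes, tracking episode
    # length, success, and cumulative reward.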
    env_lengths = []
    success = []
    rewards = []
    for i in range(args.test_runs):
        # gym >= 0.26 reset() returns an (observation, info) tuple.
        obs, info = env.reset()
        length = 0
        rew_sum = 0
        done, timeout = False, False
        # step() returns (obs, reward, terminated, truncated, info); the episode
        # ends on either termination or truncation (timeout).
        while not (done or timeout):
            # predict() returns the action and the recurrent hidden state; only
            # the action is needed to step the environment.
            action, hidden_state = model.predict(obs)
            obs, rew, done, timeout, info = env.step(action)
            rew_sum += rew
            length += 1
        env_lengths.append(length)
        success.append(done and not timeout)
        rewards.append(rew_sum)
    print('Avg episode length:', np.mean(env_lengths))
    print('Success Rate:', sum(success) / len(success))
    print('Average Reward:', np.mean(rewards))
    print()
    print("Raw Data:")
    print("Env Lengths")
    print(env_lengths)
    print("Successes")
    print(success)
    print("Rewards")
    print(rewards)
    env.close()