main.py
"""
An example using the rover domain gym-style interface and the standard, included CCEA learning algorithms.
This is a minimal example, showing the minimal Gym interface.
"""
import numpy as np
from rover_domain_core_gym import RoverDomainGym
import code.ccea_2 as ccea            # CCEA populations + evolution (repo-local "code" package)
import code.agent_domain_2 as domain  # agent/observation processing helpers
import mods                           # optional domain modifications (only used in commented-out calls)
from multiq.learner import learner    # unused in this minimal example
from sys import argv, exit            # argv unused here
import pickle
from collections import deque
episodeCount = 15000  # number of learning episodes (CCEA generations)
populationSize = 50   # placeholder; re-derived from the actual populations below
nagents = 10
RENDER = 0            # unused in this minimal example

sim = RoverDomainGym(nagents, 50)
# mods.recipePoi(sim)
obs = sim.reset()
sim.data["Coupling"] = 5               # rovers required to jointly observe a POI
sim.data["Number of Agents"] = nagents
sim.data["Act Freq"] = 10

obs_size = len(obs[0])

# initCcea returns a setup function that is applied to the simulation data;
# each agent gets a population of small policy networks mapping observations
# to two action outputs.
ccea.initCcea(input_shape=obs_size, num_outputs=2, num_units=32)(sim.data)
populationSize = len(sim.data["Agent Populations"][0])

# Rolling buffers of the most recent trajectories, one set per world index.
SAMPLES = 500
STATES = [deque(maxlen=SAMPLES) for _ in range(populationSize)]
ACTIONS = [deque(maxlen=SAMPLES) for _ in range(populationSize)]
G = [deque(maxlen=SAMPLES) for _ in range(populationSize)]  # global rewards
D = [deque(maxlen=SAMPLES) for _ in range(populationSize)]  # per-agent (difference) rewards
COUNT = 0  # episodes in which the target global reward was reached
for episodeIndex in range(episodeCount):
    sim.data["Episode Index"] = episodeIndex
    GlobalRewards = []
    DiffRewards = []

    # Evaluate each population member in its own rollout ("world").
    for worldIndex in range(populationSize):
        sim.data["World Index"] = worldIndex
        obs = sim.reset()
        ccea.assignCceaPolicies(sim.data)
        # mods.assignHomogeneousPolicy(sim)
        done = False
        stepCount = 0
        while not done:
            domain.doAgentProcess(sim.data)  # query each agent's policy for an action
            jointAction = sim.data["Agent Actions"]
            obs2, reward, done, info = sim.step(jointAction)
            STATES[worldIndex].append(np.array(obs))
            ACTIONS[worldIndex].append(np.array(jointAction))
            G[worldIndex].append(sim.data["Global Reward"])
            D[worldIndex].append(np.array(sim.data["Agent Rewards"]))
            obs = obs2
            stepCount += 1
            # print(reward)

        if sim.data["Global Reward"] == 1.75:  # target score for this configuration
            COUNT += 1
        GlobalRewards.append(sim.data["Global Reward"])
        DiffRewards.append(sim.data["Agent Rewards"])
        ccea.rewardCceaPolicies(sim.data)  # assign fitness to the policies just evaluated

    # Report the best-performing world of this generation.
    idx = np.argmax(GlobalRewards)
    print(episodeIndex, GlobalRewards[idx], DiffRewards[idx])

    # Dump the buffered trajectories and populations, then stop.
    if COUNT > 10:
        with open("data13.pkl", "wb") as f:
            # data = [STATES, np.array(sim.data["Agent Position History"]), sim.data["Agent Policies"]]
            data = [STATES, ACTIONS, G, D, sim.data["Agent Populations"]]
            pickle.dump(data, f)
        exit()

    ccea.evolveCceaPolicies(sim.data)
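# A minimal sketch (commented out) of reading the dump back, assuming only the
# [STATES, ACTIONS, G, D, populations] layout written above:
#
#     import pickle
#     with open("data13.pkl", "rb") as f:
#         STATES, ACTIONS, G, D, populations = pickle.load(f)
#     # one buffer per world; each STATES[w] holds up to SAMPLES joint observations
#     print(len(STATES), len(STATES[0]))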