forked from bjorneju/substrate_modeler
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathunit_functions_old.py
More file actions
278 lines (218 loc) · 7.63 KB
/
unit_functions_old.py
File metadata and controls
278 lines (218 loc) · 7.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
import numpy as np
import pyphi
"""
TODO:
- move all validation checks to validation function
"""
def sigmoid(
    unit,
    i=0,
    input_weights=None,
    determinism=None,
    threshold=None,
    floor=None,
    ceiling=None,
):
    """Build the TPM of a sigmoid (logistic) unit.

    P(unit ON | input state) is a logistic function of the weighted sum of the
    input states, rescaled to lie in [ceiling * floor, ceiling].

    Args:
        unit: the unit object (unused here; kept for the common unit-function
            interface). `i` is likewise part of the shared signature.
        input_weights (sequence of float): one weight per input.
        determinism (float): slope (steepness) of the logistic.
        threshold (float): input sum at which the logistic is at its midpoint.
        floor (float): lower bound of activation probability (before ceiling scaling).
        ceiling (float): upper bound of activation probability.

    Returns:
        np.ndarray: multidimensional state-by-node TPM.
    """

    def logistic(x):
        # Logistic curve rescaled to the [ceiling * floor, ceiling] band.
        return ceiling * (
            floor + (1 - floor) / (1 + np.exp(-determinism * (x - threshold)))
        )

    n_nodes = len(input_weights)
    weights = np.asarray(input_weights)
    # One row per possible input state: P(unit ON | state).
    tpm = np.array(
        [
            [logistic(float(np.dot(state, weights)))]
            for state in pyphi.utils.all_states(n_nodes)
        ]
    )
    return pyphi.convert.to_multidimensional(tpm)
def sor_gate(
    unit, i=0, pattern_selection=None, selectivity=None, floor=None, ceiling=None
):
    """Build the TPM of a state-dependent OR ("SOR") gate.

    The TPM is uniform (= floor) for every input state except the unit's
    current input state (a form of short-term plasticity): for that state the
    activation probability is `ceiling` if it matches one of the patterns in
    `pattern_selection`, otherwise floor + (ceiling - floor) / selectivity.

    Args:
        unit: unit object; must expose input_state, state, inputs, label,
            params, set_input_state and set_state.
        pattern_selection (iterable of state tuples): preferred input patterns.
        selectivity (float): > 1; how strongly patterns are preferred.
        floor (float): baseline activation probability.
        ceiling (float): activation probability for a matched pattern (capped at 1).

    Returns:
        np.ndarray: TPM indexed by input state.
    """
    # Ensure states are tuples so they can be compared to / index the TPM.
    pattern_selection = list(map(tuple, pattern_selection))
    # Ceiling is a probability, so cap it at 1.
    if ceiling > 1:
        ceiling = 1.0
    # Selectivity must be larger than 1; otherwise fall back to its inverse.
    if not selectivity > 1:
        print(
            "Selectivity for SOR gates must be bigger than 1, adjusting to inverse of value given."
        )
        selectivity = 1 / selectivity
    # Default to the all-off input state when none was provided.
    # (fix: compare to None with `is`, not `==`)
    if unit.input_state is None:
        print(
            "input state not given for {} unit {}. Setting to all off.".format(
                unit.params["mechanism"], unit.label
            )
        )
        unit.set_input_state((0,) * len(unit.inputs))
    # Default to the off state when none was provided.
    if unit.state is None:
        print("State not given unit {}. Setting to off.".format(unit.label))
        unit.set_state((0,))
    # The tpm is uniform for all states except the input state (i.e. short term plasticity)
    tpm = np.ones([2] * (len(unit.input_state))) * floor
    # If the input state matches a pattern in pattern_selection, the activation
    # probability for that state is ceiling; otherwise it is raised by a fraction
    # of (ceiling - floor) determined by selectivity.
    not_pattern = floor + (ceiling - floor) / selectivity
    tpm[unit.input_state] = (
        ceiling if unit.input_state in pattern_selection else not_pattern
    )
    return tpm
def copy_gate(unit, i=0, floor=None, ceiling=None):
    """COPY: the unit's activation probability tracks its single input.

    Returns a length-2 TPM: P(ON | input off) = floor, P(ON | input on) = ceiling.
    """
    return np.array([floor, ceiling], dtype=float)
def and_gate(unit, i=0, floor=None, ceiling=None):
    """AND: activates (probability ceiling) only when both inputs are on.

    Returns a 2x2 TPM with P(ON) = floor everywhere except state (1, 1).
    """
    tpm = np.full((2, 2), float(floor))
    tpm[1, 1] = ceiling
    return tpm
def or_gate(unit, i=0, floor=None, ceiling=None):
    """OR: activates (probability ceiling) unless both inputs are off.

    Returns a 2x2 TPM with P(ON) = ceiling everywhere except state (0, 0).
    """
    tpm = np.full((2, 2), float(ceiling))
    tpm[0, 0] = floor
    return tpm
def xor_gate(unit, i=0, floor=None, ceiling=None):
    """XOR: activates (probability ceiling) when exactly one input is on.

    Returns a 2x2 TPM with P(ON) = ceiling for states (0, 1) and (1, 0),
    and floor otherwise.
    """
    tpm = np.full((2, 2), float(floor))
    for mixed_state in ((0, 1), (1, 0)):
        tpm[mixed_state] = ceiling
    return tpm
def weighted_mean(unit, i=0, weights=(), floor=None, ceiling=None):
    """Unit whose activation probability is a weighted mean of its input states.

    For each input state, every input s in {0, 1} contributes
    (1 + w * (2s - 1)) / 2 with its normalized weight w; the average over all
    inputs is rescaled linearly into [floor, ceiling].

    Args:
        unit, i: part of the shared unit-function interface (unused).
        weights (sequence of float): one weight per input; normalized to sum to 1.
            (fix: default changed from a mutable list to an immutable tuple)
        floor (float): minimum activation probability.
        ceiling (float): maximum activation probability.

    Returns:
        np.ndarray: TPM of shape (2,) * len(weights).
    """
    # Normalize weights to sum to 1 (vectorized instead of per-element).
    norm_weights = np.asarray(weights, dtype=float)
    norm_weights = norm_weights / norm_weights.sum()
    N = len(norm_weights)
    tpm = np.ones((2,) * N)
    for state in pyphi.utils.all_states(N):
        # fix: local no longer shadows the function name `weighted_mean`
        mean_activation = (
            sum((1 + w * (s * 2 - 1)) / 2 for w, s in zip(norm_weights, state)) / N
        )
        tpm[state] = mean_activation * (ceiling - floor) + floor
    return tpm
def democracy(unit, i=0, floor=None, ceiling=None):
    """Unit whose activation probability is the fraction of inputs that are on.

    The mean input state (a "vote share" in [0, 1]) is mapped linearly onto
    [floor, ceiling].
    """
    n_inputs = len(unit.inputs)
    tpm = np.ones((2,) * n_inputs)
    for input_state in pyphi.utils.all_states(n_inputs):
        # Average vote, rescaled into [floor, ceiling].
        tpm[input_state] = np.mean(input_state) * (ceiling - floor) + floor
    return tpm
def majority(unit, i=0, floor=None, ceiling=None):
    """Unit that activates when a majority of its inputs are on.

    The mean input state is rounded (Python banker's rounding, so an exact tie
    rounds to 0), giving ceiling for a majority-on state and floor otherwise.
    """
    n_inputs = len(unit.inputs)
    tpm = np.ones((2,) * n_inputs)
    for input_state in pyphi.utils.all_states(n_inputs):
        majority_vote = round(np.mean(input_state))
        tpm[input_state] = majority_vote * (ceiling - floor) + floor
    return tpm
def mismatch_corrector(unit, i=0, floor=None, ceiling=None):
    """Unit that reacts to a mismatch between its own state and its input.

    If the unit's state matches its (single) input state, the next-state
    distribution is uniform (0.5); otherwise the unit activates with
    probability ceiling when the input is on and floor when it is off.

    Args:
        unit: unit object; must expose input_state, state, inputs, label,
            params, set_input_state, set_state and set_inputs.
        floor (float): activation probability given input off (mismatch case).
        ceiling (float): activation probability given input on (mismatch case).

    Returns:
        np.ndarray: length-2 TPM over the single input's state.
    """
    # Default to the all-off input state when none was provided.
    # (fix: compare to None with `is`, not `==`)
    if unit.input_state is None:
        print(
            "input state not given for {} unit {}. Setting to all off.".format(
                unit.params["mechanism"], unit.label
            )
        )
        unit.set_input_state((0,) * len(unit.inputs))
    # Default to the off state when none was provided.
    if unit.state is None:
        print("State not given unit {}. Setting to off.".format(unit.label))
        unit.set_state((0,))
    # This mechanism accepts exactly one input; keep only the first.
    if len(unit.inputs) > 1:
        # fix: format string had two placeholders but only one argument,
        # which raised IndexError whenever this branch was reached.
        print(
            "Unit {} has too many inputs for mechanism of type {}. Using only first input.".format(
                unit.label, unit.params["mechanism"]
            )
        )
        # fix: unit.inputs[[0]] fails (TypeError) for tuple/list inputs;
        # take the first element directly, keeping the tuple interface.
        unit.set_inputs((unit.inputs[0],))
    # Check whether the unit's state matches its input, and build the TPM accordingly.
    if unit.state == unit.input_state:
        tpm = np.ones([2]) * 0.5
    else:
        tpm = np.array([floor, ceiling])
    return tpm
def modulated_sigmoid(
    unit,
    i=0,
    input_weights=None,
    determinism=None,
    threshold=None,
    floor=None,
    ceiling=None,
):
    """Build the TPM of a sigmoid unit whose threshold is modulated.

    The last input is the modulator: when it is ON, the sigmoid threshold is
    shifted linearly by input_weights[-1]. The remaining inputs contribute a
    weighted sum that is passed through the logistic.

    Args:
        unit, i: part of the shared unit-function interface (unused).
        input_weights (sequence of float): weights for the non-modulator inputs,
            with the threshold shift for the modulator in the last position.
        determinism (float): slope (steepness) of the logistic.
        threshold (float): unmodulated midpoint of the logistic.
        floor (float): lower bound of activation probability (before ceiling scaling).
        ceiling (float): upper bound of activation probability.

    Returns:
        np.ndarray: multidimensional state-by-node TPM.
    """

    def logistic(x, thresh):
        # Logistic curve rescaled to the [ceiling * floor, ceiling] band.
        return ceiling * (
            floor + (1 - floor) / (1 + np.exp(-determinism * (x - thresh)))
        )

    n_nodes = len(input_weights)
    weights = np.asarray(input_weights[:-1])
    shift = input_weights[-1]
    # One row per possible input state; the modulator (last element of the
    # state) shifts the effective threshold.
    tpm = np.array(
        [
            [
                logistic(
                    float(np.dot(state[:-1], weights)),
                    threshold - shift * state[-1],
                )
            ]
            for state in pyphi.utils.all_states(n_nodes)
        ]
    )
    return pyphi.convert.to_multidimensional(tpm)
def biased_sigmoid(
    unit,
    i=0,
    input_weights=None,
    determinism=None,
    threshold=None,
    floor=None,
    ceiling=None,
):
    """Build the TPM of a sigmoid unit biased toward its last input's state.

    The last input is the biasing unit, with bias factor input_weights[-1].
    The bias rescales the sigmoid activation probability toward the biaser's
    state: if the biaser is OFF, the activation probability is divided by the
    factor; if it is ON, 1 - activation probability is divided by the factor
    (reducing the probability of NOT activating). The remaining inputs feed a
    weighted sum through the logistic.

    Args:
        unit, i: part of the shared unit-function interface (unused).
        input_weights (sequence of float): weights for the non-biasing inputs,
            with the bias factor in the last position.
        determinism (float): slope (steepness) of the logistic.
        threshold (float): midpoint of the logistic.
        floor (float): lower bound of activation probability (before ceiling scaling).
        ceiling (float): upper bound of activation probability.

    Returns:
        np.ndarray: multidimensional state-by-node TPM.
    """

    def logistic(x):
        # Logistic curve rescaled to the [ceiling * floor, ceiling] band.
        return ceiling * (
            floor + (1 - floor) / (1 + np.exp(-determinism * (x - threshold)))
        )

    n_nodes = len(input_weights)
    weights = np.asarray(input_weights[:-1])
    bias = input_weights[-1]

    def biased_probability(state):
        # fix: the original evaluated the identical LogFunc expression twice
        # inside one conditional expression; compute it once and rescale.
        p = logistic(float(np.dot(state[:-1], weights)))
        return p / bias if state[-1] == 0 else 1 - (1 - p) / bias

    tpm = np.array(
        [[biased_probability(state)] for state in pyphi.utils.all_states(n_nodes)]
    )
    return pyphi.convert.to_multidimensional(tpm)