Merge pull request #53 from genn-team/friendly_var_name
When using VarRecorder you shouldn't have to know the internal GeNN variable names
neworderofjamie authored Mar 7, 2023
2 parents cd01d77 + fd7acef commit 3723622
Showing 13 changed files with 69 additions and 39 deletions.
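
For context, a minimal sketch of the usage this change enables, assuming `pop` is an existing `Population` whose neuron model exposes a friendly `v` attribute backed by the internal GeNN variable `V`:

    from ml_genn.callbacks import VarRecorder

    # Previously the internal GeNN name had to be used:
    #   VarRecorder(pop, "V", key="v_pop")

    # Now the friendly Python attribute name is resolved internally
    recorder = VarRecorder(pop, "v", key="v_pop")

    # Escape hatch for variables without a friendly descriptor:
    # pass the internal name explicitly via genn_var
    raw = VarRecorder(pop, None, genn_var="V", key="v_raw")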
4 changes: 2 additions & 2 deletions docs/usage/callbacks_recording.rst
@@ -9,13 +9,13 @@ objects (or the names of default-constructable callbacks in the same style as neuron
models etc) which defaults to a list containing a :class:`~callbacks.progress_bar.BatchProgressBar`
to show inference progress. However, you could additionally add a :class:`~callbacks.var_recorder.VarRecorder`
callback to a model (where `input` is a :class:`~population.Population` object with a
-neuron model that has a state variable called `V`):
+neuron model that has a state variable called `v`):

.. code-block:: python
from ml_genn.callbacks import VarRecorder
...
callbacks = ["batch_progress_bar", VarRecorder(input, "V", key="v_input")]
callbacks = ["batch_progress_bar", VarRecorder(input, "v", key="v_input")]
metrics, cb_data = compiled_net.evaluate({input: testing_images * 0.01}, {output: testing_labels},
callbacks=callbacks)
2 changes: 1 addition & 1 deletion examples/eprop/pattern_recognition.py
@@ -94,7 +94,7 @@ def alpha_schedule(epoch, alpha):
# Evaluate model on numpy dataset
start_time = perf_counter()
callbacks = ["batch_progress_bar",
-             VarRecorder(output, "V", key="output_v"),
+             VarRecorder(output, "v", key="output_v"),
SpikeRecorder(input, key="input_spikes"),
SpikeRecorder(hidden, key="hidden_spikes"),
OptimiserParamSchedule("alpha", alpha_schedule)]
19 changes: 15 additions & 4 deletions ml_genn/ml_genn/callbacks/var_recorder.py
@@ -2,22 +2,33 @@

from itertools import chain

+from typing import Optional
from .callback import Callback
from ..utils.filter import ExampleFilter, ExampleFilterType, NeuronFilterType
from ..utils.network import PopulationType

from ..utils.filter import get_neuron_filter_mask
from ..utils.network import get_underlying_pop

+from ..utils.value import get_genn_var_name

class VarRecorder(Callback):
-    def __init__(self, pop: PopulationType, var: str, key=None,
+    def __init__(self, pop: PopulationType, var: Optional[str], key=None,
                  example_filter: ExampleFilterType = None,
-                 neuron_filter: NeuronFilterType = None):
+                 neuron_filter: NeuronFilterType = None,
+                 genn_var: Optional[str] = None):
# Get underlying population
# **TODO** handle Connection variables as well
self._pop = get_underlying_pop(pop)
-        self._var = var

+        # Get the name of the GeNN variable corresponding to var
+        if var is not None:
+            self._var = get_genn_var_name(self._pop.neuron, var)
+        elif genn_var is not None:
+            self._var = genn_var
+        else:
+            raise RuntimeError("VarRecorder callback requires a "
+                               "variable to be specified, either "
+                               "via 'var' or 'genn_var' argument")

# Stash key
self.key = key
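
A note on the resolution order in the constructor above: `var` takes precedence over `genn_var`, and omitting both raises at construction time. A small sketch, again assuming `pop` is an existing `Population`:

    from ml_genn.callbacks import VarRecorder

    # var wins when both are given: "v" is resolved via its
    # ValueDescriptor and genn_var is ignored
    VarRecorder(pop, "v", genn_var="SomethingElse")

    # Neither given: RuntimeError raised immediately
    try:
        VarRecorder(pop, None)
    except RuntimeError as e:
        print(e)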
8 changes: 4 additions & 4 deletions ml_genn/ml_genn/neurons/few_spike_relu.py
@@ -27,7 +27,7 @@
// **NOTE** needs to be before applying input as
// spikes from LAST timestep must be processed
$(Fx) += ($(Isyn) * d);
// If this is the first timestep, apply input
// **NOTE** this cannot be done in custom update as it
// needs to occur in the middle of neuron update
@@ -156,6 +156,6 @@ def get_model(self, population, dt):

model = genn_model_upstream_signed if source_signed else genn_model
return NeuronModel(model, "Fx",
{"K": self.k, "Scale": scale,
"SrcScale": source_scale},
{"Fx": 0.0, "V": 0.0})
{"K": self.k, "Scale": scale,
"SrcScale": source_scale},
{"Fx": 0.0, "V": 0.0})
4 changes: 1 addition & 3 deletions ml_genn/ml_genn/neurons/leaky_integrate_fire.py
@@ -4,8 +4,6 @@
from ..utils.model import NeuronModel
from ..utils.value import InitValue, ValueDescriptor

-from ..utils.value import is_value_initializer


class LeakyIntegrateFire(Neuron):
v_thresh = ValueDescriptor("Vthresh")
@@ -17,7 +15,7 @@ class LeakyIntegrateFire(Neuron):
def __init__(self, v_thresh: InitValue = 1.0, v_reset: InitValue = 0.0,
v: InitValue = 0.0, tau_mem: InitValue = 20.0,
tau_refrac: InitValue = None, relative_reset: bool = True,
                 integrate_during_refrac: bool = True,
softmax: bool = False, readout=None):
super(LeakyIntegrateFire, self).__init__(softmax, readout)

12 changes: 6 additions & 6 deletions ml_genn/ml_genn/optimisers/adam.py
@@ -14,7 +14,7 @@
"extra_global_params": [("Alpha", "scalar"),
("MomentScale1", "scalar"),
("MomentScale2", "scalar")],
"var_refs": [("Gradient", "scalar", VarAccessMode_READ_ONLY),
"var_refs": [("Gradient", "scalar", VarAccessMode_READ_ONLY),
("Variable", "scalar")],
"update_code":
"""
@@ -40,7 +40,7 @@ def __init__(self, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon

def set_step(self, genn_cu, step):
assert step >= 0
moment_scale_1 = 1.0 / (1.0 - (self.beta1 ** (step + 1)))
@@ -49,17 +49,17 @@ def set_step(self, genn_cu, step):
genn_cu.extra_global_params["Alpha"].view[:] = self.alpha
genn_cu.extra_global_params["MomentScale1"].view[:] = moment_scale_1
genn_cu.extra_global_params["MomentScale2"].view[:] = moment_scale_2

def get_model(self, gradient_ref, var_ref, zero_gradient: bool):
        model = CustomUpdateModel(
deepcopy(genn_model),
{"Beta1": self.beta1, "Beta2": self.beta2,
"Epsilon": self.epsilon},
{"M": 0.0, "V": 0.0},
{"Gradient": gradient_ref, "Variable": var_ref},
{"Alpha": self.alpha, "FirstMomentScale": 0.0,
{"Alpha": self.alpha, "FirstMomentScale": 0.0,
"SecondMomentScale": 0.0})

        # If an optimiser that automatically zeroes
        # gradients should be provided
if zero_gradient:
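
For reference, the bias-correction factors computed in `set_step` follow the standard Adam scheme; a quick arithmetic check with the default `beta1=0.9`, `beta2=0.999` at the first step:

    beta1, beta2 = 0.9, 0.999
    step = 0

    # Same expressions as set_step above: these scales undo the bias
    # in the first and second moment estimates
    moment_scale_1 = 1.0 / (1.0 - beta1 ** (step + 1))  # 1 / (1 - 0.9)   = 10.0
    moment_scale_2 = 1.0 / (1.0 - beta2 ** (step + 1))  # 1 / (1 - 0.999) = 1000.0

    assert abs(moment_scale_1 - 10.0) < 1e-9
    assert abs(moment_scale_2 - 1000.0) < 1e-6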
2 changes: 1 addition & 1 deletion ml_genn/ml_genn/optimisers/optimiser.py
@@ -10,4 +10,4 @@ def set_step(self, genn_cu, step: int):

@abstractmethod
def get_model(self, gradient_ref, var_ref, zero_gradient: bool):
        pass
2 changes: 1 addition & 1 deletion ml_genn/ml_genn/readouts/spike_count.py
@@ -34,4 +34,4 @@ def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray:

@property
def reset_vars(self):
return [("Scount", "unsigned int", 0)]
return [("Scount", "unsigned int", 0)]
7 changes: 4 additions & 3 deletions ml_genn/ml_genn/readouts/sum_var.py
@@ -36,7 +36,8 @@ def add_readout_logic(self, model: NeuronModel):
model_copy.append_sim_code(
f"$({sum_var_name}) += $({self.output_var_name});")

-        # Add sum variable with same type as output variable and initialise to zero
+        # Add sum variable with same type as output
+        # variable and initialise to zero
model_copy.add_var(sum_var_name, self.output_var_type, 0)

return model_copy
@@ -50,7 +51,7 @@ def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray:
# Return contents, reshaped as desired
return np.reshape(genn_pop.vars[sum_var_name].view,
(batch_size,) + shape)

@property
def reset_vars(self):
        return [(self.output_var_name + "Sum", self.output_var_type, 0.0)]
6 changes: 3 additions & 3 deletions ml_genn/ml_genn/readouts/var.py
@@ -17,15 +17,15 @@ def add_readout_logic(self, model: NeuronModel):

# Find output variable
try:
-            output_var = (v for v in model.model["var_name_types"]
-                          if v[0] == self.output_var_name)
+            _ = next(v for v in model.model["var_name_types"]
+                     if v[0] == self.output_var_name)
except StopIteration:
raise RuntimeError(f"Model does not have variable "
f"{self.output_var_name} to read")

return model

-    def get_readout(self, genn_pop, batch_size:int, shape) -> np.ndarray:
+    def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray:
# Pull variable from genn
genn_pop.pull_var_from_device(self.output_var_name)

8 changes: 4 additions & 4 deletions ml_genn/ml_genn/utils/model.py
@@ -226,8 +226,8 @@ def from_val_descriptors(model, output_var_name, inst, dt,
param_vals={}, var_vals={}, egp_vals={}):
return NeuronModel(
model, output_var_name,
get_values(inst, model["param_name_types"], dt, param_vals),
get_values(inst, model["var_name_types"], dt, var_vals),
get_values(inst, model.get("param_name_types", []), dt, param_vals),
get_values(inst, model.get("var_name_types", []), dt, var_vals),
egp_vals)

@property
@@ -254,8 +254,8 @@ def from_val_descriptors(model, inst, dt,
param_vals={}, var_vals={}, egp_vals={}):
return SynapseModel(
model,
get_values(inst, model["param_name_types"], dt, param_vals),
get_values(inst, model["var_name_types"], dt, var_vals),
get_values(inst, model.get("param_name_types", []), dt, param_vals),
get_values(inst, model.get("var_name_types", []), dt, var_vals),
egp_vals)


22 changes: 17 additions & 5 deletions ml_genn/ml_genn/utils/value.py
@@ -73,13 +73,25 @@ def is_value_initializer(value):
return isinstance(value, Initializer)


+# **THINK** should maybe be a method in a base class for Neuron/Synapse etc
+def get_genn_var_name(inst, name):
+    # Get attribute from instance type
+    d = getattr(type(inst), name)
+
+    # If attribute is a value descriptor, return GeNN name
+    if isinstance(d, ValueDescriptor):
+        return d.genn_name
+    else:
+        raise RuntimeError(f"'{name}' is not a ValueDescriptor")


# **THINK** should maybe be a method in a base class for Neuron/Synapse etc
def get_values(inst, name_types, dt, vals={}):
# Get descriptors
descriptors = getmembers(type(inst), isdatadescriptor)

# Build dictionary mapping GeNN names to var descriptors
    descriptors = {d.genn_name: d for n, d in descriptors
if (isinstance(d, ValueDescriptor)
and d.genn_name is not None)}

@@ -96,13 +108,13 @@ def set_values(inst, vals):
descriptors = getmembers(type(inst), isdatadescriptor)

# Build dictionary mapping GeNN names to var descriptors
    descriptors = {d.genn_name: d for n, d in descriptors
if (isinstance(d, ValueDescriptor)
and d.genn_name is not None)}

# Loop through values
for n, v in vals.items():
        # If there is a descriptor matching
# this name, use it to set variable
if n in descriptors:
            descriptors[n].__set__(inst, v)
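
To illustrate the descriptor lookup that `get_genn_var_name` performs, here is a self-contained sketch; `LIFDemo` and its `v` attribute are hypothetical stand-ins rather than ml_genn classes:

    class ValueDescriptor:
        """Minimal stand-in: just records the internal GeNN name."""
        def __init__(self, genn_name):
            self.genn_name = genn_name

    class LIFDemo:
        v = ValueDescriptor("V")  # friendly name "v" -> GeNN name "V"

    def get_genn_var_name(inst, name):
        # Look the attribute up on the *type* so we get the descriptor
        # object itself rather than an instance value
        d = getattr(type(inst), name)
        if isinstance(d, ValueDescriptor):
            return d.genn_name
        raise RuntimeError(f"'{name}' is not a ValueDescriptor")

    assert get_genn_var_name(LIFDemo(), "v") == "V"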
12 changes: 10 additions & 2 deletions tests/ml_genn/utils/test_value.py
@@ -1,7 +1,7 @@
from ml_genn.utils.value import ValueDescriptor

-from pytest import approx
-from ml_genn.utils.value import get_values
+from pytest import approx, raises
+from ml_genn.utils.value import get_genn_var_name, get_values

class Model:
x = ValueDescriptor("X")
@@ -16,6 +16,14 @@ def __init__(self, x, y, z):
def test_invalid_values():
pass

+def test_get_genn_name():
+    x = Model(1.0, 2.0, 3.0)
+
+    assert get_genn_var_name(x, "x") == "X"
+
+    with raises(AttributeError):
+        get_genn_var_name(x, "n")

def test_get_values():
x = Model(1.0, 2.0, 3.0)

