diff --git a/docs/usage/callbacks_recording.rst b/docs/usage/callbacks_recording.rst
index a53566f4..e9471247 100644
--- a/docs/usage/callbacks_recording.rst
+++ b/docs/usage/callbacks_recording.rst
@@ -9,13 +9,13 @@
 objects (or the names of default-constructable callbacks in the same style as
 neuron models etc) which defaults to a list containing a
 :class:`~callbacks.progress_bar.BatchProgressBar` to show inference progress.
 However, you could additionally add a
 :class:`~callbacks.var_recorder.VarRecorder` callback to a model (where
 `input` is a :class:`~population.Population` object with a
-neuron model that has a state variable called `V`):
+neuron model that has a state variable called `v`):
 
 .. code-block:: python
 
     from ml_genn.callbacks import VarRecorder
     ...
-    callbacks = ["batch_progress_bar", VarRecorder(input, "V", key="v_input")]
+    callbacks = ["batch_progress_bar", VarRecorder(input, "v", key="v_input")]
 
     metrics, cb_data = compiled_net.evaluate({input: testing_images * 0.01},
                                              {output: testing_labels},
                                              callbacks=callbacks)
diff --git a/examples/eprop/pattern_recognition.py b/examples/eprop/pattern_recognition.py
index 37d4e7e8..a00d4266 100644
--- a/examples/eprop/pattern_recognition.py
+++ b/examples/eprop/pattern_recognition.py
@@ -94,7 +94,7 @@ def alpha_schedule(epoch, alpha):
 # Evaluate model on numpy dataset
 start_time = perf_counter()
 callbacks = ["batch_progress_bar",
-             VarRecorder(output, "V", key="output_v"),
+             VarRecorder(output, "v", key="output_v"),
              SpikeRecorder(input, key="input_spikes"),
              SpikeRecorder(hidden, key="hidden_spikes"),
              OptimiserParamSchedule("alpha", alpha_schedule)]
diff --git a/ml_genn/ml_genn/callbacks/var_recorder.py b/ml_genn/ml_genn/callbacks/var_recorder.py
index 92f67cec..a430ff0f 100644
--- a/ml_genn/ml_genn/callbacks/var_recorder.py
+++ b/ml_genn/ml_genn/callbacks/var_recorder.py
@@ -2,22 +2,33 @@
 from itertools import chain
+from typing import Optional
 
 from .callback import Callback
 from ..utils.filter import ExampleFilter, ExampleFilterType, NeuronFilterType
 from ..utils.network import PopulationType
 from ..utils.filter import get_neuron_filter_mask
 from ..utils.network import get_underlying_pop
-
+from ..utils.value import get_genn_var_name
 
 class VarRecorder(Callback):
-    def __init__(self, pop: PopulationType, var: str, key=None,
+    def __init__(self, pop: PopulationType, var: Optional[str], key=None,
                  example_filter: ExampleFilterType = None,
-                 neuron_filter: NeuronFilterType = None):
+                 neuron_filter: NeuronFilterType = None,
+                 genn_var: Optional[str] = None):
         # Get underlying population
         # **TODO** handle Connection variables as well
         self._pop = get_underlying_pop(pop)
-        self._var = var
+
+        # Get the name of the GeNN variable corresponding to var
+        if var is not None:
+            self._var = get_genn_var_name(self._pop.neuron, var)
+        elif genn_var is not None:
+            self._var = genn_var
+        else:
+            raise RuntimeError("VarRecorder callback requires a "
+                               "variable to be specified, either "
+                               "via the 'var' or 'genn_var' argument")
 
         # Stash key
         self.key = key
diff --git a/ml_genn/ml_genn/neurons/few_spike_relu.py b/ml_genn/ml_genn/neurons/few_spike_relu.py
index dd3ed290..7434dbec 100644
--- a/ml_genn/ml_genn/neurons/few_spike_relu.py
+++ b/ml_genn/ml_genn/neurons/few_spike_relu.py
@@ -27,7 +27,7 @@
     // **NOTE** needs to be before applying input as
    // spikes from LAST timestep must be processed
     $(Fx) += ($(Isyn) * d);
-    
+
     // If this is the first timestep, apply input
     // **NOTE** this cannot be done in custom update as it
     // needs to occur in the middle of neuron update
@@ -156,6 +156,6 @@ def get_model(self, population, dt):
 
         model = genn_model_upstream_signed if source_signed else genn_model
         return NeuronModel(model, "Fx",
-                           {"K": self.k, "Scale": scale,
-                            "SrcScale": source_scale},
-                           {"Fx": 0.0, "V": 0.0})
+                           {"K": self.k, "Scale": scale,
+                            "SrcScale": source_scale},
+                           {"Fx": 0.0, "V": 0.0})
diff --git a/ml_genn/ml_genn/neurons/leaky_integrate_fire.py b/ml_genn/ml_genn/neurons/leaky_integrate_fire.py
index 05870339..ec6c6e31 100644
--- a/ml_genn/ml_genn/neurons/leaky_integrate_fire.py
+++ b/ml_genn/ml_genn/neurons/leaky_integrate_fire.py
@@ -4,8 +4,6 @@
 from ..utils.model import NeuronModel
 from ..utils.value import InitValue, ValueDescriptor
-from ..utils.value import is_value_initializer
-
 
 class LeakyIntegrateFire(Neuron):
     v_thresh = ValueDescriptor("Vthresh")
@@ -17,7 +15,7 @@ class LeakyIntegrateFire(Neuron):
     def __init__(self, v_thresh: InitValue = 1.0, v_reset: InitValue = 0.0,
                  v: InitValue = 0.0, tau_mem: InitValue = 20.0,
                  tau_refrac: InitValue = None, relative_reset: bool = True,
-                 integrate_during_refrac: bool = True, 
+                 integrate_during_refrac: bool = True,
                  softmax: bool = False, readout=None):
         super(LeakyIntegrateFire, self).__init__(softmax, readout)
diff --git a/ml_genn/ml_genn/optimisers/adam.py b/ml_genn/ml_genn/optimisers/adam.py
index c6c57fc8..66712dcf 100644
--- a/ml_genn/ml_genn/optimisers/adam.py
+++ b/ml_genn/ml_genn/optimisers/adam.py
@@ -14,7 +14,7 @@
     "extra_global_params": [("Alpha", "scalar"),
                             ("MomentScale1", "scalar"),
                             ("MomentScale2", "scalar")],
-    "var_refs": [("Gradient", "scalar", VarAccessMode_READ_ONLY), 
+    "var_refs": [("Gradient", "scalar", VarAccessMode_READ_ONLY),
                  ("Variable", "scalar")],
 
     "update_code": """
@@ -40,7 +40,7 @@ def __init__(self, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
         self.beta1 = beta1
         self.beta2 = beta2
         self.epsilon = epsilon
-    
+
     def set_step(self, genn_cu, step):
         assert step >= 0
         moment_scale_1 = 1.0 / (1.0 - (self.beta1 ** (step + 1)))
@@ -49,17 +49,17 @@ def set_step(self, genn_cu, step):
         genn_cu.extra_global_params["Alpha"].view[:] = self.alpha
         genn_cu.extra_global_params["MomentScale1"].view[:] = moment_scale_1
         genn_cu.extra_global_params["MomentScale2"].view[:] = moment_scale_2
-    
+
     def get_model(self, gradient_ref, var_ref, zero_gradient: bool):
-        model = CustomUpdateModel( 
+        model = CustomUpdateModel(
             deepcopy(genn_model),
             {"Beta1": self.beta1, "Beta2": self.beta2,
              "Epsilon": self.epsilon},
             {"M": 0.0, "V": 0.0},
             {"Gradient": gradient_ref, "Variable": var_ref},
-            {"Alpha": self.alpha, "FirstMomentScale": 0.0, 
+            {"Alpha": self.alpha, "FirstMomentScale": 0.0,
              "SecondMomentScale": 0.0})
-    
+
         # If an optimiser that automatically zeros
         # gradients should be provided
         if zero_gradient:
diff --git a/ml_genn/ml_genn/optimisers/optimiser.py b/ml_genn/ml_genn/optimisers/optimiser.py
index 39e26471..ca0619e8 100644
--- a/ml_genn/ml_genn/optimisers/optimiser.py
+++ b/ml_genn/ml_genn/optimisers/optimiser.py
@@ -10,4 +10,4 @@ def set_step(self, genn_cu, step: int):
 
     @abstractmethod
     def get_model(self, gradient_ref, var_ref, zero_gradient: bool):
-        pass
\ No newline at end of file
+        pass
diff --git a/ml_genn/ml_genn/readouts/spike_count.py b/ml_genn/ml_genn/readouts/spike_count.py
index 86b32ffa..b12a2762 100644
--- a/ml_genn/ml_genn/readouts/spike_count.py
+++ b/ml_genn/ml_genn/readouts/spike_count.py
@@ -34,4 +34,4 @@ def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray:
 
     @property
     def reset_vars(self):
-        return [("Scount", "unsigned int", 0)]
\ No newline at end of file
+        return [("Scount", "unsigned int", 0)]
return [("Scount", "unsigned int", 0)] diff --git a/ml_genn/ml_genn/readouts/sum_var.py b/ml_genn/ml_genn/readouts/sum_var.py index eaba76a7..1b5a0200 100644 --- a/ml_genn/ml_genn/readouts/sum_var.py +++ b/ml_genn/ml_genn/readouts/sum_var.py @@ -36,7 +36,8 @@ def add_readout_logic(self, model: NeuronModel): model_copy.append_sim_code( f"$({sum_var_name}) += $({self.output_var_name});") - # Add sum variable with same type as output variable and initialise to zero + # Add sum variable with same type as output + # variable and initialise to zero model_copy.add_var(sum_var_name, self.output_var_type, 0) return model_copy @@ -50,7 +51,7 @@ def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray: # Return contents, reshaped as desired return np.reshape(genn_pop.vars[sum_var_name].view, (batch_size,) + shape) - + @property def reset_vars(self): - return [(self.output_var_name + "Sum", self.output_var_type, 0.0)] \ No newline at end of file + return [(self.output_var_name + "Sum", self.output_var_type, 0.0)] diff --git a/ml_genn/ml_genn/readouts/var.py b/ml_genn/ml_genn/readouts/var.py index f7155352..a3edcc7b 100644 --- a/ml_genn/ml_genn/readouts/var.py +++ b/ml_genn/ml_genn/readouts/var.py @@ -17,15 +17,15 @@ def add_readout_logic(self, model: NeuronModel): # Find output variable try: - output_var = (v for v in model.model["var_name_types"] - if v[0] == self.output_var_name) + _ = (v for v in model.model["var_name_types"] + if v[0] == self.output_var_name) except StopIteration: raise RuntimeError(f"Model does not have variable " f"{self.output_var_name} to read") return model - def get_readout(self, genn_pop, batch_size:int, shape) -> np.ndarray: + def get_readout(self, genn_pop, batch_size: int, shape) -> np.ndarray: # Pull variable from genn genn_pop.pull_var_from_device(self.output_var_name) diff --git a/ml_genn/ml_genn/utils/model.py b/ml_genn/ml_genn/utils/model.py index 3d0b594a..f0a966d7 100644 --- a/ml_genn/ml_genn/utils/model.py +++ b/ml_genn/ml_genn/utils/model.py @@ -226,8 +226,8 @@ def from_val_descriptors(model, output_var_name, inst, dt, param_vals={}, var_vals={}, egp_vals={}): return NeuronModel( model, output_var_name, - get_values(inst, model["param_name_types"], dt, param_vals), - get_values(inst, model["var_name_types"], dt, var_vals), + get_values(inst, model.get("param_name_types", []), dt, param_vals), + get_values(inst, model.get("var_name_types", []), dt, var_vals), egp_vals) @property @@ -254,8 +254,8 @@ def from_val_descriptors(model, inst, dt, param_vals={}, var_vals={}, egp_vals={}): return SynapseModel( model, - get_values(inst, model["param_name_types"], dt, param_vals), - get_values(inst, model["var_name_types"], dt, var_vals), + get_values(inst, model.get("param_name_types", []), dt, param_vals), + get_values(inst, model.get("var_name_types", []), dt, var_vals), egp_vals) diff --git a/ml_genn/ml_genn/utils/value.py b/ml_genn/ml_genn/utils/value.py index 76cb3e14..69befe2b 100644 --- a/ml_genn/ml_genn/utils/value.py +++ b/ml_genn/ml_genn/utils/value.py @@ -73,13 +73,25 @@ def is_value_initializer(value): return isinstance(value, Initializer) +# **THINK** should maybe a method in a base class for Neuron/Synapse etc +def get_genn_var_name(inst, name): + # Get attribute from instance type + d = getattr(type(inst), name) + + # If attribute is a value descriptor, return GeNN name + if isinstance(d, ValueDescriptor): + return d.genn_name + else: + raise RuntimeError(f"'{name}' is not a ValueDescriptor") + + # **THINK** should maybe a method in a base class 
 def get_values(inst, name_types, dt, vals={}):
     # Get descriptors
     descriptors = getmembers(type(inst), isdatadescriptor)
 
     # Build dictionary mapping GeNN names to var descriptors
-    descriptors = {d.genn_name: d for n, d in descriptors 
+    descriptors = {d.genn_name: d for n, d in descriptors
                    if (isinstance(d, ValueDescriptor)
                        and d.genn_name is not None)}
 
@@ -96,13 +108,13 @@ def set_values(inst, vals):
     descriptors = getmembers(type(inst), isdatadescriptor)
 
     # Build dictionary mapping GeNN names to var descriptors
-    descriptors = {d.genn_name: d for n, d in descriptors 
+    descriptors = {d.genn_name: d for n, d in descriptors
                    if (isinstance(d, ValueDescriptor)
                        and d.genn_name is not None)}
-    
+
     # Loop through values
     for n, v in vals.items():
-        # If there is a descriptor matching 
+        # If there is a descriptor matching
         # this name, use it to set variable
         if n in descriptors:
-            descriptors[n].__set__(inst, v)
\ No newline at end of file
+            descriptors[n].__set__(inst, v)
diff --git a/tests/ml_genn/utils/test_value.py b/tests/ml_genn/utils/test_value.py
index 7aa0f0ac..441d68ba 100644
--- a/tests/ml_genn/utils/test_value.py
+++ b/tests/ml_genn/utils/test_value.py
@@ -1,7 +1,7 @@
 from ml_genn.utils.value import ValueDescriptor
 
-from pytest import approx
-from ml_genn.utils.value import get_values
+from pytest import approx, raises
+from ml_genn.utils.value import get_genn_var_name, get_values
 
 class Model:
     x = ValueDescriptor("X")
@@ -16,6 +16,14 @@ def __init__(self, x, y, z):
 def test_invalid_values():
     pass
 
+def test_get_genn_name():
+    x = Model(1.0, 2.0, 3.0)
+
+    assert get_genn_var_name(x, "x") == "X"
+
+    with raises(AttributeError):
+        get_genn_var_name(x, "n")
+
 def test_get_values():
     x = Model(1.0, 2.0, 3.0)
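Note for reviewers: the sketch below illustrates how the two ways of naming a recorded variable introduced by this patch fit together. It is a usage sketch only, not part of the diff: `hidden` is a placeholder for any :class:`~population.Population` in an already-built network, and treating `Fx` (the few-spike model variable above) as a variable without a Python-level `ValueDescriptor` is an assumption made for illustration.

.. code-block:: python

    from ml_genn.callbacks import VarRecorder

    # Python-level name: resolved to the underlying GeNN variable via
    # get_genn_var_name, so this raises if "v" is not a ValueDescriptor
    # on the population's neuron model
    v_rec = VarRecorder(hidden, "v", key="hidden_v")

    # Raw GeNN name: the new genn_var argument bypasses the descriptor
    # lookup, e.g. for a GeNN-only variable such as "Fx" (assumed here
    # to have no descriptor); 'var' must then be passed as None
    fx_rec = VarRecorder(hidden, None, key="hidden_fx", genn_var="Fx")

Passing neither `var` nor `genn_var` raises the `RuntimeError` added in `var_recorder.py`.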