diff --git a/.gitignore b/.gitignore index 5a198a7..5883008 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,3 @@ .vscode Manifest.toml -docs/build \ No newline at end of file +docs/build diff --git a/Project.toml b/Project.toml index c0a20f0..662f4fc 100644 --- a/Project.toml +++ b/Project.toml @@ -4,6 +4,8 @@ version = "0.1.1" [deps] BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" +CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" +Colors = "5ae59095-9a9b-59fe-a467-6f913c188581" Dagger = "d58978e5-989f-55fb-8d15-ea34adc7bf54" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" @@ -22,21 +24,42 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Requires = "ae029012-a4dd-5104-9daa-d747884805df" Revise = "295af30f-e4ad-537b-8983-00126c2a3abe" RollingFunctions = "b0e4dd01-7b14-53d8-9b45-175a3e362653" +SGtSNEpi = "e6c19c8d-e382-4a50-b2c6-174ddd647730" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" ThreadTools = "dbf13d8f-d36e-4350-8970-f3a99faba1a8" TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" +Unitful = "1986cc42-f94f-5a68-af5c-568840ba703d" [compat] +BenchmarkTools = "1" +CairoMakie = "0.12.16, 0.13" +Colors = "0.12.11, 0.13" +Dagger = "0.18" Distributions = "0.25.112" Documenter = "1.8.0" +DrWatson = "2" Graphs = "1.12.0" Interpolations = "0.15.1" +LoopVectorization = "0.12" MetaGraphs = "0.8.0" +Parameters = "0.12" +Plots = "1" +ProgressBars = "1" Requires = "0.5, 1" +Revise = "3" +RollingFunctions = "0.8" +SGtSNEpi = "0.4.0" +StaticArrays = "1" +Statistics = "1" +StatsBase = "0.34.3" +ThreadTools = "0.2" +TimerOutputs = "0.5" UnPack = "1" +Unitful = "1" julia = "1" [extras] diff --git a/README.md b/README.md index 5e80531..72e3461 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ # SpikingNeuralNetworks +![CI](https://github.com/JuliaNeuroscience/SpikingNeuralNetworks.jl/actions/workflows/ci.yml/badge.svg) + + ## Installation diff --git a/examples/PotjansDiesman.jl b/examples/PotjansDiesman.jl new file mode 100644 index 0000000..9547275 --- /dev/null +++ b/examples/PotjansDiesman.jl @@ -0,0 +1,217 @@ +using Revise +using DrWatson +using SpikingNeuralNetworks +SNN.@load_units +import SpikingNeuralNetworks: AdExParameter, IFParameter +using Statistics, Random +using Plots +using SparseArrays +using ProgressMeter +using Plots +using SpikingNeuralNetworks +using SNNUtils + + +#using Graphs +#using Karnak +#using NetworkLayout +#using Colors + +""" +Auxiliary Potjans parameters for neural populations with scaled cell counts +""" +function potjans_neurons(scale=1.0) + ccu = Dict( + :E23 => trunc(Int32, 20683 * scale), + :E4 => trunc(Int32, 21915 * scale), + :E5 => trunc(Int32, 4850 * scale), + :E6 => trunc(Int32, 14395 * scale), + :I6 => trunc(Int32, 2948 * scale), + :I23 => trunc(Int32, 5834 * scale), + :I5 => trunc(Int32, 1065 * scale), + :I4 => trunc(Int32, 5479 * scale) + ) + + neurons = Dict{Symbol, SNN.AbstractPopulation}() + for (k, v) in ccu + if occursin("E", String(k)) + neurons[k] = SNN.AdEx(; N = 1, param = SNN.AdExParameter(; El = -49mV), name=string(k)) + + else + + neurons[k] = SNN.IF(; N = 1, param = SNN.IFParameter(; El = -49mV), name=string(k)) + end + end + return neurons +end + +""" +Define Potjans parameters for neuron populations and connection probabilities + # Names of the 
simulated populations. + 'populations': ['L23E', 'L23I', 'L4E', 'L4I', 'L5E', 'L5I', 'L6E', 'L6I'], + # Number of neurons in the different populations. The order of the + # elements corresponds to the names of the variable 'populations'. + 'N_full': np.array([20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948]), + # Mean rates of the different populations in the non-scaled version + # of the microcircuit. Necessary for the scaling of the network. + # The order corresponds to the order in 'populations'. + 'full_mean_rates': + np.array([0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523]), + # Connection probabilities. The first index corresponds to the targets + # and the second to the sources. + # Number of external connections to the different populations. + # The order corresponds to the order in 'populations'. + 'K_ext': np.array([1600, 1500, 2100, 1900, 2000, 1900, 2900, 2100]), + # Factor to scale the indegrees. + 'K_scaling': 0.1, + # Factor to scale the number of neurons. + 'N_scaling': 0.1, + # Mean amplitude of excitatory postsynaptic potential (in mV). + 'PSP_e': 0.15, + # Relative standard deviation of the postsynaptic potential. + 'PSP_sd': 0.1, + # Relative inhibitory synaptic strength (in relative units). + 'g': -4, + # Rate of the Poissonian spike generator (in Hz). + 'bg_rate': 8., + # Turn Poisson input on or off (True or False). + 'poisson_input': True, + # Delay of the Poisson generator (in ms). + 'poisson_delay': 1.5, + # Mean delay of excitatory connections (in ms). + 'mean_delay_exc': 1.5, + # Mean delay of inhibitory connections (in ms). + 'mean_delay_inh': 0.75, + # Relative standard deviation of the delay of excitatory and + # inhibitory connections (in relative units). + 'rel_std_delay': 0.5, + """ +function potjans_conn(Ne) + + function j_from_name(pre, post, g) + if occursin("E", String(pre)) && occursin("E", String(post)) + return g.jee + elseif occursin("I", String(pre)) && occursin("E", String(post)) + return g.jei + elseif occursin("E", String(pre)) && occursin("I", String(post)) + return g.jie + elseif occursin("I", String(pre)) && occursin("I", String(post)) + return g.jii + else + throw(ArgumentError("Invalid pre-post combination: $pre-$post")) + end + end + + layer_names = [:E23, :I23, :E4, :I4, :E5, :I5, :E6, :I6] + + # Replace static matrix with a regular matrix for `conn_probs` + # ! the convention is j_post_pre. This is how the matrices `w` are built. Are you using that when defining the parameters? 
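+    # A concrete reading of the j_post_pre convention assumed throughout this file:
+    # rows index the postsynaptic layer and columns the presynaptic layer, both in the
+    # order of `layer_names`. For example, conn_probs[2, 1] = 0.1346 is the probability
+    # of a connection from presynaptic :E23 onto postsynaptic :I23, matching the
+    # conn_probs[j, i] / conn_j[post, pre] indexing used in `potjans_layer` below.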
+ conn_probs = Float32[ + 0.1009 0.1689 0.0437 0.0818 0.0323 0.0 0.0076 0.0 + 0.1346 0.1371 0.0316 0.0515 0.0755 0.0 0.0042 0.0 + 0.0077 0.0059 0.0497 0.135 0.0067 0.0003 0.0453 0.0 + 0.0691 0.0029 0.0794 0.1597 0.0033 0.0 0.1057 0.0 + 0.1004 0.0622 0.0505 0.0057 0.0831 0.3726 0.0204 0.0 + 0.0548 0.0269 0.0257 0.0022 0.06 0.3158 0.0086 0.0 + 0.0156 0.0066 0.0211 0.0166 0.0572 0.0197 0.0396 0.2252 + 0.0364 0.001 0.0034 0.0005 0.0277 0.008 0.0658 0.1443 + ] + + # !Assign dimensions to these parameters, and some reference + pree = 0.2 + g = 1.0 + tau_meme = 10.0 # (ms) + # Synaptic strengths for each connection type + K = round(Int, Ne * pree) + sqrtK = sqrt(K) + + # !Same, the convention is j_post_pre + je = 2.0 / sqrtK * tau_meme * g + ji = 2.0 / sqrtK * tau_meme * g + jee = 0.15 * je + jie = je + jei = -0.75 * ji + jii = -ji + g_strengths = dict2ntuple(SNN.@symdict jee jie jei jii) + + conn_j = zeros(Float32, size(conn_probs)) + for pre in eachindex(layer_names) + for post in eachindex(layer_names) + conn_j[post, pre ] = j_from_name(layer_names[pre], layer_names[post], g_strengths) + end + end + + return layer_names, conn_probs, conn_j +end + + +""" +Main function to setup Potjans layer with memory-optimized connectivity +""" +function potjans_layer(scale) + + ## Create the neuron populations + neurons = potjans_neurons(scale) + exc_pop = filter(x -> occursin("E", String(x)), keys(neurons)) + inh_pop = filter(x -> occursin("I", String(x)), keys(neurons)) + Ne = trunc(Int32, sum([neurons[k].N for k in exc_pop])) + Ni = trunc(Int32, sum([neurons[k].N for k in inh_pop])) + #@show exc_pop, Ne, Ni + layer_names, conn_probs, conn_j = potjans_conn(Ne) + + ## Create the synaptic connections based on the connection probabilities and synaptic weights assigned to each pre-post pair + connections = Dict() + for i in eachindex(layer_names) + for j in eachindex(layer_names) + pre = layer_names[i] + post = layer_names[j] + p = conn_probs[j, i] + J = conn_j[j, i] + sym = J>=0 ? 
:ge : :gi + μ = abs(J) + + + #S = SNN.SpikingSynapse( + # inputs, + # neurons, + # :ge; + # μ = 0.01, + # p = 1.0, + # param = SNN.vSTDPParameter(; Wmax = 0.01), + #) + + s = SNN.SpikingSynapse(neurons[pre], neurons[post], sym; μ = μ*10, p=p, σ=0)#,param = SNN.vSTDPParameter(; Wmax = 0.5)) + connections[Symbol(string(pre,"_", post))] = s + end + end + + ## Create the Poisson stimulus for each population + stimuli = Dict() + for pop in exc_pop + νe = 3.5kHz + post = neurons[pop] + s = SNN.PoissonStimulus(post, :ge; param = νe, cells=:ALL, μ=1.f0, name="PoissonE_$(post.name)") + stimuli[Symbol(string("PoissonE_", pop))] = s + end + return merge_models(neurons,connections, stimuli) +end + + +model = potjans_layer(0.1) +duration = 15000ms +SNN.monitor([model.pop...], [:fire]) +SNN.monitor([model.pop...], [:v])#, sr=200Hz) +SNN.sim!(model=model; duration = duration, pbar = true, dt = 0.125) +display(SNN.raster(model.pop))#, [10s, 15s])) + +#Trange = 0:10:15s +frE, interval = SNN.firing_rate(model.pop, interval = Trange) +display(plot(mean.(frE), xlabel="Time [ms]", ylabel="Firing rate [Hz]", legend=:topleft)) +## + +#vecplot(model.pop.E23, :v, neurons =1, r=0s:15s,label="soma") +layer_names, conn_probs, conn_j = potjans_conn(4000) +pj = heatmap(conn_j, xticks=(1:8,layer_names), yticks=(1:8,layer_names), aspect_ratio=1, color=:bluesreds, title="Synaptic weights", xlabel="Presynaptic", ylabel="Postsynaptic", size=(500,500), clims=(-maximum(abs.(conn_j)), maximum(abs.(conn_j)))) +pprob=heatmap(conn_probs, xticks=(1:8,layer_names), yticks=(1:8,layer_names), aspect_ratio=1, color=:viridis, title="Connection probability", xlabel="Presynaptic", ylabel="Postsynaptic", size=(500,500)) +display(plot(pprob, pj, layout=(1,2), size=(1000,500), margin=5Plots.mm)) +## \ No newline at end of file diff --git a/examples/Tonic_NMNIST_Stimulus.jl b/examples/Tonic_NMNIST_Stimulus.jl new file mode 100644 index 0000000..b339ece --- /dev/null +++ b/examples/Tonic_NMNIST_Stimulus.jl @@ -0,0 +1,98 @@ +module Tonic_NMNIST_Stimulus + using JLD2 + using PyCall + using DataFrames + + using Random + using Plots + """ + NMNIST data set is a 3D labelled data set played out onto a 400 by 400 matrix of pixels over time. For any time instant there can be positive and negative spikes. + Here the 2D pixel matrix is collapsed into 1D so that it the matrix can be more easily projected onto the synapses of a biological network population. + """ + + """ + Call Pythons tonic module to get NMNIST a spiking data set of numerals. 
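+    Returns a tuple `(ts, p, label, pop_stimulation)`: event time stamps (rescaled from
+    the raw event times), event polarities, the numeral label, and the flattened (1D)
+    pixel indices used to target the synapses of the stimulated population.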
+ """ + + function getNMNIST(ind)#, cnt, input_shape) + pushfirst!(PyVector(pyimport("sys")."path"), "") + nmnist_module = pyimport("batch_nmnist_motions") + dataset::PyObject = nmnist_module.NMNIST("./") + A = zeros((35,35)) + I = LinearIndices(A) + pop_stimulation= Vector{Int32}([])#Vector{UInt32}([]) + l = -1 + events,l = dataset._dataset[ind]#._dataset(ind)#;limit=15) + l = convert(Int32,l) + x = Vector{Int32}([e[1] for e in events]) + y = Vector{Int32}([e[2] for e in events]) + ts = Vector{Float32}([e[3]/100000.0 for e in events]) + p = Vector{Int8}([e[4] for e in events]) + for (x_,y_) in zip(x,y) + push!(pop_stimulation,Int32(I[CartesianIndex(convert(Int32,x_+1),convert(Int32,y_+1))])) + end + (ts,p,l,pop_stimulation) + end + + function spike_packets_by_labels(cached_spikes) + ts = [cached_spikes[i][1] for (i,l) in enumerate(cached_spikes)] + p = [cached_spikes[i][2] for (i,l) in enumerate(cached_spikes)] + labels = [cached_spikes[i][3] for (i,l) in enumerate(cached_spikes)] + pop_stimulation = [cached_spikes[i][4] for (i,l) in enumerate(cached_spikes)] + + # Create DataFrame + df = DataFrame( + ts = ts, + p = p, + labels = labels, + pop_stimulation = pop_stimulation + ) + + # Sort the DataFrame by `labels` + sorted_df = sort(df, :labels) + + # Group by `labels` to create indexed/enumerated groups + grouped_df = groupby(sorted_df, :labels) + unique_labels = unique(labels) + labeled_packets = Dict() + for filter_label in unique_labels + time_and_offset=[] + population_code=[] + next_offset = 0 + group = grouped_df[filter_label+1] + for row in eachrow(group) + append!(population_code,row["pop_stimulation"]) + append!(time_and_offset,next_offset.+row["ts"]) + windowb = minimum(row["ts"]) + windowa = maximum(row["ts"]) + next_offset = windowa-windowb + + + labeled_packets[filter_label] = (population_code,time_and_offset) + + end + end + labeled_packets + end + + """ + For a given numeral characters label plot the corresponding spike packets. + """ + function spike_character_plot(filter_label,population_code,time_and_offset) + p1 = Plots.scatter() + scatter!(p1, + population_code, + time_and_offset, + ms = 1, # Marker size + ylabel = "Time (ms)", + xlabel = "Neuron Index", + title = "Spiking Activity with Distinct Characters", + legend=false + ) + plot(p1) + savefig("label$filter_label.png") + end +end + + + diff --git a/examples/batch_nmnist_motions.py b/examples/batch_nmnist_motions.py new file mode 100644 index 0000000..cd17492 --- /dev/null +++ b/examples/batch_nmnist_motions.py @@ -0,0 +1,58 @@ +""" + +NMNIST_Motions dataset class + +Provides access to the event-based motions NMIST dataset. This is a version of the +NMNIST dataset in which we have separated out the events by motion and linked them +to the specified MNIST images. This allows us to retrieve any motion from the dataset +along with the associated mnist image. 
+ +""" +from os.path import exists + +#import h5py +import numpy as np +import tonic +#from numpy.lib.recfunctions import merge_arrays + + +class NMNIST(object): + """ A Dataset interface to the NMNIST dataset """ + + def __init__(self, dataset_path, train=True, first_saccade_only=False,transform=None): + """ Creates a dataset instance using the specified path """ + super(NMNIST, self).__init__() + + # Validate the specified path and store it + if not exists(dataset_path): + raise Exception("Specified dataset path does not exist") + self._dataset = tonic.datasets.NMNIST(save_to='./data', train=train, first_saccade_only=first_saccade_only,transform=transform) + # self.transform = transform + """ + def get_dataset_item(self, indices): + + assert(len(indices) <= 100) + all_events = [] + + for id,index in enumerate(indices): + events, label = self._dataset[index] + label_array = np.full(events['x'].shape[0],label,dtype=[('label','i8')]) + event_array = merge_arrays((events,label_array),flatten=True) + all_events.append(event_array) + + super_events = np.hstack(all_events) + super_events = super_events[super_events['t'].argsort()] + return super_events + + def get_count(self): + """ Returns the number of items """ + return len(self._dataset) + + def get_label_range(self): + """ Returns the range of labels in this dataset """ + return [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + def get_element_dimensions(self): + """ Returns a tuple containing the dimensions of each image in the dataset """ + return self._dataset.sensor_size + """ \ No newline at end of file diff --git a/examples/genPotjansConnectivity.jl b/examples/genPotjansConnectivity.jl new file mode 100644 index 0000000..cc2c770 --- /dev/null +++ b/examples/genPotjansConnectivity.jl @@ -0,0 +1,437 @@ +using Revise +using DrWatson +@quickactivate "SpikingNeuralNetworks" +using SpikingNeuralNetworks +SNN.@load_units +import SpikingNeuralNetworks: AdExParameter, IFParameter +using Statistics, Random +using Plots +using SparseArrays +using ProgressMeter +using Plots +using SpikingNeuralNetworks + +using SGtSNEpi, Random +using Revise +using Colors, LinearAlgebra +#using GLMakie +using Graphs +using JLD2 +import StatsBase.mean + + + +""" +Define Potjans parameters for neuron populations and connection probabilities +""" +function potjans_params(ccu, scale=1.0) + cumulative = Dict{String, Vector{Int64}}() + layer_names = ["23E", "23I", "4E", "4I", "5E", "5I", "6E", "6I"] + + # Replace static matrix with a regular matrix for `conn_probs` + conn_probs = Float32[ + 0.1009 0.1689 0.0437 0.0818 0.0323 0.0 0.0076 0.0 + 0.1346 0.1371 0.0316 0.0515 0.0755 0.0 0.0042 0.0 + 0.0077 0.0059 0.0497 0.135 0.0067 0.0003 0.0453 0.0 + 0.0691 0.0029 0.0794 0.1597 0.0033 0.0 0.1057 0.0 + 0.1004 0.0622 0.0505 0.0057 0.0831 0.3726 0.0204 0.0 + 0.0548 0.0269 0.0257 0.0022 0.06 0.3158 0.0086 0.0 + 0.0156 0.0066 0.0211 0.0166 0.0572 0.0197 0.0396 0.2252 + 0.0364 0.001 0.0034 0.0005 0.0277 0.008 0.0658 0.1443 + ] + + # Calculate cumulative cell counts + v_old = 1 + for (k, v) in pairs(ccu) + cumulative[k] = collect(v_old:v + v_old) + v_old += v + end + + syn_pol = Vector{Int64}(undef, length(ccu)) + for (i, k) in enumerate(keys(ccu)) + syn_pol[i] = occursin("E", k) ? 
1 : 0 + end + + return cumulative, ccu, layer_names, conn_probs, syn_pol +end + +""" +Assign synapse weights selectively, and only update non-zero entries +""" +function index_assignment!(item, w0Weights, g_strengths, Lee, Lie, Lii, Lei) + (src, tgt, syn0, syn1) = item + jee, jie, jei, jii = g_strengths + wig = -20 * 4.5f0 + + if syn0 == 1 && syn1 == 1 + w0Weights[src, tgt] = jee + Lee[src, tgt] = jee + elseif syn0 == 1 && syn1 == 0 + w0Weights[src, tgt] = jei + Lei[src, tgt] = jei + elseif syn0 == 0 && syn1 == 1 + w0Weights[src, tgt] = wig + Lie[src, tgt] = wig + elseif syn0 == 0 && syn1 == 0 + w0Weights[src, tgt] = wig + Lii[src, tgt] = wig + end +end + +""" +Build matrix with memory-efficient computations, filtering and batching +""" +function build_matrix(cumulative, conn_probs, Ncells, g_strengths, syn_pol, batch_size=1000) + w0Weights = spzeros(Float32, Ncells, Ncells) + Lee = spzeros(Float32, Ncells, Ncells) + Lii = spzeros(Float32, Ncells, Ncells) + Lei = spzeros(Float32, Ncells, Ncells) + Lie = spzeros(Float32, Ncells, Ncells) + + cumvalues = collect(values(cumulative)) + + # Process connections in batches + @inbounds @showprogress for batch in Iterators.partition(1:length(cumvalues), batch_size) + for i in batch + (syn0, v) = (syn_pol[i], cumvalues[i]) + for src in v + for j in batch + prob = conn_probs[i, j] + if prob > 0 + (syn1, v1) = (syn_pol[j], cumvalues[j]) + for tgt in v1 + if src != tgt && rand() < prob + item = (src, tgt, syn0, syn1) + index_assignment!(item, w0Weights, g_strengths, Lee, Lie, Lii, Lei) + end + end + end + end + end + end + end + + Lexc = Lee + Lei + Linh = Lie + Lii + + return w0Weights, Lee, Lie, Lei, Lii, Lexc, Linh +end + +""" +Create Potjans weights with modularized params and memory-optimized matrices +""" +function potjans_weights(Ncells, g_strengths, ccu, scale) + cumulative, ccu, layer_names, conn_probs, syn_pol = potjans_params(ccu, scale) + build_matrix(cumulative, conn_probs, Ncells, g_strengths, syn_pol) +end + +""" +Auxiliary Potjans parameters for neural populations with scaled cell counts +""" +function auxil_potjans_param(scale=1.0) + ccu = Dict( + "23E" => trunc(Int32, 20683 * scale), + "4E" => trunc(Int32, 21915 * scale), + "5E" => trunc(Int32, 4850 * scale), + "6E" => trunc(Int32, 14395 * scale), + "6I" => trunc(Int32, 2948 * scale), + "23I" => trunc(Int32, 5834 * scale), + "5I" => trunc(Int32, 1065 * scale), + "4I" => trunc(Int32, 5479 * scale) + ) + + Ncells = trunc(Int32, sum(values(ccu)) + 1) + Ne = trunc(Int32, ccu["23E"] + ccu["4E"] + ccu["5E"] + ccu["6E"]) + Ni = Ncells - Ne + return Ncells, Ne, Ni, ccu +end +""" +Main function to setup Potjans layer with memory-optimized connectivity +""" +function potjans_layer(scale) + Ncells, Ne, Ni, ccu = auxil_potjans_param(scale) + + # Synaptic strengths for each connection type + pree = 0.1 + K = round(Int, Ne * pree) + sqrtK = sqrt(K) + g = 1.0 + tau_meme = 10.0 # (ms) + je = 2.0 / sqrtK * tau_meme * g + ji = 2.0 / sqrtK * tau_meme * g + jee = 0.15 * je + jei = je + jie = -0.75 * ji + jii = -ji + g_strengths = Float32[jee, jie, jei, jii] + + potjans_weights(Ncells, g_strengths, ccu, scale) +end + + +if !isfile("potjans_wiring.jld") + # Run with a specific scaling factor + scale = 0.01 + w0Weights, Lee, Lie, Lei, Lii, Lexc, Linh = potjans_layer(scale) + @save "potjans_wiring.jld" w0Weights, Lee, Lie, Lei, Lii, Lexc, Linh +else + @load "potjans_wiring.jld" w0Weights, Lee, Lie, Lei, Lii, Lexc, Linh + + exc_pop = unique(Lexc.rowval) + inhib_pop = unique(Linh.rowval) + exc_pop = 
Set(Lexc.rowval) + inhib_pop = Set(Linh.rowval) + + + # Find overlapping elements + overlap = intersect(exc_pop, inhib_pop) + + # Remove overlapping elements from both sets + Epop_numbers = setdiff(exc_pop, overlap) + IPop_numbers = setdiff(inhib_pop, overlap) + + overlap = intersect(exc_pop, inhib_pop) + +#= +Note I may have to remove these elements from Lee,Lie,Lii,Lexc,Linh + +=# + +# Ensure they are disjoint (no overlap) +if isempty(overlap) + println("exc_pop and inhib_pop are unique sets.") +else + println("exc_pop and inhib_pop have overlapping elements: ", overlap) +end + + +## Neuron parameters + +function initialize_LKD(Epop_numbers,IPop_numbers,w0Weights, Lee, Lie, Lei, Lii;νe = 4.5Hz) + τm = 20ms + C = 300SNN.pF # Capacitance + R = τm / C + τre = 1ms # Rise time for excitatory synapses + τde = 6ms # Decay time for excitatory synapses + τri = 0.5ms # Rise time for inhibitory synapses + τdi = 2ms # Decay time for inhibitory synapses + + # Input and synapse paramater + N = 1000 + # νe = 8.5Hz # Rate of external input to E neurons + # νe = 4.5Hz # Rate of external input to E neurons + νi = 0#0.025Hz # Rate of external input to I neurons + p_in = 0.5 #1.0 # 0.5 + μ_in_E = 1.78SNN.pF + + μEE = 2.76SNN.pF # Initial E to E synaptic weight + μIE = 48.7SNN.pF # Initial I to E synaptic weight + μEI = 1.27SNN.pF # Synaptic weight from E to I + μII = 16.2SNN.pF # Synaptic weight from I to I + + Random.seed!(28) + + LKD_AdEx_exc = AdExParameter( + τm = 20ms, + Vt = -52mV, + Vr = -60mV, + El = -70mV, + R = R, + ΔT = 2mV, + a = 4nS, + b = 0.805SNN.pA, + τabs = 1ms, + τw = 150ms, + τre = τre, + τde = τde, + τri = τri, + τdi = τdi, + At = 10mV, + τt = 30ms, + E_i = -75mV, + E_e = 0mV, + ) # 0.000805nA + LKD_IF_inh = IFParameter( + τm = 20ms, + Vt = -52mV, + Vr = -60mV, + El = -62mV, + R = R, + τre = τre, + τde = τde, + τri = τri, + τdi = τdi, + E_i = -75mV, + E_e = 0mV, + ) + + Input_E = SNN.Poisson(; N = N, param = SNN.PoissonParameter(; rate = νe)) + Input_I = SNN.Poisson(; N = N, param = SNN.PoissonParameter(; rate = νi)) + E = SNN.AdEx(; N = size(Lee)[1], param = LKD_AdEx_exc) + I = SNN.IF(; N = size(Lee)[1], param = LKD_IF_inh) + EE = SNN.SpikingSynapse(E, E, w = Lee, :ge; μ = μEE, param = SNN.vSTDPParameter()) + EI = SNN.SpikingSynapse(E, I, w = Lei, :ge; μ = μEI, param = SNN.vSTDPParameter()) + IE = SNN.SpikingSynapse(I, E, w = Lie, :gi; μ = μIE, param = SNN.vSTDPParameter()) + II = SNN.SpikingSynapse(I, I, w = Lii, :gi; μ = μII) + ProjI = SNN.SpikingSynapse(Input_I, I, :ge; μ = μ_in_E, p = p_in) + ProjE = SNN.SpikingSynapse(Input_E, E, :ge; μ = μ_in_E, p = p_in) # connection from input to E + P = [E, I, Input_E, Input_I] + C = [EE, II, EI, IE, ProjE, ProjI] + return P, C, EE, II, EI, IE, ProjE, ProjI +end + +## +P,C, EE, II, EI, IE, ProjE, ProjI = initialize_LKD(Epop_numbers,IPop_numbers,w0Weights, Lee, Lie, Lei, Lii;νe = 4.5Hz) + + +# +#P, C = initialize_LKD(20Hz) +duration = 15000ms +#pltdur = 500e1 +SNN.monitor(P[1:2], [:fire, :v]) +SNN.sim!(P, C; duration = duration) + +p1 = SNN.raster(P[1]) +p2 = SNN.raster(P[2]) +display(plot(p1,p2)) + +matrix_to_plot = w0Weights # Replace 1 with the index of the matrix you need + + +# Plot the heatmap +heatmap(matrix_to_plot, color=:viridis, xlabel="Source Neurons", ylabel="Target Neurons", title="Connectivity Matrix Heatmap") + +# Assuming `matrix_to_plot` is the matrix you want to visualize +#matrix_to_plot = layer_matrices[1] # For example, w0Weights +final = Matrix(Lee)+Matrix(Lei) +# Convert the matrix to dense if it's sparse +matrix_dense = 
Matrix(final) + +# Normalize the matrix between 0 and 1 +min_val = minimum(matrix_dense) +max_val = maximum(matrix_dense) +normalized_matrix = (matrix_dense .- min_val) ./ (max_val - min_val) + +# Plot the normalized heatmap +heatmap( + normalized_matrix, + color=:viridis, + xlabel="Source Neurons", + ylabel="Target Neurons", + title="Normalized Connectivity Matrix Heatmap", + clims=(0, 1) # Setting color limits for the normalized scale +) + +#scale = 0.015 +#pot_conn = grab_connectome(scale) +dim = 2 +Lx = Vector{Int64}(zeros(size(w0Weights[2,:]))) +Lx = convert(Vector{Int64},Lx) + +y = sgtsnepi(w0Weights) +cmap_out = distinguishable_colors( + length(Lx), + [RGB(1,1,1), RGB(0,0,0)], dropseed=true) + +display(SGtSNEpi.show_embedding( y, Lx ,A=w0Weights;edge_alpha=0.15,lwd_in=0.15,lwd_out=0.013,cmap=cmap_out) +#SNN.raster(P[1:2], [1, 1.5] .* pltdur) +#display(SNN.vecplot(P[1], :v, 10)) +# E_neuron_spikes = map(sum, E.records[:fire]) +# E_neuron_spikes = map(sum, E.records[:fire]) +display(histogram(IE.W[:])) +display(histogram(EE.W[:])) +display(histogram(EI.W[:])) + +## +function firing_rate(E, I, bin_width, bin_edges) + # avg spikes at each time step + E_neuron_spikes = map(sum, E.records[:fire]) ./ E.N + I_neuron_spikes = map(sum, I.records[:fire]) ./ E.N + # Count the number of spikes in each bin + E_bin_count = [sum(E_neuron_spikes[i:(i+bin_width-1)]) for i in bin_edges] + I_bin_count = [sum(I_neuron_spikes[i:(i+bin_width-1)]) for i in bin_edges] + return E_bin_count, I_bin_count +end + +# frequencies = [1Hz 10Hz 100Hz] +inputs = 0:50:300 +E_bin_counts = [] +I_bin_counts = [] +bin_edges = [] +bin_width = 10 # in milliseconds + +num_bins = Int(length(EE.records[:fire]) / bin_width) +bin_edges = 1:bin_width:(num_bins*bin_width) + +## + +## +inputs = 0nA:5e3nA:(50e3)nA +rates = zeros(length(inputs), 2) +for (n, inh_input) in enumerate(inputs) + @info inh_input + + + + # + duration = 500ms + #P, C = initialize_LKD(20Hz) + SNN.monitor(P[1:2], [:fire]) + SNN.sim!(P, C; duration = duration) + + duration = 5000ms + P[2].I .= inh_input + SNN.sim!(P, C; duration = duration) + E_bin_count, I_bin_count = firing_rate(P[1], P[2], bin_width, bin_edges) + rates[n, 1] = mean(E_bin_count[(end-100):end]) + rates[n, 2] = mean(I_bin_count[(end-100):end]) + # push!(E_bin_counts, E_bin_count) + # push!(I_bin_counts, I_bin_count) +end + +plot( + inputs, + rates, + label = ["Excitatory" "Inhibitory"], + ylabel = "Firing rate (Hz)", + xlabel = "External input (μA)", +) +## + +# Create a new plot or use an existing plot if it exists +plot( + xlabel = "Time bins", + size = (800, 800), + ylabel = "Firing frequency (spikes/$(bin_width) ms)", + xtickfontsize = 6, + ytickfontsize = 6, + yguidefontsize = 6, + xguidefontsize = 6, + titlefontsize = 7, + legend = :bottomright, + layout = (length(frequencies), 1), + title = ["νi = $(νi*1000)Hz" for νi in frequencies], +) + +# Plot excitatory neurons +plot!(bin_edges, E_bin_counts, label = "Excitatory neurons") +# Plot inhibitory neurons +plot!(bin_edges, I_bin_counts, label = "Inhibitory neurons") + + +# Optional: If you need them as sorted arrays (not sets) +#exc_pop_array = sort(collect(exc_pop)) +#inhib_pop_array = sort(collect(inhib_pop)) + +#@show exc_pop_array +#@show inhib_pop_array + +# Assuming `layer_matrices` is a tuple of matrices, such as: +# layer_matrices = (w0Weights, Lee, Lie, Lei, Lii, Lexc, Linh) +# For example, let's plot the first matrix, `w0Weights` + +# Extract the matrix you want to plot +matrix_to_plot = layer_matrices[1] # Replace 1 with the 
index of the matrix you need + +# Plot the heatmap +heatmap(matrix_to_plot, color=:viridis, xlabel="Source Neurons", ylabel="Target Neurons", title="Connectivity Matrix Heatmap") diff --git a/examples/paradoxical_effect.jl b/examples/paradoxical_effect.jl index 722388d..e7d3540 100644 --- a/examples/paradoxical_effect.jl +++ b/examples/paradoxical_effect.jl @@ -67,3 +67,5 @@ plot!( legend = :topright, ) ## +inputs = SNN.Poisson(; N = 350, param = SNN.PoissonParameter(rate = 10.5Hz)) +@show(inputs) \ No newline at end of file diff --git a/examples/potjans_nmnist.jl b/examples/potjans_nmnist.jl new file mode 100644 index 0000000..57320e0 --- /dev/null +++ b/examples/potjans_nmnist.jl @@ -0,0 +1,367 @@ +using Revise +using DrWatson +using SpikingNeuralNetworks +SNN.@load_units +import SpikingNeuralNetworks: AdExParameter, IFParameter +using Statistics, Random +using Plots +using SparseArrays +using ProgressMeter +using Plots +using SpikingNeuralNetworks +using SNNUtils +using JLD2 +using Distributions +include("Tonic_NMNIST_Stimulus.jl") +using .Tonic_NMNIST_Stimulus + + + +""" +Auxiliary Potjans parameters for neural populations with scaled cell counts +""" +function potjans_neurons(scale=1.0) + ccu = Dict( + :E23 => trunc(Int32, 20683 * scale), + :E4 => trunc(Int32, 21915 * scale), + :E5 => trunc(Int32, 4850 * scale), + :E6 => trunc(Int32, 14395 * scale), + :I6 => trunc(Int32, 2948 * scale), + :I23 => trunc(Int32, 5834 * scale), + :I5 => trunc(Int32, 1065 * scale), + :I4 => trunc(Int32, 5479 * scale) + ) + + neurons = Dict{Symbol, SNN.AbstractPopulation}() + for (k, v) in ccu + if occursin("E", String(k)) + neurons[k] = IF(N = v, param=LKD2014SingleExp.PV, name=string(k)) + else + neurons[k] = IF(N = v, param=LKD2014SingleExp.PV, name=string(k)) + end + end + return neurons +end + +""" +Define Potjans parameters for neuron populations and connection probabilities +""" +function potjans_conn(Ne) + + function j_from_name(pre, post) + if occursin("E", String(pre)) && occursin("E", String(post)) + syn_weight_dist = Normal(0.15, 0.1) + + return rand(syn_weight_dist) + elseif occursin("I", String(pre)) && occursin("E", String(post)) + syn_weight_dist = Normal(0.15, 0.1) + return -4.0*rand(syn_weight_dist) + + elseif occursin("E", String(pre)) && occursin("I", String(post)) + syn_weight_dist = Normal(0.15, 0.1) + + return rand(syn_weight_dist) + elseif occursin("I", String(pre)) && occursin("I", String(post)) + syn_weight_dist = Normal(0.15, 0.1) + return -4.0*rand(syn_weight_dist) + else + throw(ArgumentError("Invalid pre-post combination: $pre-$post")) + end + end + + + + layer_names = [:E23, :I23, :E4, :I4, :E5, :I5, :E6, :I6] + + + + total_cortical_thickness = 1500.0 + N_full = [20683, 5834, 21915, 5479, 4850, 1065, 14395, 2948] + N_E_total = N_full[1]+N_full[3]+N_full[5]+N_full[7] + dimensions_3D = Dict( + "x_dimension"=> 1000, + "z_dimension"=> 1000, + "total_cortical_thickness"=> total_cortical_thickness, + + # Have the thicknesses proportional to the numbers of E cells in each layer + "layer_thicknesses"=> Dict( + "L23"=> total_cortical_thickness*N_full[1]/N_E_total, + "L4" => total_cortical_thickness*N_full[3]/N_E_total, + "L5" => total_cortical_thickness*N_full[5]/N_E_total, + "L6" => total_cortical_thickness*N_full[7]/N_E_total, + "thalamus" => 100 + ) + ) + + net_dict = Dict{String, Any}( + "PSP_e"=> 0.15, + # Relative standard deviation of the postsynaptic potential. + "PSP_sd"=> 0.1, + # Relative inhibitory synaptic strength (in relative units). 
+ "g"=> -4, + # Rate of the Poissonian spike generator (in Hz). + "bg_rate"=> 8., + # Turn Poisson input on or off (True or False). + "poisson_input"=> true, + # Delay of the Poisson generator (in ms). + "poisson_delay"=> 1.5, + # Mean delay of excitatory connections (in ms). + "mean_delay_exc"=> 1.5, + # Mean delay of inhibitory connections (in ms). + "mean_delay_inh"=> 0.75, + # Relative standard deviation of the delay of excitatory and + # inhibitory connections (in relative units). + "rel_std_delay"=> 0.5 + ) + # Replace static matrix with a regular matrix for `conn_probs` + # ! the convention is j_post_pre. This is how the matrices `w` are built. Are you using that when defining the parameters? + conn_probs = Float32[ + 0.1009 0.1689 0.0437 0.0818 0.0323 0.0 0.0076 0.0 + 0.1346 0.1371 0.0316 0.0515 0.0755 0.0 0.0042 0.0 + 0.0077 0.0059 0.0497 0.135 0.0067 0.0003 0.0453 0.0 + 0.0691 0.0029 0.0794 0.1597 0.0033 0.0 0.1057 0.0 + 0.1004 0.0622 0.0505 0.0057 0.0831 0.3726 0.0204 0.0 + 0.0548 0.0269 0.0257 0.0022 0.06 0.3158 0.0086 0.0 + 0.0156 0.0066 0.0211 0.0166 0.0572 0.0197 0.0396 0.2252 + 0.0364 0.001 0.0034 0.0005 0.0277 0.008 0.0658 0.1443 + ] + + + conn_j = zeros(Float32, size(conn_probs)) + for pre in eachindex(layer_names) + for post in eachindex(layer_names) + + conn_j[post, pre ] = j_from_name(layer_names[pre], layer_names[post]) + + end + end + return layer_names, conn_probs, conn_j,net_dict +end + + +""" +Main function to setup Potjans layer with memory-optimized connectivity +""" +function potjans_layer(scale) + + ## Create the neuron populations + neurons = potjans_neurons(scale) + exc_pop = filter(x -> occursin("E", String(x)), keys(neurons)) + inh_pop = filter(x -> occursin("I", String(x)), keys(neurons)) + Ne = trunc(Int32, sum([neurons[k].N for k in exc_pop])) + Ni = trunc(Int32, sum([neurons[k].N for k in inh_pop])) + layer_names, conn_probs, conn_j,net_dict = potjans_conn(Ne) + syn_weight_dist = Normal(0.15, 0.1) + delay_dist_exc = Normal(1.5, 0.5) + delay_dist_inh = Normal( 0.75, 0.5) + + ## Create the synaptic connections based on the connection probabilities and synaptic weights assigned to each pre-post pair + connections = Dict() + stdp_param = STDPParameter(A_pre =5e-1, + A_post=-5e-1, + τpre =20ms, + τpost =15ms) + + for i in eachindex(layer_names) + for j in eachindex(layer_names) + pre = layer_names[i] + post = layer_names[j] + p = conn_probs[j, i] + J = conn_j[j, i] + sym = J>=0 ? 
:ge : :gi + μ = abs(J) + if J>=0 + s = SNN.SpikingSynapse(neurons[pre], neurons[post], sym; μ = μ, p=p, σ=0,param=stdp_param, delay_dist=delay_dist_exc) + else + s = SNN.SpikingSynapse(neurons[pre], neurons[post], sym; μ = -μ, p=p, σ=0, delay_dist=delay_dist_inh) + + end + connections[Symbol(string(pre,"_", post))] = s + end + end + + full_mean_rates = [0.971, 2.868, 4.746, 5.396, 8.142, 9.078, 0.991, 7.523] + stimuli = Dict() + for (ind,pop) in enumerate(exc_pop) + νe = full_mean_rates[ind]kHz + post = neurons[pop] + s = SNN.PoissonStimulus(post, :ge; param = νe, cells=:ALL, μ=1.f0, name="PoissonE_$(post.name)") + stimuli[Symbol(string("PoissonE_", pop))] = s + end + return merge_models(neurons,connections, stimuli),neurons,connections,stimuli,net_dict +end + +#if !isfile("cached_potjans_model.jld2") + model,neurons_,connections_,stimuli_,net_dict = potjans_layer(0.085) + @save "cached_potjans_model.jld2" model neurons_ connections_ stimuli_ net_dict +#else + @load "cached_potjans_model.jld2" model neurons_ connections_ stimuli_ net_dict +#end +#@show(connections_.vals) + +before_learnning_weights = model.syn[1].W +#= +ΔTs = -100:1:100ms +ΔWs = zeros(Float32, length(ΔTs)) +Threads.@threads for i in eachindex(ΔTs) + ΔT = ΔTs[i] + #spiketime = [2000ms, 2000ms+ΔT] + #neurons = [[1], [2]] + #inputs = SpikeTime(spiketime, neurons) + w = zeros(Float32, 2,2) + w[1, 2] = 1f0 + st = Identity(N=max_neurons(inputs)) + stim = SpikeTimeStimulusIdentity(st, :g, param=inputs) + syn = SpikingSynapse( st, st, nothing, w = w, param = stdp_param) + + model = merge_models(pop=st, stim=stim, syn=syn, silent=true) + SNN.monitor(model.pop..., [:fire]) + SNN.monitor(model.syn..., [:tpre, :tpost]) + train!(model=model, duration=3000ms, dt=0.1ms) + ΔWs[i] = model.syn[1].W[1] - 1 +end +=# + +duration = 15000ms +SNN.monitor([model.pop...], [:fire]) +#SNN.monitor([model.pop...], [:v], sr=200Hz) +SNN.sim!(model=model, duration=3000ms, dt=0.125, pbar = true)#, dt = 0.125) + +#= +# Example data: Spike times and trial start times +s#pike_times = [0.1, 0.15, 0.2, 0.4, 0.6, 0.65, 0.8, 1.0, 1.1, 1.3, 1.5] +t#rial_starts = [0.0, 1.0, 2.0] # Start times of each trial +num_trials = length(trial_starts) + +# Parameters +bin_width = 0.1 # Bin width in seconds +time_window = (0.0, 1.0) # Time window relative to each trial start + +# Create bins +edges = collect(time_window[1]:bin_width:time_window[2]) +num_bins = length(edges) - 1 + +# Initialize a matrix to hold spike counts +spike_counts = zeros(num_trials, num_bins) + +# Bin spikes for each trial +for (i, trial_start) in enumerate(trial_starts) + aligned_spikes = spike_times .- trial_start # Align spikes to trial start + filtered_spikes = aligned_spikes[aligned_spikes .>= time_window[1] .& aligned_spikes .< time_window[2]] + spike_counts[i, :] = histcounts(filtered_spikes, edges) +end + +# Compute average spike activity (optional) +average_spike_activity = mean(spike_counts, dims=1) + +# Plot heatmap +heatmap( + edges[1:end-1] .+ bin_width / 2, # Bin centers + 1:num_trials, # Trial indices + spike_counts, + xlabel="Time (s)", + ylabel="Trial", + color=:viridis, + title="Spike Activity Heatmap" +) +=# +SNN.train!(model=model, duration=3000ms, dt=0.125, pbar = true)#, dt = 0.125) + +#SNN.sim!(model=model; duration = duration, pbar = true, dt = 0.125) +display(SNN.raster(model.pop, [1.0s, 2s])) +savefig("without_stimulus.png") + + + + +after_learnning_weights0 = model.syn[1].W + +training_order = shuffle(0:60000-1) +cached_spikes=[] +for i in 1:40 + 
push!(cached_spikes,Tonic_NMNIST_Stimulus.getNMNIST(training_order[i])) +end + +""" +Filter for spike packets that belong to provided labels that pertain to single numeral characters. +""" + + +if !isfile("labeled_packets.jld2") + labeled_packets = Tonic_NMNIST_Stimulus.spike_packets_by_labels(cached_spikes) + filter_label = 0 + (population_code,time_and_offset) = labeled_packets[filter_label] + for filter_label in range(0,9) + (population_code_,time_and_offset_) = labeled_packets[filter_label] + append!(population_code,population_code_) + append!(time_and_offset,time_and_offset_) + end + @save "labeled_packets.jld2" population_code time_and_offset +else + @load "labeled_packets.jld2" population_code time_and_offset +end + + + +p1 = Plots.scatter() +scatter!(p1, + time_and_offset, + population_code, + ms = 0.5, # Marker size + ylabel = "Neuron Index" , + xlabel ="Time (ms)", + title = "Spiking Activity with Distinct Characters", + legend=false +) +display(plot(p1)) +savefig("stimulus.png") + +neurons_as_nested_array = [ Vector{Int64}([n]) for n in population_code] +inputs = SpikeTime(time_and_offset,neurons_as_nested_array) + +st = neurons_[:E4] #Identity(N=max_neurons(inputs)) +w = ones(Float32,neurons_[:E4].N,max_neurons(inputs))*15 + + +st = Identity(N=max_neurons(inputs)) +stim = SpikeTimeStimulusIdentity(st, :g, param=inputs) + + +syn = SpikingSynapse( st, neurons_[:E4], nothing, w = w)#, param = stdp_param) +model2 = merge_models(pop=[st,model], stim=[stim,stimuli_], syn=[syn,connections_], silent=false) + +duration = 15000ms +SNN.monitor([model2.pop...], [:fire]) +SNN.monitor([model2.pop...], [:v], sr=200Hz) +SNN.train!(model=model2; duration = duration, pbar = true, dt = 0.125) +#display(SNN.raster(model2.pop, [0s, 15s])) + +after_learnning_weights1 = model.syn[1].W + +@show(mean(before_learnning_weights)) +@show(mean(after_learnning_weights0)) +@show(mean(after_learnning_weights1)) + +#mean(model2.syn[1].W) + +SNN.spiketimes(model.pop[1]) + +#x, y, y0 = SNN._raster(model2.pop.pop_2_E5,[1.95s, 2s])) + +display(SNN.raster(model2.pop, [1.75s, 2s])) + +savefig("with_stimulus.png") + +Trange = 0:10:15s +frE, interval, names_pop = SNN.firing_rate(model2.pop, interval = Trange) +plot(mean.(frE), label=hcat(names_pop...), xlabel="Time [ms]", ylabel="Firing rate [Hz]", legend=:topleft) +savefig("firing_rate.png") + +## + +vecplot(model2.pop.E4, :v, neurons =1, r=0s:15s,label="soma") +savefig("vector_vm_plot.png") +layer_names, conn_probs, conn_j = potjans_conn(4000) +pj = heatmap(conn_j, xticks=(1:8,layer_names), yticks=(1:8,layer_names), aspect_ratio=1, color=:bluesreds, title="Synaptic weights", xlabel="Presynaptic", ylabel="Postsynaptic", size=(500,500), clims=(-maximum(abs.(conn_j)), maximum(abs.(conn_j)))) +pprob=heatmap(conn_probs, xticks=(1:8,layer_names), yticks=(1:8,layer_names), aspect_ratio=1, color=:viridis, title="Connection probability", xlabel="Presynaptic", ylabel="Postsynaptic", size=(500,500)) +plot(pprob, pj, layout=(1,2), size=(1000,500), margin=5Plots.mm) diff --git a/test/runtests.jl b/test/runtests.jl index 26702d3..470f2e7 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -16,10 +16,9 @@ include("iz_neuron.jl") include("oja.jl") include("rate_net.jl") include("stdp_demo.jl") -include("tripod.jl") -include("tripod_network.jl") include("poisson_stim.jl") -include("tripod.jl") -include("tripod_network.jl") -include("spiketime.jl") -include("ballandstick.jl") \ No newline at end of file +#include("tripod_soma.jl") +#include("tripod.jl") 
+#include("tripod_network.jl") +#include("spiketime.jl") +#include("ballandstick.jl")