-
Notifications
You must be signed in to change notification settings - Fork 61
/
Copy pathparams.prm
279 lines (213 loc) · 10.4 KB
/
params.prm
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
#######################################################################
# SpikeDetekt parameters
#######################################################################
# NOTE(review): despite the .prm extension this file uses Python syntax
# (comments, string concatenation below); it is presumably exec'd by the
# klusta/SpikeDetekt pipeline, so every top-level name is a parameter
# read by the tool — confirm against the pipeline's loader.
# Name of the experiment (which will be the name of the output KWIK
# files)
experiment_name = 'test_hybrid_120sec'
# Raw data files. These can be in .dat/.bin/any raw data format,
# or .kwd HDF5 KWIK format. You can pass a list of several files which
# will be processed sequentially.
# Here derived from experiment_name, i.e. 'test_hybrid_120sec.raw.kwd'.
raw_data_files = experiment_name + '.raw.kwd'
#raw_data_files = ['file1.dat', 'file2.dat', 'file3.dat']
# Probe file describing geometry, channels, and adjacency graph in JSON
prb_file = '32chan1shankbuzsaki.prb'
# Bit depth of raw data (bits per sample)
nbits = 16
# Multiplier from actual voltage to stored data
voltage_gain = 10.
# Raw data sampling rate, in Hz
sample_rate = 20000
# Number of channels in the recording
nchannels = 32
# ---------------------------------------------------------------------
# Raw data filtering and saving
# ---------------------------------------------------------------------
# Whether to save the .raw.kwd file if a non-HDF5 raw data format is used.
# This is needed to visualise the data in TraceView etc, and speeds up
# future runs of SpikeDetekt. If a .raw.kwd file is used as the input,
# it will never be overwritten.
#save_raw = True
# Whether to save the .high.kwd file with HPF data used for spike
# detection. This is processed using a Butterworth band-pass filter.
#save_high = False
# Bandpass filter low corner frequency
#filter_low = 500.
# Bandpass filter high corner frequency
#filter_high = 0.95 * .5 * sample_rate
# Order of Butterworth filter.
#filter_butter_order = 3
# Whether to save a .low.kwd file; this is processed using a Hamming
# window FIR filter, then subsampled 16x to save space when storing.
#save_low = False
# ---------------------------------------------------------------------
# Chunks
# ---------------------------------------------------------------------
# SpikeDetekt processes the raw data in chunks with small overlaps to
# catch spikes which would otherwise span two chunks. These options
# will change the default chunk size and overlap.
#chunk_size = int(1. * sample_rate) # 1 second
#chunk_overlap = int(.015 * sample_rate) # 15 ms
# ---------------------------------------------------------------------
# Threshold setting for spike detection
# ---------------------------------------------------------------------
# Change this to 'positive' to detect positive spikes, or 'both' to
# detect both positive and negative spikes with the same threshold.
#detect_spikes = 'negative'
# SpikeDetekt takes a set of uniformly distributed chunks throughout
# the high-pass filtered data to estimate its standard deviation. These
# parameters select how many excerpts are used and how long each of them
# are.
#nexcerpts = 50
#excerpt_size = int(1. * sample_rate) # 1 second
# This is then used to calculate a base threshold which is multiplied
# by the two parameters below for the two-threshold detection process.
#threshold_strong_std_factor = 4
#threshold_weak_std_factor = 2.
# ---------------------------------------------------------------------
# Spike extraction
# ---------------------------------------------------------------------
# The number of samples to extract before and after the centre of the
# spike for waveforms. Then, waveforms_nsamples is calculated using the
# formula: waveforms_nsamples = extract_s_before + extract_s_after
#extract_s_before = 16
#extract_s_after = 16
# ---------------------------------------------------------------------
# Features
# ---------------------------------------------------------------------
# Number of features (PCs) per channel.
#nfeatures_per_channel = 3
# The number of spikes used to determine the PCs
#pca_nwaveforms_max = 10000
# ---------------------------------------------------------------------
# Advanced
# ---------------------------------------------------------------------
# Number of samples to use in floodfill algorithm for spike detection
#connected_component_join_size = 1 # 1 sample
#connected_component_join_size = int(.00005 * sample_rate) # 0.05ms
# Waveform alignment
#weight_power = 2
# Whether to make the features array contiguous
#features_contiguous = True
#######################################################################
# KlustaKwik parameters (must be prefixed by KK_). Uncomment to override
# the defaults, which can be shown by running 'klustakwik' with no options
#######################################################################
# This causes KlustaKwik to perform clustering on a subset of spikes and
# estimate the assignment of the other spikes. This causes a speedup in
# computational time (by a rough factor of KK_Subset), though will not
# significantly decrease RAM usage. For long runs where you are unsure of
# the data quality, you can first use KK_Subset = 50 to check the
# clustering quality before performing a Subset 1 (all spikes) run.
#KK_Subset = 1
# The largest permitted number of clusters, so cluster splitting can produce
# no more than n clusters. Note: This must be set higher than MaskStarts.
#KK_MaxPossibleClusters = 1000
# Maximum number of iterations, i.e. it won't try more than n iterations
# from any starting point.
#KK_MaxIter = 10000
# You can start with a chosen fixed number of clusters derived from the
# mask vectors, set by KK_MaskStarts.
#KK_MaskStarts = 500
# The number of iterations after which KlustaKwik first attempts to split
# existing clusters. KlustaKwik then splits every SplitEvery iterations.
#KK_SplitFirst = 20
# The number of iterations after which KlustaKwik attempts to split existing
# clusters. When using masked initializations, to save time due to excessive
# splitting, set SplitEvery to a large number, close to the number of distinct
# masks or the number of chosen starting masks.
#KK_SplitEvery = 40
# KlustaKwik uses penalties to reduce the number of clusters fit. The
# parameters PenaltyK and PenaltyKLogN are given positive values. The
# higher the values, the fewer clusters you obtain. Higher penalties
# discourage cluster splitting. PenaltyKLogN also increases penalty
# when there are more points. -PenaltyK 0 -PenaltyKLogN 1 is the
# default, corresponding to the "Bayesian Information Criterion".
# -PenaltyK 1 -PenaltyKLogN 0 corresponds to "Akaike's Information
# Criterion". This produces a larger number of clusters, and is
# recommended if you find that clusters corresponding to different
# neurons are incorrectly merged.
#KK_PenaltyK = 0.
#KK_PenaltyKLogN = 1.
# Specifies a seed for the random number generator.
#KK_RandomSeed = 1
# The number of unmasked spikes on a certain channel needed to unmask that
# channel in the cluster. This prevents a single noisy spike, or coincident
# noise on adjacent channels from slowing down computation time.
#KK_PointsForClusterMask = 10
# Setting this saves a .temp.clu file every iteration. This slows the runtime
# down reasonably significantly for small runs with many iterations, but allows
# to recover where KlustaKwik left off; useful in case of large runs where you
# are not confident that the run will be uninterrupted.
#KK_SaveTempCluEveryIter = 0
# This is an integer N when, used in combination with the empty string
# for UseFeatures above, omits the last N features. This should always
# be used with KK_UseFeatures = ""
#KK_DropLastNFeatures = 0
# ---------------------------------------------------------------------
# Classic 'all channels unmasked always' mode
# ---------------------------------------------------------------------
# To use KlustaKwik in "unmasked" mode, set this to 0.
# This disables the use of the new `masked Expectation-Maximization'
# algorithm, and sets all the channels to be unmasked on all spikes.
#KK_UseDistributional = 1
# In classic mode, KlustaKwik starts from random cluster assignments,
# running a new random start for every integer between MinClusters and
# MaxClusters. For these values to take effect, MaskStarts must be set to 0.
#KK_MinClusters = 100
#KK_MaxClusters = 110
# By default, this is an empty string, which means 'use all features'.
# Or, you can specify a string with 1's for features you want to
# use, and 0's for features you don't want to use. In classic mode,
# you use this option to take out bad channels. In masked mode,
# you should instead take bad channels out from the .PRB file.
#KK_UseFeatures = ""
# ---------------------------------------------------------------------
# Advanced
# ---------------------------------------------------------------------
# The algorithm will be started n times for each initial cluster count
# between MinClusters and MaxClusters.
#KK_nStarts = 1
# Saves means and covariance matrices. Stops computation at each iteration.
# Manual input required for continuation.
#KK_SaveCovarianceMeans = 0
# Saves a .clu file with masks sorted lexicographically.
#KK_SaveSorted = 0
# Initialises using distinct derived binary masks. Use together with
# AssignToFirstClosestMask below.
#KK_UseMaskedInitialConditions = 0
# If starting with a number of clusters fewer than the number of distinct
# derived binary masks, it will assign the rest of the points to the cluster
# with the nearest mask.
#KK_AssignToFirstClosestMask = 0
# All log-likelihoods are recalculated every KK_FullStepEvery steps
# (see DistThresh).
#KK_FullStepEvery = 20
#KK_MinMaskOverlap = 0.
#KK_AlwaysSplitBimodal = 0
# ---------------------------------------------------------------------
# Debugging
# ---------------------------------------------------------------------
# Turns miscellaneous debugging information on.
#KK_Debug = 0
# Increasing this to 2 increases the amount of information logged to
# the console and the log.
#KK_Verbose = 1
# Outputs more debugging information.
#KK_DistDump = 0
# Time-saving parameter. If a point has log likelihood more than
# DistThresh worse for a given class than for the best class, the log
# likelihood for that class is not recalculated. This saves an awful lot
# of time.
#KK_DistThresh = 6.907755
# All log-likelihoods are recalculated if the fraction of instances
# changing class exceeds ChangedThresh (see DistThresh).
#KK_ChangedThresh = 0.05
# Produces .klg log file (default is yes, to switch off do -Log 0).
#KK_Log = 1
# Produces parameters and progress information on the console. Set to
# 0 to suppress output in batches.
#KK_Screen = 1
# Helps normalize covariance matrices.
#KK_PriorPoint = 1
# Outputs number of initial clusters.
#KK_SplitInfo = 1