
BUG: Fix doc building bugs #12009


Merged: 3 commits, Sep 24, 2023

2 changes: 2 additions & 0 deletions doc/conf.py
```diff
@@ -37,6 +37,8 @@
 os.environ["MNE_BROWSER_OVERVIEW_MODE"] = "hidden"
 os.environ["MNE_BROWSER_THEME"] = "light"
 os.environ["MNE_3D_OPTION_THEME"] = "light"
+# https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors  # noqa: E501
+os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style"
 sphinx_logger = sphinx.util.logging.getLogger("mne")

 # -- Path setup --------------------------------------------------------------
```
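For readers unfamiliar with the variable: per the linked Numba deprecation page, `NUMBA_CAPTURED_ERRORS="new_style"` opts into the new error-capturing behavior and avoids the old-style deprecation warning during the doc build. A minimal sketch of the pattern used here (it assumes numba is installed; setting the variable before the first numba import is the safe ordering):

```python
import os

# Opt in before anything imports numba, so the setting is seen at import time.
os.environ.setdefault("NUMBA_CAPTURED_ERRORS", "new_style")

import numba  # noqa: E402

print(numba.__version__)
```
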
2 changes: 1 addition & 1 deletion examples/decoding/receptive_field_mtrf.py
```diff
@@ -55,7 +55,7 @@
 data = loadmat(join(path, "speech_data.mat"))
 raw = data["EEG"].T
 speech = data["envelope"].T
-sfreq = float(data["Fs"])
+sfreq = float(data["Fs"].item())
 sfreq /= decim
 speech = mne.filter.resample(speech, down=decim, npad="auto")
 raw = mne.filter.resample(raw, down=decim, npad="auto")
```
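The likely motivation (an inference, not stated in the diff): `scipy.io.loadmat` returns MATLAB scalars such as `Fs` as 1x1 arrays, and NumPy >= 1.25 deprecates converting arrays with `ndim > 0` to Python scalars, so `float(data["Fs"])` now emits a `DeprecationWarning`; `.item()` performs the conversion explicitly. A small standalone sketch:

```python
import warnings

import numpy as np

fs = np.array([[128.0]])  # stand-in for loadmat's 1x1 data["Fs"]
with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    sfreq = float(fs.item())  # fine; float(fs) would raise here on NumPy >= 1.25
print(sfreq)  # 128.0
```
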
2 changes: 1 addition & 1 deletion examples/inverse/mixed_norm_inverse.py
```diff
@@ -87,7 +87,7 @@
 )

 t = 0.083
-tidx = evoked.time_as_index(t)
+tidx = evoked.time_as_index(t).item()
 for di, dip in enumerate(dipoles, 1):
     print(f"Dipole #{di} GOF at {1000 * t:0.1f} ms: " f"{float(dip.gof[tidx]):0.1f}%")

```
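Same idea here: `Evoked.time_as_index` returns a one-element integer array rather than a plain int, so indexing with it and then calling `float()` on the result runs into the same NumPy deprecation; `.item()` yields a Python int up front. A quick sketch using a synthetic evoked object (the channel name, length, and sampling rate are arbitrary):

```python
import numpy as np

import mne

info = mne.create_info(["MEG 0001"], sfreq=1000.0, ch_types="mag")
evoked = mne.EvokedArray(np.zeros((1, 200)), info, tmin=0.0)

tidx = evoked.time_as_index(0.083)
print(type(tidx), tidx.shape)  # <class 'numpy.ndarray'> (1,)
print(type(tidx.item()))       # <class 'int'>, safe for scalar indexing
```
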
4 changes: 4 additions & 0 deletions mne/conftest.py
```diff
@@ -104,6 +104,10 @@ def pytest_configure(config):
     if os.getenv("PYTEST_QT_API") is None and os.getenv("QT_API") is not None:
         os.environ["PYTEST_QT_API"] = os.environ["QT_API"]

+    # https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors  # noqa: E501
+    if "NUMBA_CAPTURED_ERRORS" not in os.environ:
+        os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style"
+
     # Warnings
     # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
     #   we should remove them from here.
```
20 changes: 0 additions & 20 deletions mne/fixes.py
```diff
@@ -797,7 +797,6 @@ def _jit(func):

     prange = range
     bincount = np.bincount
-    mean = np.mean

 else:

@@ -808,25 +807,6 @@ def bincount(x, weights, minlength):  # noqa: D103
             out[idx] += w
         return out

-    # fix because Numba does not support axis kwarg for mean
-    @jit()
-    def _np_apply_along_axis(func1d, axis, arr):
-        assert arr.ndim == 2
-        assert axis in [0, 1]
-        if axis == 0:
-            result = np.empty(arr.shape[1])
-            for i in range(len(result)):
-                result[i] = func1d(arr[:, i])
-        else:
-            result = np.empty(arr.shape[0])
-            for i in range(len(result)):
-                result[i] = func1d(arr[i, :])
-        return result
-
-    @jit()
-    def mean(array, axis):  # noqa: D103
-        return _np_apply_along_axis(np.mean, axis, array)
-

 ###############################################################################
 # Matplotlib
```
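Context for the deletion, based on the removed comment itself: Numba's nopython mode does not accept the `axis` keyword of `np.mean`, so MNE carried a jitted apply-along-axis workaround; with its last caller gone (see the `mne/transforms.py` change below), the helper can be dropped. A minimal sketch of the limitation it worked around, assuming numba is installed:

```python
import numpy as np
from numba import njit


@njit
def column_means(arr):
    # np.mean without ``axis`` works in nopython mode; the axis kwarg does not,
    # so a per-column loop stands in for mean(arr, axis=0).
    out = np.empty(arr.shape[1])
    for j in range(arr.shape[1]):
        out[j] = np.mean(arr[:, j])
    return out


print(column_means(np.arange(6.0).reshape(3, 2)))  # [2. 3.]
```
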
18 changes: 7 additions & 11 deletions mne/transforms.py
```diff
@@ -15,7 +15,7 @@
 from scipy.spatial.distance import cdist
 from scipy.special import sph_harm

-from .fixes import jit, mean, _get_img_fdata
+from .fixes import jit, _get_img_fdata
 from ._fiff.constants import FIFF
 from ._fiff.open import fiff_open
 from ._fiff.tag import read_tag
@@ -1466,16 +1466,12 @@ def _fit_matched_points(p, x, weights=None, scale=False):
     assert p.ndim == 2
     assert p.shape[1] == 3
     # (weighted) centroids
-    if weights is None:
-        mu_p = mean(p, axis=0)  # eq 23
-        mu_x = mean(x, axis=0)
-        dots = np.dot(p.T, x)
-        dots /= p.shape[0]
-    else:
-        weights_ = np.reshape(weights / weights.sum(), (weights.size, 1))
-        mu_p = np.dot(weights_.T, p)[0]
-        mu_x = np.dot(weights_.T, x)[0]
-        dots = np.dot(p.T, weights_ * x)
+    weights_ = np.full((p.shape[0], 1), 1.0 / max(p.shape[0], 1))
+    if weights is not None:
+        weights_[:] = np.reshape(weights / weights.sum(), (weights.size, 1))
+    mu_p = np.dot(weights_.T, p)[0]
+    mu_x = np.dot(weights_.T, x)[0]
+    dots = np.dot(p.T, weights_ * x)
     Sigma_px = dots - np.outer(mu_p, mu_x)  # eq 24
     # x and p should no longer be used
     A_ij = Sigma_px - Sigma_px.T
```
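Why a single weighted path is enough (a quick numerical check, not from the PR text): with uniform weights 1 / N the dot-product formulation reproduces the old unweighted centroids and cross-term exactly, which is what lets the jit-compatible `mean` import go away:

```python
import numpy as np

rng = np.random.default_rng(0)
p = rng.standard_normal((10, 3))
x = rng.standard_normal((10, 3))

weights_ = np.full((p.shape[0], 1), 1.0 / p.shape[0])  # uniform weights
np.testing.assert_allclose(np.dot(weights_.T, p)[0], p.mean(axis=0))  # eq 23
np.testing.assert_allclose(np.dot(p.T, weights_ * x), np.dot(p.T, x) / p.shape[0])
print("uniform-weight path matches the old unweighted branch")
```
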
1 change: 1 addition & 0 deletions mne/viz/_3d.py
```diff
@@ -3464,6 +3464,7 @@ def plot_sparse_source_estimates(
     plt_show(show)

     renderer.show()
+    renderer.set_camera(distance="auto", focalpoint="auto")
     return renderer.scene()


```
3 changes: 0 additions & 3 deletions tutorials/io/60_ctf_bst_auditory.py
```diff
@@ -21,7 +21,6 @@
 `FieldTrip bug tracker
 <http://bugzilla.fieldtriptoolbox.org/show_bug.cgi?id=2300>`__.
 """
-
 # Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
 #          Eric Larson <larson.eric.d@gmail.com>
 #          Jaakko Leppakangas <jaeilepp@student.jyu.fi>
@@ -39,8 +38,6 @@
 from mne.datasets.brainstorm import bst_auditory
 from mne.io import read_raw_ctf

-print(__doc__)
-
 # %%
 # To reduce memory consumption and running time, some of the steps are
 # precomputed. To run everything from scratch change ``use_precomputed`` to
```
5 changes: 2 additions & 3 deletions tutorials/machine-learning/30_strf.py
```diff
@@ -93,7 +93,6 @@
 plt.autoscale(tight=True)
 mne.viz.tight_layout()

-
 # %%
 # Simulate a neural response
 # --------------------------
@@ -186,7 +185,7 @@
     rf.fit(X_train, y_train)

     # Now make predictions about the model output, given input stimuli.
-    scores[ii] = rf.score(X_test, y_test)
+    scores[ii] = rf.score(X_test, y_test).item()
     models.append(rf)

 times = rf.delays_ / float(rf.sfreq)
@@ -294,7 +293,7 @@
     rf.fit(X_train, y_train)

     # Now make predictions about the model output, given input stimuli.
-    scores_lap[ii] = rf.score(X_test, y_test)
+    scores_lap[ii] = rf.score(X_test, y_test).item()
     models_lap.append(rf)

 ix_best_alpha_lap = np.argmax(scores_lap)
```
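The `.item()` here guards the same NumPy deprecation as above, this time on assignment: `rf.score` returns an array with one score per output, and storing a length-1 array into an element of a float array is deprecated on NumPy >= 1.25. A one-line sketch (the score value is made up):

```python
import numpy as np

scores = np.zeros(3)
one_output_score = np.array([0.71])  # stand-in for rf.score(X_test, y_test)
scores[0] = one_output_score.item()  # plain scores[0] = one_output_score warns on NumPy >= 1.25
```
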
4 changes: 2 additions & 2 deletions tutorials/stats-sensor-space/10_background_stats.py
```diff
@@ -8,7 +8,6 @@
 Here we will briefly cover multiple concepts of inferential statistics in an
 introductory manner, and demonstrate how to use some MNE statistical functions.
 """
-
 # Authors: Eric Larson <larson.eric.d@gmail.com>
 #
 # License: BSD-3-Clause
@@ -250,7 +249,8 @@ def plot_t_p(t, p, title, mcc, axes=None):
 ps.append(np.zeros(width * width))
 mccs.append(False)
 for ii in range(n_src):
-    ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
+    t, p = permutation_t_test(X[:, [ii]], verbose=False)[:2]
+    ts[-1][ii], ps[-1][ii] = t[0], p[0]
 plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])

 # %%
```
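`permutation_t_test` returns arrays (`T_obs`, `p_values`, `H0`), so the rewritten lines unpack the per-test arrays first and index element 0 explicitly instead of relying on the deprecated array-to-scalar assignment. A small self-contained check:

```python
import numpy as np

from mne.stats import permutation_t_test

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 1)) + 0.3  # 20 observations, 1 test
t, p = permutation_t_test(X, verbose=False)[:2]
print(t.shape, p.shape)  # (1,) (1,), hence the explicit t[0], p[0]
```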