diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml
index 34801a9c..6ab3295b 100644
--- a/.github/workflows/deploy-docs.yml
+++ b/.github/workflows/deploy-docs.yml
@@ -27,7 +27,7 @@ jobs:
           python-version: '3.10'
 
       - run: pip install -e .[build]
-      - run: pdoc -o docs-generated/ -t docs/templates --math docs/bib/bib.py docs/dev_guide.py ./dapper
+      - run: pdoc -o docs-generated/ -t docs/templates --math --docformat=numpy docs/bib/bib.py docs/dev_guide.py ./dapper
 
       - uses: actions/upload-pages-artifact@v1
         with:
diff --git a/README.md b/README.md
index ede0de11..ee8d3f42 100644
--- a/README.md
+++ b/README.md
@@ -10,10 +10,8 @@
 
-DAPPER is a set of templates for **benchmarking** the performance of
-**data assimilation** (DA) methods.
-The tests provide experimental support and guidance for
-new developments in DA.
+DAPPER is a set of templates for **benchmarking** the performance of **data assimilation** (DA) methods.
+The numerical experiments provide support and guidance for new developments in DA.
 
 The typical set-up is a **synthetic (twin) experiment**, where you
 specify a dynamic model and an observational model, and use these to
 generate a synthetic truth (multivariate time series),
diff --git a/dapper/README.md b/dapper/README.md
index 6be82ae7..ed35297b 100644
--- a/dapper/README.md
+++ b/dapper/README.md
@@ -26,8 +26,8 @@ your own **model** or **method**, then
 rather than trying to squeeze everything into its templates.
 - If it is relatively simple, however, you may well want to use DAPPER.
   In that case, read this:
-  - `mods`
-  - `da_methods`
+  - `dapper.mods`
+  - `dapper.da_methods`
 
 Since the generality of DAPPER is
 [limited](https://github.com/nansencenter/DAPPER#similar-projects)
diff --git a/dapper/da_methods/README.md b/dapper/da_methods/README.md
index 7a187be4..85b619a8 100644
--- a/dapper/da_methods/README.md
+++ b/dapper/da_methods/README.md
@@ -1,5 +1,6 @@
-See the README section on
+Also see the section on
 [DA Methods](https://github.com/nansencenter/DAPPER#DA-Methods)
+in the main README for an overview of the methods included with DAPPER.
 
 ## Defining your own method
 
diff --git a/dapper/mods/QG/__init__.py b/dapper/mods/QG/__init__.py
index 235f4a36..4a3fa746 100644
--- a/dapper/mods/QG/__init__.py
+++ b/dapper/mods/QG/__init__.py
@@ -95,7 +95,7 @@ def step_1(self, x0, t, dt):
     assert self.prms["dtout"] == dt
     # Coz Fortran is typed.
     assert isinstance(t, float)
-    # QG is autonomous, but Fortran doesn't like nan/inf.
+    # QG is autonomous (⇒ t should not matter), but Fortran doesn't like nan/inf.
     assert np.isfinite(t)
     # Copy coz Fortran will modify in-place.
     psi = py2f(x0.copy())
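
[Note on the `dapper/mods/QG/__init__.py` hunk] The reworded comment clarifies that QG is autonomous, so the time argument exists only to satisfy the typed Fortran interface, which rejects `nan`/`inf`. A standalone sketch of that calling contract (the body below is an illustrative stand-in, not the actual Fortran-backed `step_1`):

```python
import numpy as np

def step_1(x0, t, dt):
    """Stand-in mimicking the assertions of the wrapped Fortran step."""
    assert isinstance(t, float)  # Coz Fortran is typed.
    assert np.isfinite(t)        # Autonomous, but Fortran doesn't like nan/inf.
    psi = np.asarray(x0, dtype=float).copy()  # Copy coz Fortran modifies in-place.
    return psi  # The real method forwards psi (and dt) to the compiled QG stepper.

x1 = step_1(np.zeros(10), 0.0, 1.25)  # Any *finite* float works for t, e.g. 0.0.
```
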
diff --git a/dapper/mods/QG/f90/README.md b/dapper/mods/QG/f90/README.md
index 9a47fa9a..b2b769fb 100644
--- a/dapper/mods/QG/f90/README.md
+++ b/dapper/mods/QG/f90/README.md
@@ -26,52 +26,56 @@ Licence reproduced here.
 Compile as follows...
 
 ### This `f90` directory
-contains Fortran90 code for the QG model for building
+
+contains Fortran-90 code for the QG model for building
 1. a python extension module `py_mod`
 2. a standalone program
 
 DAPPER only requires `py_mod` (also to generate initial sample).
-Both require an f90 compiler (tested with g95 and gfortran).
+Both require an `f90` compiler (tested with `g95` and `gfortran`).
 In addition `py_mod` requires `f2py`,
 while `qg` requires `netcdf` libraries.
 
-### For DAPPER,
-To build `py_mod`, ensure that both `gcc` and `gfortran` are installed.
-For example, with `conda`:
-
-    $ conda install -c conda-forge gcc
-    $ conda install -c conda-forge gfortran
-
+### For DAPPER
+
+To build `py_mod`, ensure that both `gcc` and `gfortran` are installed.
+For example, with `conda`:
+
+    conda install -c conda-forge gcc
+    conda install -c conda-forge gfortran
+
 Then run:
 
-    $ cd dapper/mods/QG/f90
-    $ rm -rf py_mod.cpython-* __pycache__
-    $ f2py -c utils.f90 parameters.f90 helmholtz.f90 calc.f90 qgflux.f90 qgstep.f90 interface.f90 -m py_mod
+    cd dapper/mods/QG/f90
+    rm -rf py_mod.cpython-* __pycache__
+    f2py -c utils.f90 parameters.f90 helmholtz.f90 calc.f90 qgflux.f90 qgstep.f90 interface.f90 -m py_mod
 
 ### For the standalone executable `qg`
+
 (not required for DAPPER),
 adapt the `Makefile` to your system, and run
 
-    $ make qg
+    make qg
 
 Example: here's how I compiled the standalone on my Mac:
 
-- get netcdf: `brew install netcdf --with-fortran`
-- In makefile, changed to:
-      FC = gfortran-5
-      NCLIB = /usr/local/lib/libnetcdff.dylib /usr/local/lib/libnetcdf.dylib
-- Matlab has a new netcdf interface. Therefore, qgread.m should use
-      % ncdisp(fname)
-      ncid = netcdf.open(fname);
-      psi = permute( netcdf.getVar(ncid, netcdf.inqVarID(ncid,'psi')), [3 2 1]);
-      q = permute( netcdf.getVar(ncid, netcdf.inqVarID(ncid,'q')) , [3 2 1]);
-      t = netcdf.getVar(ncid, netcdf.inqVarID(ncid,'t'));
-### Changelog since Sakov's enkf-matlab:
-
-- Rm Matlab interface funcs: qgplay.m qgplot.m qgread.m mexcmd.m qgstep_mex.f90 mexf90.f90
-- Modified makefile, as described above.
-- In parameters.f90:
-  - Capitalized m,n
-  - Swapped NY1 and NX1 in definitions of M,N
-  - Changed typing of arrays in interface.f90, qgflux.f90, qgstep.f90 to allocatable.
-- Made `interface.f90` for Python.
+  - get netcdf: `brew install netcdf --with-fortran`
+  - In makefile, changed to:
+        FC = gfortran-5
+        NCLIB = /usr/local/lib/libnetcdff.dylib /usr/local/lib/libnetcdf.dylib
+  - Matlab has a new netcdf interface. Therefore, qgread.m should use
+        % ncdisp(fname)
+        ncid = netcdf.open(fname);
+        psi = permute( netcdf.getVar(ncid, netcdf.inqVarID(ncid,'psi')), [3 2 1]);
+        q = permute( netcdf.getVar(ncid, netcdf.inqVarID(ncid,'q')) , [3 2 1]);
+        t = netcdf.getVar(ncid, netcdf.inqVarID(ncid,'t'));
+
+### Changelog since Sakov's enkf-matlab
+
+  - Rm Matlab interface funcs: qgplay.m qgplot.m qgread.m mexcmd.m qgstep_mex.f90 mexf90.f90
+  - Modified makefile, as described above.
+  - In parameters.f90:
+    - Capitalized m,n
+    - Swapped NY1 and NX1 in definitions of M,N
+    - Changed typing of arrays in interface.f90, qgflux.f90, qgstep.f90 to allocatable.
+  - Made `interface.f90` for Python.
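
[Note on the build instructions above] After the `f2py ... -m py_mod` step, a quick way to confirm that the extension built and is importable is the following illustrative smoke test (run from inside `dapper/mods/QG/f90/`; the printed names depend on what `interface.f90` exposes):

```python
# Purely illustrative smoke test for the f2py build.
import py_mod  # The module name passed via `f2py ... -m py_mod`.

# List the wrapped Fortran entry points.
print([name for name in dir(py_mod) if not name.startswith("_")])
```
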
diff --git a/docs/dev_guide.md b/docs/dev_guide.md
index 2dbfec79..d8bbfe8b 100644
--- a/docs/dev_guide.md
+++ b/docs/dev_guide.md
@@ -110,7 +110,7 @@ the `lightscript` format), so that the paired files can be kept in synch.
 The documentation is built with `pdoc`, e.g.
 
 ```sh
-pdoc -t docs/templates --math docs/bib/bib.py docs/dev_guide.py ./dapper
+pdoc -t docs/templates --math --docformat=numpy docs/bib/bib.py docs/dev_guide.py ./dapper
 ```
 
 ##### Hosting
diff --git a/docs/templates/module.html.jinja2 b/docs/templates/module.html.jinja2
index faa4e616..1c14fd94 100644
--- a/docs/templates/module.html.jinja2
+++ b/docs/templates/module.html.jinja2
@@ -1,5 +1,5 @@
 {% set logo = 'https://raw.githubusercontent.com/nansencenter/DAPPER/master/docs/imgs/logo_wtxt.png' %}
-{% set logo_link = '/' %}
+{% set logo_link = '/DAPPER' %}
 {% set favicon = 'https://raw.githubusercontent.com/nansencenter/DAPPER/master/docs/imgs/logo.png' %}
 
 {% extends "default/module.html.jinja2" %}
diff --git a/tests/test_HMMs.py b/tests/test_HMMs.py
index 3aa013dd..3e1d1ed1 100644
--- a/tests/test_HMMs.py
+++ b/tests/test_HMMs.py
@@ -35,25 +35,24 @@ def _defines_HMM(path):
 @pytest.mark.parametrize(("path"), HMMs, ids=str)
 def test_HMM(path):
     """Test that any HMM in module can be simulated."""
-    p = "." + str(path.with_suffix("")).replace("/", ".")
-    module = import_module(p, root.stem)
-
-    def exclude(key, HMM):
-        """Exclude certain, untestable HMMs"""
-        if key == "HMM_trunc":
-            return True
-        if "QG" in HMM.name:
-            try:
-                HMM.Dyn.model.__self__.f90
-            except ModuleNotFoundError as err:
-                import warnings
-                warnings.warn(str(err), stacklevel=2)
-                return True
-        return False
-
-    for key, HMM in vars(module).items():
-        if isinstance(HMM, HiddenMarkovModel) and not exclude(key, HMM):
-            HMM.tseq.BurnIn = 0
-            HMM.tseq.Ko = 1
-            xx, yy = HMM.simulate()
-            assert True
+    p = "." + str(path.with_suffix("")).replace(os.sep, ".")
+    try:
+        module = import_module(p, root.stem)
+
+        def exclude(key, _HMM):
+            """Exclude HMMs that should not be tested."""
+            if key == "HMM_trunc":
+                return True
+            return False
+
+        for key, HMM in vars(module).items():
+            if isinstance(HMM, HiddenMarkovModel) and not exclude(key, HMM):
+                HMM.tseq.BurnIn = 0
+                HMM.tseq.Ko = 1
+                xx, yy = HMM.simulate()
+                assert True
+
+    except ModuleNotFoundError as err:
+        import warnings
+        warnings.warn(str(err), stacklevel=2)
+        return  # We're not testing importability, only runnability
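
[Note on `--docformat=numpy`] The flag added to both the deploy workflow and `docs/dev_guide.md` tells `pdoc` to parse numpy-style docstring sections instead of rendering them as plain text. A hypothetical docstring (not taken from DAPPER) of the kind this setting turns into parameter/return tables:

```python
import numpy as np

def rmse(xx, mu):
    """Root-mean-square error (illustrative example only).

    Parameters
    ----------
    xx : ndarray
        Truth trajectory.
    mu : ndarray
        Estimate trajectory, same shape as `xx`.

    Returns
    -------
    float
        RMSE over all entries.
    """
    return float(np.sqrt(np.mean((np.asarray(xx) - np.asarray(mu)) ** 2)))
```
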
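[Note on the `tests/test_HMMs.py` hunk] The reworked `test_HMM` wraps the import in `try/except ModuleNotFoundError`, so that models whose optional compiled extensions are missing (e.g. an unbuilt QG `py_mod`) produce a warning rather than a failure. The same pattern in isolation, runnable on its own (the module name below is deliberately bogus):

```python
import warnings
from importlib import import_module

def load_or_warn(name):
    """Return the imported module, or None (with a warning) if it is unavailable."""
    try:
        return import_module(name)
    except ModuleNotFoundError as err:
        warnings.warn(str(err), stacklevel=2)
        return None  # Not testing importability, only runnability.

print(load_or_warn("definitely_not_a_real_module"))  # Warns, then prints None.
```
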