diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e70762a22..f6905ccc1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: pfb-clean Workflow
+name: pfb-imaging Workflow
 
 on:
   push:
@@ -31,7 +31,7 @@ jobs:
       # - name: Pin setuptools
       #   run: python -m pip install setuptools==65.5
 
-      - name: Install pfb-clean
+      - name: Install pfb-imaging
         run: python -m pip install .[testing]
 
       - name: Run tests
diff --git a/Dockerfile b/Dockerfile
index 3e1df4a91..994f65604 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,6 +12,6 @@ RUN apt -y update && \
 
 RUN python -m pip install -U pip setuptools wheel && \
-    python -m pip install -U pfb-clean@git+https://github.com/ratt-ru/pfb-clean@main && \
+    python -m pip install -U pfb-imaging@git+https://github.com/ratt-ru/pfb-imaging@main && \
     python -m pip install numpy==1.22 && \
     python -m pip cache purge
 
diff --git a/README.rst b/README.rst
index e85ed3283..d036b4419 100644
--- a/README.rst
+++ b/README.rst
@@ -1,10 +1,10 @@
-pfb-clean
-=========
+pfb-imaging
+===========
 
-Preconditioned forward-backward clean algorithm.
+Radio interferometric imaging suite based on the pre-conditioned forward-backward algorithm.
 
 Install the package by cloning and running
 
-:code:`$ pip install -e pfb-clean/`
+:code:`$ pip install -e pfb-imaging/`
 
 Note casacore needs to be installed on the system for this to work.
@@ -19,14 +19,6 @@ no binary mode eg
 
 :code:`$ pip install -e ducc`
 
-You may also have to make numba aware of the tbb layer by doing
-
-:code:`$ pip install tbb`
-
-:code:`$ export LD_LIBRARY_PATH=/path/to/venv/lib`
-
-see eg. https://github.com/ratt-ru/QuartiCal/issues/268
-
 If you find any of this useful please cite (for now)
 
 https://arxiv.org/abs/2101.08072
diff --git a/pfb/parser/forward.yaml b/pfb/parser/fluxmop.yaml
similarity index 100%
rename from pfb/parser/forward.yaml
rename to pfb/parser/fluxmop.yaml
diff --git a/pfb/parser/clean.yaml b/pfb/parser/klean.yaml
similarity index 100%
rename from pfb/parser/clean.yaml
rename to pfb/parser/klean.yaml
diff --git a/pfb/parser/restore.yaml b/pfb/parser/restore.yaml
index acb73a9ec..54e934573 100644
--- a/pfb/parser/restore.yaml
+++ b/pfb/parser/restore.yaml
@@ -6,31 +6,37 @@ inputs:
     dtype: str
     abbreviation: mname
     default: MODEL
-    info: 'Name of model in mds'
+    info:
+      Name of model in dds
   residual-name:
     dtype: str
     abbreviation: rname
     default: RESIDUAL
-    info: 'Name of residual in dds'
+    info:
+      Name of residual in dds
   nband:
     dtype: int
     required: true
     abbreviation: nb
-    info: 'Number of imaging bands'
+    info:
+      Number of imaging bands
   postfix:
     dtype: str
     default: 'main'
-    info: 'Can be used to specify a custom name for the image space data \
-          products'
+    info:
+      Can be used to specify a custom name for the image space data products
   outputs:
     dtype: str
     default: mMrRiI
-    info: 'Output products. (m)odel, (r)esidual, (i)mage, (c)lean beam. \
-          Captitals correspond to cubes.'
+    info:
+      Output products (m)odel, (r)esidual, (i)mage, (c)lean beam, (d)irty,
+      (f)ft_residuals (amplitude and phase will be produced).
+      Use capitals to produce corresponding cubes.
   overwrite:
     dtype: bool
     default: false
-    info: Allow overwrite of output xds
+    info:
+      Allow overwriting fits files
 
 outputs: {}
 
diff --git a/pfb/parser/smoovie.yaml b/pfb/parser/smoovie.yaml
index 5f23df46a..445a197f8 100644
--- a/pfb/parser/smoovie.yaml
+++ b/pfb/parser/smoovie.yaml
@@ -152,6 +152,17 @@ inputs:
       The format to write movie out in.
      gifs are usually better quality but can get quite large.
       mp4 quality not currently great with streamjoy.
+  optimize:
+    dtype: bool
+    default: false
+    info:
+      Try to optimize the resulting gif.
+      Only possible if gifsicle is installed.
+  crf:
+    dtype: int
+    default: 12
+    info:
+      Constant rate factor for controlling mp4 output quality.
 
 outputs: {}
 
diff --git a/pfb/parser/spotless.yaml b/pfb/parser/spotless.yaml
index 097ba89bb..ebeb14c79 100644
--- a/pfb/parser/spotless.yaml
+++ b/pfb/parser/spotless.yaml
@@ -96,7 +96,13 @@ inputs:
     dtype: float
     default: 0.5
     info:
-      Lower the initail rmsfactor by this amount
+      Lower the initial rmsfactor by this amount
+  diverge-count:
+    dtype: int
+    default: 5
+    info:
+      Will terminate the algorithm if the rms increases this many times.
+      Set to > niter to disable this check.
 
 outputs: {}
 
diff --git a/pfb/parser/uncabbedcabs.yml b/pfb/parser/uncabbedcabs.yml
index e8aa5bb73..83f56044a 100644
--- a/pfb/parser/uncabbedcabs.yml
+++ b/pfb/parser/uncabbedcabs.yml
@@ -25,14 +25,14 @@ pfb.degrid:
   _include:
     - (.)degrid.yaml
 
-pfb.clean:
-  command: pfb.workers.clean.clean
+pfb.klean:
+  command: pfb.workers.klean.klean
   flavour: python
   policies:
     pass_missing_as_none: true
   _include:
-    - (.)clean.yaml
+    - (.)klean.yaml
 
 pfb.restore:
   command: pfb.workers.restore.restore
@@ -52,14 +52,14 @@ pfb.fwdbwd:
   _include:
     - (.)fwdbwd.yaml
 
-pfb.forward:
-  command: pfb.workers.forward.forward
+pfb.fluxmop:
+  command: pfb.workers.fluxmop.fluxmop
   flavour: python
   policies:
     pass_missing_as_none: true
   _include:
-    - (.)forward.yaml
+    - (.)fluxmop.yaml
 
 pfb.spotless:
   command: pfb.workers.spotless.spotless
diff --git a/pfb/utils/fits.py b/pfb/utils/fits.py
index abca41fc6..61b6f6de4 100644
--- a/pfb/utils/fits.py
+++ b/pfb/utils/fits.py
@@ -68,7 +68,7 @@ def set_wcs(cell_x, cell_y, nx, ny, radec, freq,
 
     header = w.to_header()
     header['RESTFRQ'] = fmean
-    header['ORIGIN'] = 'pfb-clean'
+    header['ORIGIN'] = 'pfb-imaging'
     header['BTYPE'] = 'Intensity'
     header['BUNIT'] = unit
     header['SPECSYS'] = 'TOPOCENT'
diff --git a/pfb/utils/misc.py b/pfb/utils/misc.py
index 10e000d9e..268221496 100644
--- a/pfb/utils/misc.py
+++ b/pfb/utils/misc.py
@@ -503,13 +503,14 @@ def _restore_corrs(vis, ncorr):
     return model_vis
 
 
-# model to fit
 @jax.jit
 def psf_errorsq(x, data, xy):
+    '''
+    Returns the sum of squared errors for the best-fit Gaussian to the data
+    '''
     emaj, emin, pa = x
     Smin = jnp.minimum(emaj, emin)
     Smaj = jnp.maximum(emaj, emin)
-    # print(emaj, emin, pa)
     A = jnp.array([[1. / Smin ** 2, 0],
                    [0, 1. / Smaj ** 2]])
@@ -519,7 +520,7 @@ def psf_errorsq(x, data, xy):
     B = jnp.dot(jnp.dot(R.T, A), R)
     Q = jnp.einsum('nb,bc,cn->n', xy.T, B, xy)
     # GaussPar should corresponds to FWHM
-    fwhm_conv = 2 * jnp.sqrt(2 * np.log(2))
+    fwhm_conv = 2 * jnp.sqrt(2 * jnp.log(2))
     model = jnp.exp(-fwhm_conv * Q)
     res = data - model
     return jnp.vdot(res, res)
@@ -660,7 +661,7 @@ def init_mask(mask, model, output_type, log):
     return mask
 
 
-def dds2cubes(dds, nband, apparent=False):
+def dds2cubes(dds, nband, apparent=False, dual=True):
     real_type = dds[0].DIRTY.dtype
     complex_type = np.result_type(real_type, np.complex64)
     nx, ny = dds[0].DIRTY.shape
@@ -686,7 +687,7 @@ def dds2cubes(dds, nband, apparent=False):
         psfhat = None
     mean_beam = [da.zeros((nx, ny), chunks=(-1, -1), dtype=real_type)
                  for _ in range(nband)]
-    if 'DUAL' in dds[0]:
+    if dual and 'DUAL' in dds[0]:
         nbasis, nymax, nxmax = dds[0].DUAL.shape
         dual = [da.zeros((nbasis, nymax, nxmax), chunks=(-1, -1, -1),
                          dtype=real_type) for _ in range(nband)]
@@ -707,7 +708,7 @@ def dds2cubes(dds, nband, apparent=False):
             psfhat[b] += ds.PSFHAT.data
         if 'MODEL' in ds:
             model[b] = ds.MODEL.data
-        if 'DUAL' in ds:
+        if dual and 'DUAL' in ds:
             dual[b] = ds.DUAL.data
         mean_beam[b] += ds.BEAM.data * ds.WSUM.data[0]
         wsums[b] += ds.WSUM.data[0]
@@ -720,7 +721,7 @@ def dds2cubes(dds, nband, apparent=False):
     if 'PSF' in ds:
         psf = da.stack(psf)/wsum
         psfhat = da.stack(psfhat)/wsum
-    if 'DUAL' in ds:
+    if dual and 'DUAL' in ds:
         dual = da.stack(dual)
     for b in range(nband):
         if wsums[b]:
@@ -1441,3 +1442,72 @@ def combine_columns(x, y, dc, dc1, dc2):
                      out=x, casting='same_kind')
     return x
 
+
+
+# def fft_interp(image, cellxi, cellyi, nxo, nyo,
+#                cellxo, cellyo, shiftx, shifty):
+#     '''
+#     Use non-uniform fft to interpolate image in a flux conservative way
+
+#     image - input image
+#     cellxi - input x cell-size
+#     cellyi - input y cell-size
+#     nxo - number of x pixels in output
+#     nyo - number of y pixels in output
+#     cellxo - output x cell size
+#     cellyo - output y cell size
+#     shiftx - shift x coordinate by this amount
+#     shifty - shift y coordinate by this amount
+
+#     All sizes are assumed to be in radians.
+#     '''
+#     import matplotlib.pyplot as plt
+#     from scipy.fft import fftn, ifftn
+#     Fs = np.fft.fftshift
+#     iFs = np.fft.ifftshift
+#     # basic
+#     nx, ny = image.shape
+#     imhat = Fs(fftn(image))
+
+#     imabs = np.abs(imhat)
+#     imphase = np.angle(imhat) - 1.0
+#     # imphase = np.roll(imphase, nx//2, axis=0)
+#     imshift = ifftn(iFs(imabs*np.exp(1j*imphase))).real
+
+#     impad = np.pad(imhat, ((nx//2, nx//2), (ny//2, ny//2)), mode='constant')
+#     imo = ifftn(iFs(impad)).real
+
+#     print(np.sum(image) - np.sum(imo))
+
+#     plt.figure(1)
+#     plt.imshow(image/image.max(), vmin=0, vmax=1, interpolation=None)
+#     plt.colorbar()
+#     plt.figure(2)
+#     plt.imshow(imo/imo.max(), vmin=0, vmax=1, interpolation=None)
+#     plt.colorbar()
+#     plt.figure(3)
+#     plt.imshow(imshift/imshift.max() - image/image.max(), vmin=0, vmax=1, interpolation=None)
+#     plt.colorbar()
+
+#     plt.show()
+
+    # # coordinates on input grid
+    # nx, ny = image.shapeimhat
+    # x = np.arange(-(nx//2), nx//2) * cellxi
+    # y = np.arange(-(ny//2), ny//2) * cellyi
+    # xx, yy = np.meshgrid(x, y, indexing='ij')
+
+    # # frequencies on output grid
+    # celluo = 1/(nxo*cellxo)
+    # cellvo = 1/(nyo*cellyo)
+    # uo = np.arange(-(nxo//2), nxo//2) * celluo/nxo
+    # vo = np.arange(-(nyo//2), nyo//2) * cellvo/nyo
+
+    # uu, vv = np.meshgrid(uo, vo, indexing='ij')
+    # uv = np.vstack((uo, vo)).T
+
+
+    # res1 = finufft.nufft2d3(xx.ravel(), yy.ravel(), image.ravel(), uu.ravel(), vv.ravel())
+
+
diff --git a/pfb/utils/stokes2vis.py b/pfb/utils/stokes2vis.py
index da20419e9..0c46905fc 100644
--- a/pfb/utils/stokes2vis.py
+++ b/pfb/utils/stokes2vis.py
@@ -60,7 +60,8 @@ def single_stokes(ds=None,
                           dc,
                           dc1,
                           dc2,
-                          chunks=data.chunks)
+                          chunks=data.chunks,
+                          meta=np.empty((0,0,0), dtype=data.dtype))
 
     nrow, nchan, ncorr = data.shape
 
@@ -90,7 +91,8 @@ def single_stokes(ds=None,
         # weight = 1.0/sigma**2
         weight = da.map_blocks(weight_from_sigma,
                                sigma,
-                               chunks=sigma.chunks)
+                               chunks=sigma.chunks,
+                               meta=np.empty((0,0,0), dtype=sigma.dtype))
     elif opts.weight_column is not None:
         weight = getattr(ds, opts.weight_column).data
         if opts.weight_column=='WEIGHT':
diff --git a/pfb/utils/weighting.py b/pfb/utils/weighting.py
index 5c65c4349..0f1f89ad3 100644
--- a/pfb/utils/weighting.py
+++ b/pfb/utils/weighting.py
@@ -177,14 +177,16 @@ def filter_extreme_counts(counts, nbox=16, nlevel=10):
                         counts, 'xy',
                         nbox, None,
                         nlevel, None,
-                        dtype=counts.dtype)
+                        dtype=counts.dtype,
+                        meta=np.empty((0,0), dtype=float))
 
 
-@njit(nogil=True, cache=True)
+# @njit(nogil=True, cache=True)
 def _filter_extreme_counts(counts, nbox=16, level=10.0):
     '''
-    Replaces extreme counts by local mean computed i
+    Replaces extremely small counts by median to prevent
+    upweighting nearly empty cells
     '''
     # nx, ny = counts.shape
     # I, J = np.where(counts>0)
@@ -203,10 +205,13 @@ def _filter_extreme_counts(counts, nbox=16, level=10.0):
     #         local_mean = np.mean(tmp[ix, iy])
     #         if counts[i,j] < local_mean/level:
     #             counts[i, j] = local_mean
-    # get the median ounts value
-    med = np.median(counts>0)
-    counts = np.where(counts > med/level, counts, med)
-
+    # get the median counts value
+    ix, iy = np.where(counts > 0)
+    cnts = counts[ix,iy]
+    med = np.median(cnts)
+    lowval = med/level
+    cnts = np.maximum(cnts, lowval)
+    counts[ix,iy] = cnts
     return counts
 
 
diff --git a/pfb/workers/forward.py b/pfb/workers/fluxmop.py
similarity index 96%
rename from pfb/workers/forward.py
rename to pfb/workers/fluxmop.py
index 83e658396..ef27249f9 100644
--- a/pfb/workers/forward.py
+++ b/pfb/workers/fluxmop.py
@@ -6,19 +6,19 @@ from omegaconf import OmegaConf
 import pyscilog
 pyscilog.init('pfb')
-log = pyscilog.get_logger('FORWARD')
+log = pyscilog.get_logger('FLUXMOP')
 
 from scabha.schema_utils import clickify_parameters
 from pfb.parser.schemas import schema
 
 # create default parameters from schema
 defaults = {}
-for key in schema.forward["inputs"].keys():
-    defaults[key.replace("-", "_")] = schema.forward["inputs"][key]["default"]
+for key in schema.fluxmop["inputs"].keys():
+    defaults[key.replace("-", "_")] = schema.fluxmop["inputs"][key]["default"]
 
 
 @cli.command(context_settings={'show_default': True})
-@clickify_parameters(schema.forward)
-def forward(**kw):
+@clickify_parameters(schema.fluxmop)
+def fluxmop(**kw):
     '''
     Forward step aka flux mop.
 
@@ -59,7 +59,7 @@ def forward(**kw):
     opts = OmegaConf.create(defaults)
     import time
     timestamp = time.strftime("%Y%m%d-%H%M%S")
-    pyscilog.log_to_file(f'forward_{timestamp}.log')
+    pyscilog.log_to_file(f'fluxmop_{timestamp}.log')
 
     if opts.nworkers is None:
         if opts.scheduler=='distributed':
@@ -78,9 +78,9 @@ def forward(**kw):
         for key in opts.keys():
             print(' %25s = %s' % (key, opts[key]), file=log)
 
-    return _forward(**opts)
+    return _fluxmop(**opts)
 
 
-def _forward(**kw):
+def _fluxmop(**kw):
     opts = OmegaConf.create(kw)
     OmegaConf.set_struct(opts, True)
diff --git a/pfb/workers/grid.py b/pfb/workers/grid.py
index f01df8666..35338c363 100644
--- a/pfb/workers/grid.py
+++ b/pfb/workers/grid.py
@@ -43,12 +43,14 @@ def grid(**kw):
     from daskms.fsspec_store import DaskMSStore
     if opts.xds is not None:
         xdsstore = DaskMSStore(opts.xds.rstrip('/'))
+        xdsname = opts.xds
     else:
         xdsstore = DaskMSStore(f'{basename}.xds')
+        xdsname = f'{basename}.xds'
     try:
         assert xdsstore.exists()
     except Exception as e:
-        raise ValueError(f"There must be an xds at {opts.xds}. "
+        raise ValueError(f"There must be an xds at {xdsname}. "
                          f"Original traceback {e}")
     opts.xds = xdsstore.url
     OmegaConf.set_struct(opts, True)
diff --git a/pfb/workers/clean.py b/pfb/workers/klean.py
similarity index 97%
rename from pfb/workers/clean.py
rename to pfb/workers/klean.py
index 0e26f16db..ad22ec77a 100644
--- a/pfb/workers/clean.py
+++ b/pfb/workers/klean.py
@@ -6,19 +6,19 @@ from omegaconf import OmegaConf
 import pyscilog
 pyscilog.init('pfb')
-log = pyscilog.get_logger('CLEAN')
+log = pyscilog.get_logger('KLEAN')
 
 from scabha.schema_utils import clickify_parameters
 from pfb.parser.schemas import schema
 
 # create default parameters from schema
 defaults = {}
-for key in schema.clean["inputs"].keys():
-    defaults[key.replace("-", "_")] = schema.clean["inputs"][key]["default"]
+for key in schema.klean["inputs"].keys():
+    defaults[key.replace("-", "_")] = schema.klean["inputs"][key]["default"]
 
 
 @cli.command(context_settings={'show_default': True})
-@clickify_parameters(schema.clean)
-def clean(**kw):
+@clickify_parameters(schema.klean)
+def klean(**kw):
     '''
     Modified single-scale clean.
     '''
@@ -26,7 +26,7 @@ def clean(**kw):
     opts = OmegaConf.create(defaults)
     import time
     timestamp = time.strftime("%Y%m%d-%H%M%S")
-    pyscilog.log_to_file(f'clean_{timestamp}.log')
+    pyscilog.log_to_file(f'klean_{timestamp}.log')
 
     if opts.nworkers is None:
         if opts.scheduler=='distributed':
@@ -46,10 +46,10 @@ def clean(**kw):
         for key in opts.keys():
             print(' %25s = %s' % (key, opts[key]), file=log)
 
-    return _clean(**opts)
+    return _klean(**opts)
 
 
-def _clean(ddsi=None, **kw):
+def _klean(ddsi=None, **kw):
     opts = OmegaConf.create(kw)
     # always combine over ds during cleaning
     opts['mean_ds'] = True
diff --git a/pfb/workers/main.py b/pfb/workers/main.py
index aea0948ef..bad6a9152 100644
--- a/pfb/workers/main.py
+++ b/pfb/workers/main.py
@@ -7,7 +7,6 @@ def cli():
     pass
 
 
-from pfb.workers import (init, grid, degrid,
-                         clean, restore, fwdbwd,
-                         spotless, model2comps,
-                         forward, fastim, smoovie)
+from pfb.workers import (init, grid, degrid, klean,
+                         restore, spotless, model2comps,
+                         fluxmop, fastim, smoovie)
diff --git a/pfb/workers/restore.py b/pfb/workers/restore.py
index afbbd4b8e..616551eb0 100644
--- a/pfb/workers/restore.py
+++ b/pfb/workers/restore.py
@@ -51,6 +51,7 @@ def _restore(**kw):
     from pfb.utils.fits import (save_fits, add_beampars, set_wcs,
                                 dds2fits, dds2fits_mfs)
     from pfb.utils.misc import Gaussian2D, fitcleanbeam, convolve2gaussres, dds2cubes
+    from ducc0.fft import c2c
 
     basename = f'{opts.output_filename}_{opts.product.upper()}'
     dds_name = f'{basename}_{opts.postfix}.dds'
@@ -78,7 +79,8 @@ def _restore(**kw):
     # stack cubes
     dirty, model, residual, psf, _, _, wsums, _ = dds2cubes(dds,
                                                             nband,
-                                                            apparent=True)
+                                                            apparent=True,
+                                                            dual=False)
     wsum = np.sum(wsums)
     output_type = dirty.dtype
     fmask = wsums > 0
@@ -154,6 +156,32 @@ def _restore(**kw):
                   hdr,
                   overwrite=opts.overwrite)
 
+    if 'f' in opts.outputs:
+        rhat_mfs = c2c(residual_mfs, forward=True,
+                       nthreads=opts.nvthreads, inorm=0)
+        rhat_mfs = np.fft.fftshift(rhat_mfs)
+        save_fits(np.abs(rhat_mfs),
+                  f'{basename}_{opts.postfix}.abs_fft_residual_mfs.fits',
+                  hdr_mfs,
+                  overwrite=opts.overwrite)
+        save_fits(np.angle(rhat_mfs),
+                  f'{basename}_{opts.postfix}.phase_fft_residual_mfs.fits',
+                  hdr_mfs,
+                  overwrite=opts.overwrite)
+
+    if 'F' in opts.outputs:
+        rhat = c2c(residual, axes=(1,2), forward=True,
+                   nthreads=opts.nvthreads, inorm=0)
+        rhat = np.fft.fftshift(rhat, axes=(1,2))
+        save_fits(np.abs(rhat),
+                  f'{basename}_{opts.postfix}.abs_fft_residual.fits',
+                  hdr,
+                  overwrite=opts.overwrite)
+        save_fits(np.angle(rhat),
+                  f'{basename}_{opts.postfix}.phase_fft_residual.fits',
+                  hdr,
+                  overwrite=opts.overwrite)
+
     if 'd' in opts.outputs:
         dirty_mfs = np.sum(dirty, axis=0)
         save_fits(dirty_mfs,
diff --git a/pfb/workers/smoovie.py b/pfb/workers/smoovie.py
index 20319044d..79c155ef1 100644
--- a/pfb/workers/smoovie.py
+++ b/pfb/workers/smoovie.py
@@ -262,6 +262,15 @@ def plot_frame(frame):
         # this should preserve order
         progress(futures)
         results = client.gather(futures)
+
+        # drop empty frames (iterate in reverse so popping does not skip entries)
+        nframes = len(results)
+        for i in range(len(results) - 1, -1, -1):
+            if not results[i][1]:
+                results.pop(i)
+        nframeso = len(results)
+        print(f"Dropped {nframes - nframeso} empty frames in band {b}", file=log)
+
         # get median rms
         medrms = np.median([res[1] for res in results])
         # replace rms with medrms and add metadata
@@ -278,16 +287,19 @@ def plot_frame(frame):
         # 4 - frame fraction
         # 5 - band id
 
-        # TODO - progressbar
+        # TODO:
+        # - progressbar
+        # - investigate writing frames to disk as xarray dataset and passing instead of frames
         idfy = f'fps{opts.fps}_tbin{opts.time_bin}_fbin{opts.freq_bin}'
         if opts.out_format.lower() == 'gif':
             outim = stream(
                 results,
                 renderer=plot_frame,
                 intro_title=f"{opts.outname}-Band{b:04d}",
-                optimize=True,
+                optimize=opts.optimize,
                 threads_per_worker=1,
                 fps=opts.fps,
+                max_frames=-1,
                 uri=f'{opts.outname}_band{b}_{idfy}.gif'
             )
         elif opts.out_format.lower() == 'mp4':
@@ -295,9 +307,10 @@ def plot_frame(frame):
                 results,
                 renderer=plot_frame,
                 intro_title=f"{opts.outname}-Band{b:04d}",
-                # optimize=True,
+                write_kwargs={'crf':opts.crf},
                 threads_per_worker=1,
                 fps=opts.fps,
+                max_frames=-1,
                 uri=f'{opts.outname}_band{b}_{idfy}.mp4'
             )
         else:
diff --git a/pfb/workers/spotless.py b/pfb/workers/spotless.py
index f2f68de98..f6bfd4b4e 100644
--- a/pfb/workers/spotless.py
+++ b/pfb/workers/spotless.py
@@ -214,6 +214,7 @@ def _spotless(ddsi=None, **kw):
     # get clean beam area to convert residual units during l1reweighting
     # TODO - could refine this with comparison between dirty and restored
     # if contiuing the deconvolution
+    print("Fitting PSF", file=log)
     GaussPar = fitcleanbeam(psf_mfs[None], level=0.5, pixsize=1.0)[0]
     pix_per_beam = GaussPar[0]*GaussPar[1]*np.pi/4
     print(f"Number of pixels per beam estimated as {pix_per_beam}",
@@ -413,7 +414,7 @@ def _spotless(ddsi=None, **kw):
 
         if rms > rmsp:
             diverge_count += 1
-            if diverge_count > 3:
+            if diverge_count > opts.diverge_count:
                 print("Algorithm is diverging. Terminating.", file=log)
                 break
 
diff --git a/setup.py b/setup.py
index bd11be49d..8f9b31353 100644
--- a/setup.py
+++ b/setup.py
@@ -13,29 +13,29 @@
                 'pyscilog >= 0.1.2',
                 'Click',
                 "ducc0",
-                "QuartiCal"
-                "@git+https://github.com/ratt-ru/QuartiCal.git"
-                "@stimela_upgrade",
+                "QuartiCal >= 0.2.3",
+                # "@git+https://github.com/ratt-ru/QuartiCal.git"
+                # "@v0.2.3-fix-ver",
                 "sympy",
-                "stimela >= 2.0rc14",
-                "streamjoy",
+                "stimela >= 2.0rc18",
+                "streamjoy >= 0.0.8",
                 "tbb",
                 "jax[cpu]",
-                "codex-africanus[complete]"
-                "@git+https://github.com/ratt-ru/codex-africanus.git"
-                "@master"
+                # "codex-africanus[complete]"
+                # "@git+https://github.com/ratt-ru/codex-africanus.git"
+                # "@master"
                 ]
 
 
 setup(
-    name='pfb-clean',
+    name='pfb-imaging',
     version=pfb.__version__,
     author="Landman Bester",
     author_email="lbester@sarao.ac.za",
-    description="Pre-conditioned forward-backward CLEAN algorithm",
+    description="Radio interferometric imaging suite based on the pre-conditioned forward-backward algorithm",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url="https://github.com/ratt-ru/pfb-clean",
+    url="https://github.com/ratt-ru/pfb-imaging",
     packages=find_packages(),
     include_package_data=True,
     zip_safe=False,
diff --git a/tbump.toml b/tbump.toml
index 7b29ad549..f6412cc76 100644
--- a/tbump.toml
+++ b/tbump.toml
@@ -1,5 +1,5 @@
 # Uncomment this if your project is hosted on GitHub:
-github_url = "https://github.com/ratt-ru/pfb-clean/"
+github_url = "https://github.com/ratt-ru/pfb-imaging/"
 
 [version]
 current = "0.0.1"
diff --git a/tests/test_clean.py b/tests/test_klean.py
similarity index 90%
rename from tests/test_clean.py
rename to tests/test_klean.py
index 31c744203..e5683fdce 100644
--- a/tests/test_clean.py
+++ b/tests/test_klean.py
@@ -8,7 +8,7 @@ pmp = pytest.mark.parametrize
 
 
 @pmp('do_gains', (True, False))
-def test_clean(do_gains, ms_name):
+def test_klean(do_gains, ms_name):
     '''
     Here we test that clean correctly infers the fluxes of point sources
     placed at the centers of pixels in the presence of the wterm and DI gain
@@ -215,30 +215,30 @@ def test_clean(do_gains, ms_name):
     from pfb.workers.grid import _grid
     dds = _grid(xdsi=xds, **grid_args)
 
-    # run clean
-    clean_args = {}
-    for key in schema.clean["inputs"].keys():
-        clean_args[key.replace("-", "_")] = schema.clean["inputs"][key]["default"]
-    clean_args["output_filename"] = outname
-    clean_args["postfix"] = postfix
-    clean_args["nband"] = nchan
-    clean_args["dirosion"] = 0
-    clean_args["do_residual"] = False
-    clean_args["nmiter"] = 100
+    # run klean
+    klean_args = {}
+    for key in schema.klean["inputs"].keys():
+        klean_args[key.replace("-", "_")] = schema.klean["inputs"][key]["default"]
+    klean_args["output_filename"] = outname
+    klean_args["postfix"] = postfix
+    klean_args["nband"] = nchan
+    klean_args["dirosion"] = 0
+    klean_args["do_residual"] = False
+    klean_args["nmiter"] = 100
     threshold = 1e-5
-    clean_args["threshold"] = threshold
-    clean_args["gamma"] = 0.1
-    clean_args["peak_factor"] = 0.75
-    clean_args["sub_peak_factor"] = 0.75
-    clean_args["nthreads"] = 8
-    clean_args["nvthreads"] = 8
-    clean_args["scheduler"] = 'sync'
-    clean_args["do_wgridding"] = True
-    clean_args["epsilon"] = epsilon
-    clean_args["mop_flux"] = True
-    clean_args["fits_mfs"] = False
-    from pfb.workers.clean import _clean
-    _clean(ddsi=dds, **clean_args)
+    klean_args["threshold"] = threshold
+    klean_args["gamma"] = 0.1
+    klean_args["peak_factor"] = 0.75
+    klean_args["sub_peak_factor"] = 0.75
+    klean_args["nthreads"] = 8
+    klean_args["nvthreads"] = 8
+    klean_args["scheduler"] = 'sync'
+    klean_args["do_wgridding"] = True
+    klean_args["epsilon"] = epsilon
+    klean_args["mop_flux"] = True
+    klean_args["fits_mfs"] = False
+    from pfb.workers.klean import _klean
+    _klean(ddsi=dds, **klean_args)
 
     # get inferred model
     basename = f'{outname}_I'
diff --git a/tests/test_spotless.py b/tests/test_spotless.py
index 137fca49c..d24806495 100644
--- a/tests/test_spotless.py
+++ b/tests/test_spotless.py
@@ -173,7 +173,7 @@ def test_spotless(ms_name):
     writes = xds_to_zarr(dds, dds_name, columns='ALL')
     dask.compute(writes)
 
-    # run clean
+    # run spotless
     spotless_args = {}
     for key in schema.spotless["inputs"].keys():
         spotless_args[key.replace("-", "_")] = schema.spotless["inputs"][key]["default"]