From 24111747361bd95edafac541c0dcd58c3f2538aa Mon Sep 17 00:00:00 2001
From: pthouvenin <pierreantoine.thouvenin@gmail.com>
Date: Tue, 31 Oct 2023 16:08:35 +0100
Subject: [PATCH] clean: Remove unused functionalities
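
Remove helpers that no longer have call sites in the library: the random
mask generation and inpainting data generators in
`src/dsgs/operators/data.py`, the symmetric splitting and circular
border-update variants in `src/dsgs/utils/communications.py`, and the
associated inpainting tests. Large blocks of commented-out debugging code
in `src/dsgs/utils/checkpoint.py` and `src/dsgs/utils/communications.py`
are deleted as well. Helpers bound to MPI or file I/O, which cannot run
under the serial test suite, are now marked with `# pragma: no cover`;
the coverage badges are updated accordingly (line coverage 78.85% ->
90.54%, docstring coverage 92% -> 99%).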

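For downstream code that still needs the removed mask generator, a
standalone equivalent is easy to keep locally. The sketch below mirrors
the deleted `generate_random_mask` (the example shape and seed are
illustrative, not part of the library):

    import numpy as np

    def generate_random_mask(image_size, percent, rng):
        """Return a boolean mask with a fraction `percent` of pixels masked."""
        if percent < 0 or percent > 1:
            raise ValueError("percent should satisfy 0 <= percent <= 1.")
        N = np.prod(image_size)
        # draw int(percent * N) pixel positions to mask, without replacement
        masked_id = np.unravel_index(
            rng.choice(N, int(percent * N), replace=False), image_size
        )
        mask = np.full(image_size, True, dtype=bool)
        mask[masked_id] = False  # False marks masked (unobserved) pixels
        return mask

    mask = generate_random_mask((20, 20), 0.4, np.random.default_rng(123))
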
---
 docs/coverage.svg                         |   2 +-
 docs/docstr_coverage_badge.svg            |   8 +-
 docs/source/index.rst                     |  18 +-
 run_tests.sh                              |   2 +-
 src/dsgs/operators/data.py                | 226 +-----
 src/dsgs/utils/checkpoint.py              |  78 +-
 src/dsgs/utils/communications.py          | 830 ----------------------
 src/dsgs/utils/communicators.py           |  14 -
 tests/mpi/test_mpi_inpainting_operator.py | 212 ------
 tests/operators/test_inpainting_model.py  |  59 --
 tests/utils/test_communications.py        |  25 -
 tests/utils/test_communicators.py         |   1 -
 12 files changed, 43 insertions(+), 1432 deletions(-)
 delete mode 100644 tests/mpi/test_mpi_inpainting_operator.py
 delete mode 100644 tests/operators/test_inpainting_model.py

diff --git a/docs/coverage.svg b/docs/coverage.svg
index 55cbdd3..ce67c48 100644
--- a/docs/coverage.svg
+++ b/docs/coverage.svg
@@ -1 +1 @@
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="114" height="20" role="img" aria-label="coverage: 78.85%"><title>coverage: 78.85%</title><linearGradient id="s" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="r"><rect width="114" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#r)"><rect width="61" height="20" fill="#555"/><rect x="61" width="53" height="20" fill="#97ca00"/><rect width="114" height="20" fill="url(#s)"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="110"><text aria-hidden="true" x="315" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="510">coverage</text><text x="315" y="140" transform="scale(.1)" fill="#fff" textLength="510">coverage</text><text aria-hidden="true" x="865" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">78.85%</text><text x="865" y="140" transform="scale(.1)" fill="#fff" textLength="430">78.85%</text></g></svg>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="114" height="20" role="img" aria-label="coverage: 90.54%"><title>coverage: 90.54%</title><linearGradient id="s" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="r"><rect width="114" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#r)"><rect width="61" height="20" fill="#555"/><rect x="61" width="53" height="20" fill="#4c1"/><rect width="114" height="20" fill="url(#s)"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="110"><text aria-hidden="true" x="315" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="510">coverage</text><text x="315" y="140" transform="scale(.1)" fill="#fff" textLength="510">coverage</text><text aria-hidden="true" x="865" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">90.54%</text><text x="865" y="140" transform="scale(.1)" fill="#fff" textLength="430">90.54%</text></g></svg>
\ No newline at end of file
diff --git a/docs/docstr_coverage_badge.svg b/docs/docstr_coverage_badge.svg
index 447cc19..b8efa09 100644
--- a/docs/docstr_coverage_badge.svg
+++ b/docs/docstr_coverage_badge.svg
@@ -8,13 +8,13 @@
     </clipPath>
     <g clip-path="url(#r)">
         <rect width="99" height="20" fill="#555"/>
-        <rect x="99" width="43" height="20" fill="#97CA00"/>
+        <rect x="99" width="43" height="20" fill="#4c1"/>
         <rect width="142" height="20" fill="url(#s)"/>
     </g>
     <g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="110">
         <text x="505" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="890">docstr-coverage</text>
         <text x="505" y="140" transform="scale(.1)" textLength="890">docstr-coverage</text>
-        <text x="1195" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">92%</text>
-        <text x="1195" y="140" transform="scale(.1)">92%</text>
+        <text x="1195" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">99%</text>
+        <text x="1195" y="140" transform="scale(.1)">99%</text>
     </g>
-</svg>
+</svg>
\ No newline at end of file
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 276a65c..a4dfd3a 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -1,14 +1,9 @@
-Welcome to dsgs documentation
-==============================
+``dsgs`` documentation
+======================
 
-This library is focussed on the Python implementation of a distributed Split-Gibbs sampler (SGS) :cite:p:`Vono2019tsp`, with specific applications to imaging inverse problems.
+This library provides the Python implementation of a distributed Split-Gibbs sampler (SGS) :cite:p:`Vono2019tsp`, with specific applications to imaging inverse problems.
 
-The library currently contains codes to reproduce the experiments reported in :cite:p:`Thouvenin2022submitted`.
-
-
-.. warning::
-
-   The code provided in this archive.
+The library currently contains the code needed to reproduce the experiments reported in :cite:p:`Thouvenin2023`.
 
 
 .. toctree::
@@ -19,16 +14,13 @@ The library currently contains codes to reproduce the experiments reported in :c
    biblio
 
 
-
 .. toctree::
    :maxdepth: 1
    :caption: Development
 
    autoapi/index
-..    Code coverage report<../coverage_html_report/index.html#http://>
-..    License<../../../LICENSE#http://>
    Gitlab repository<https://gitlab.cristal.univ-lille.fr/pthouven/dsgs>
-..    Code quality<../wily_report.html#http://>
+
 
 Indices and tables
 ==================
diff --git a/run_tests.sh b/run_tests.sh
index dee2597..4cfed9d 100644
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -3,7 +3,7 @@ session_name=test
 
 tmux new -d -s ${session_name}
 # tmux send-keys -t ${session_name}.0 "echo a=${a}; echo b=${b}" ENTER
-tmux send-keys -t ${session_name}.0 "conda activate async_sampling; \
+tmux send-keys -t ${session_name}.0 "conda activate dsgs; \
 export NUMBA_DISABLE_JIT=1; \
 coverage run -m pytest; \
 coverage html; \
diff --git a/src/dsgs/operators/data.py b/src/dsgs/operators/data.py
index 1c546a3..8d9f38c 100644
--- a/src/dsgs/operators/data.py
+++ b/src/dsgs/operators/data.py
@@ -22,7 +22,7 @@ from dsgs.operators.convolutions import fft_conv
 from dsgs.operators.distributed_convolutions import create_local_to_global_slice
 
 
-def get_image(imagefilename, M=None):
+def get_image(imagefilename, M=None):  # pragma: no cover
     r"""Load an image from a .png of .h5 file, ensuring all pixel values are
     nonnegative.
 
@@ -58,7 +58,7 @@ def get_image(imagefilename, M=None):
     return x
 
 
-def generate_2d_gaussian_kernel(kernel_size, kernel_std):
+def generate_2d_gaussian_kernel(kernel_size, kernel_std):  # pragma: no cover
     r"""Generate a square normalized 2D Gaussian kernel.
 
     Parameters
@@ -84,190 +84,9 @@ def generate_2d_gaussian_kernel(kernel_size, kernel_std):
     return h
 
 
-def generate_random_mask(image_size, percent, rng):
-    r"""Generate a random inpainting mask.
-
-    Parameters
-    ----------
-    image_size : numpy.ndarray of int
-        Shape of the image to be masked.
-    percent : float
-        Fraction of masked pixels, ``1-p`` corresponding to the fraction of
-        observed pixels in the image (``p`` needs to be nonnegative, smaller
-        than or equal to 1).
-    rng : numpy.random.Generator
-        Numpy random number generator.
-
-    Returns
-    -------
-    mask : numpy.ndarray of bool
-        Masking operator such that ``mask.shape == image_size``.
-
-    Raises
-    ------
-    ValueError
-        Fraction of observed pixels ``percent`` should be such that
-        :math:`0 \leq p \leq 1`.
-    """
-    if percent < 0 or percent > 1:
-        raise ValueError(
-            "Fraction of observed pixels percent should be such that: 0 <= percent <= 1."
-        )
-
-    N = np.prod(image_size)
-    masked_id = np.unravel_index(
-        rng.choice(N, (percent * N).astype(int), replace=False), image_size
-    )
-    mask = np.full(image_size, True, dtype=bool)
-    mask[masked_id] = False
-
-    return mask
-
-
-def generate_local_poisson_inpainting_data(
-    comm, cartcomm, grid_size, ranknd, tile, local_mask, tile_pixels
-):
-    """_summary_
-
-    _extended_summary_
-
-    Parameters
-    ----------
-    comm : _type_
-        _description_
-    cartcomm : _type_
-        _description_
-    grid_size : _type_
-        _description_
-    ranknd : _type_
-        _description_
-    tile : _type_
-        _description_
-    local_mask : _type_
-        _description_
-    tile_pixels : _type_
-        _description_
-
-    Returns
-    -------
-    _type_
-        _description_
-    """
-    # * communicator info
-    size = comm.Get_size()
-    rank = comm.Get_rank()
-
-    # * parallel rng
-    child_seed = None
-    if rank == 0:
-        ss = np.random.SeedSequence(1234)
-        child_seed = ss.spawn(size)
-    local_seed = comm.scatter(child_seed, root=0)
-    local_rng = np.random.default_rng(local_seed)
-
-    # * tile size and indices in the global image / data
-    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
-    local_data_size = tile_size
-
-    # * generate masked data
-    Hx = np.zeros(tile_size, dtype="d")
-    Hx[local_mask] = tile[local_mask]
-    local_data = np.zeros(tile_size, dtype="d")
-    local_data[local_mask] = local_rng.poisson(Hx[local_mask])
-
-    # * compute number of observations across all the workers
-    global_data_size = np.zeros(1, dtype="i")
-    local_size = np.sum(local_mask, dtype="i")
-    comm.Allreduce(local_size, global_data_size, op=MPI.SUM)
-
-    # slice for indexing into global arrays
-    global_slice_tile = create_local_to_global_slice(
-        tile_pixels,
-        ranknd,
-        np.zeros(tile_size.size, dtype="i"),
-        local_data_size,
-    )
-    return local_data, Hx, global_slice_tile, global_data_size
-
-
-def generate_local_gaussian_inpainting_data(
-    comm, cartcomm, grid_size, ranknd, tile, local_mask, tile_pixels, isnr
-):
-    """_summary_
-
-    _extended_summary_
-
-    Parameters
-    ----------
-    comm : _type_
-        _description_
-    cartcomm : _type_
-        _description_
-    grid_size : _type_
-        _description_
-    ranknd : _type_
-        _description_
-    tile : _type_
-        _description_
-    local_mask : _type_
-        _description_
-    tile_pixels : _type_
-        _description_
-    isnr : bool
-        _description_
-
-    Returns
-    -------
-    _type_
-        _description_
+def mpi_load_image_from_h5(comm, grid_size, ranknd, filename, image_size, overlap_size):  # pragma: no cover
+    """Load an image from a `.h5` file using MPI.
     """
-    # * communicator info
-    size = comm.Get_size()
-    rank = comm.Get_rank()
-
-    # * parallel rng
-    child_seed = None
-    if rank == 0:
-        ss = np.random.SeedSequence(1234)
-        child_seed = ss.spawn(size)
-    local_seed = comm.scatter(child_seed, root=0)
-    local_rng = np.random.default_rng(local_seed)
-
-    # * tile size and indices in the global image / data
-    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
-    local_data_size = tile_size
-
-    # * generate masked data
-    Hx = np.zeros(tile_size, dtype="d")
-    Hx[local_mask] = tile[local_mask]
-
-    # * compute noise variance across all workers
-    sqnorm_Hx = np.zeros(1, dtype="d")
-    local_sqnorm_Hx = np.sum(Hx[local_mask] ** 2)
-    global_data_size = np.zeros(1, dtype="i")
-    local_size = np.sum(local_mask, dtype="i")
-
-    comm.Allreduce(local_sqnorm_Hx, sqnorm_Hx, op=MPI.SUM)
-    comm.Allreduce(local_size, global_data_size, op=MPI.SUM)
-    sig2 = 10 ** (-isnr / 10) * sqnorm_Hx / global_data_size
-
-    # * generate noisy data
-    local_data = np.zeros(tile_size, dtype="d")
-    local_data[local_mask] = Hx[local_mask] + np.sqrt(sig2) * local_rng.standard_normal(
-        size=local_size
-    )
-
-    # * slice for indexing into global arrays
-    global_slice_tile = create_local_to_global_slice(
-        tile_pixels,
-        ranknd,
-        np.zeros(tile_size.size, dtype="i"),
-        local_data_size,
-    )
-    return local_data, Hx, global_slice_tile, global_data_size, sig2
-
-
-def mpi_load_image_from_h5(comm, grid_size, ranknd, filename, image_size, overlap_size):
     ndims = image_size.size
 
     # * useful sizes
@@ -316,7 +135,9 @@ def mpi_load_image_from_h5(comm, grid_size, ranknd, filename, image_size, overla
 
 def mpi_write_image_to_h5(
     comm, grid_size, ranknd, filename, image_size, local_slice_tile, facet, var_name
-):
+):  # pragma: no cover
+    """Write an image to a `.h5` file using MPI.
+    """
     ndims = image_size.size
 
     # * useful sizes
@@ -340,9 +161,23 @@ def mpi_write_image_to_h5(
     pass
 
 
-def generate_data(x, h):
-    # serial version, linear convolution
+def generate_data(x, h):  # pragma: no cover
+    """Generate a blurred image corrupted by Poisson noise.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input image.
+    h : numpy.ndarray
+        Convolution kernel.
 
+    Returns
+    -------
+    data : numpy.ndarray
+        Degraded observations of `x` (convolution + noise).
+    Hx : numpy.ndarray
+        Noise-free observation of the blurred image (convolution only).
+    """
     # * rng
     rng = np.random.default_rng(1234)
 
@@ -364,7 +199,9 @@ def generate_data(x, h):
 
 def generate_local_data(
     comm, cartcomm, grid_size, ranknd, facet, h, image_size, tile_pixels, backward=False
-):
+):  # pragma: no cover
+    """Generate a blurred image corrupted by Poisson noise in a distributed
+    setting using MPI."""
     # distributed version, linear convolution
 
     # * communicator info
@@ -383,11 +220,6 @@ def generate_local_data(
     local_rng = np.random.default_rng(local_seed)
 
     # * facet / tile size and indices in the global image / data
-    # tile_pixels = ucomm.local_split_range_nd(grid_size, image_size, ranknd)
-    # facet_pixels = ucomm.local_split_range_nd(
-    #     grid_size, image_size, ranknd, overlap=overlap_size
-    # )
-    # facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
     tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
     facet_size = np.array(facet.shape, dtype="i")
 
@@ -450,7 +282,8 @@ def generate_local_data(
 
 def mpi_save_data_to_h5(
     comm, filename, data_size, local_data, local_clean_data, global_slice_data
-):
+):  # pragma: no cover
+    """Save data to an `.h5` file with MPI."""
     f = h5py.File(filename, "r+", driver="mpio", comm=comm)
     dset = f.create_dataset("data", data_size, dtype="d")
     dset[global_slice_data] = local_data
@@ -472,7 +305,8 @@ def mpi_load_data_from_h5(
     overlap_size,
     var_name="data",
     backward=True,
-):
+):  # pragma: no cover
+    """Load data from an existing `.h5` file using MPI."""
     ndims = data_size.size
 
     local_slice = tuple(ndims * [np.s_[:]])
diff --git a/src/dsgs/utils/checkpoint.py b/src/dsgs/utils/checkpoint.py
index c654747..75f3c04 100644
--- a/src/dsgs/utils/checkpoint.py
+++ b/src/dsgs/utils/checkpoint.py
@@ -17,9 +17,6 @@ from os.path import join
 import h5py
 import numpy as np
 
-# import hdf5plugin
-# from mpi4py import MPI
-
 
 def extract_rng_state(rng):
     r"""Extract the state of a random number generator in the form of two
@@ -298,7 +295,6 @@ class SerialCheckpoint(BaseCheckpoint):
 
             # * backup other variables
             for count, (var_name, var) in enumerate(kwargs.items()):
-                # print("%s, %s = %s" % (count, var_name, var))
                 # ! only allow numpy.ndarray or scalar integers
                 if isinstance(var, np.ndarray):
                     if var.size > 1:
@@ -356,7 +352,6 @@ class SerialCheckpoint(BaseCheckpoint):
 
             for count, var in enumerate(args):
                 dic_var[var] = f[var][select[count]]
-            # {var: value for var in args}
 
         return dic_var
 
@@ -464,8 +459,6 @@ class DistributedCheckpoint(BaseCheckpoint):
             List of keyword arguments representing Python variables to be
             saved. Only allows numpy.ndarray or integers.
         """
-        # TODO: add case of variables to be saved from a single core
-        # TODO- (typically the root)
         filename_ = self.filename(file_id)
 
         with h5py.File(
@@ -633,7 +626,7 @@ class DistributedCheckpoint(BaseCheckpoint):
                 )
 
             for count, var in enumerate(args):
-                dic_var[var] = f[var][select[count]]  # (np.s_[-1], *select[count])
+                dic_var[var] = f[var][select[count]]
 
         return dic_var
 
@@ -682,76 +675,9 @@ class DistributedCheckpoint(BaseCheckpoint):
                     )
 
                 for count, var in enumerate(args):
-                    dic_var[var] = f[var][select[count]]  # np.s_[-1], *select[count]
+                    dic_var[var] = f[var][select[count]]
                     # ! fixing error with h5py: load data in the native byteorder
                     # https://numpy.org/doc/stable/reference/generated/numpy.dtype.newbyteorder.html
                     # dic_var[var].dtype = dic_var[var].dtype.newbyteorder("=")
 
         return dic_var
-
-
-# if __name__ == "__main__":
-
-#     from mpi4py import MPI
-
-#     # * crude debug serial checkpoint
-#     # chkpt = SerialCheckpoint("test")
-#     # c = np.ones((2, 2))
-#     # chkpt.save(1, [None, None, None], rng=None, a=3, b=4, c=c)
-#     # dic_var = chkpt.load(1, 3 * [np.s_[:]], None, "a", "b", "c")
-#     # consistency = np.allclose(3, dic_var["a"])
-#     # test syntax h5py in absence of chunking (chunks=None)
-#     # syntax is fine
-#     # f = h5py.File("test_chunk.h5", "w")
-#     # dset = f.create_dataset("chunked", (1000, 1000), chunks=None)
-#     # dset[:] = np.ones((1000, 1000))
-#     # f.close()
-#     # f = h5py.File("test_chunk.h5", "r")
-#     # a = f["chunked"][:]
-#     # f.close()
-#     # print("Done")
-#     # * crude debug parallel checkpoint
-#     # TODO: test in realistic conditions, e.g., with convolution operator
-#     # comm = MPI.COMM_WORLD
-#     # size = comm.Get_size()
-#     # rank = comm.Get_rank()  # The process ID (integer 0-3 for 4-process run)
-#     # print("Process {}: world size={}".format(rank, size))
-#     # if rank == 0:
-#     #     ss = np.random.SeedSequence(1234)
-#     #     # Spawn off nworkers child SeedSequences to pass to child processes.
-#     #     child_seed = np.array(ss.spawn(size))
-#     # else:
-#     #     child_seed = None
-#     # local_seed = comm.scatter(child_seed, root=0)
-#     # local_rng = np.random.default_rng(local_seed)
-#     # local_dic0 = local_rng.__getstate__()
-#     # print(
-#     #     "Process {}: child_seed[{}], entropy={}, spawn_key={}".format(
-#     #         rank, rank, local_seed.entropy, local_seed.spawn_key
-#     #     )
-#     # )
-#     # dchkpt = DistributedCheckpoint(comm, "distributed_test")
-#     # # * backup for rng state
-#     # dchkpt.save(1, None, None, rng=local_rng)
-#     # a = local_rng.normal()
-#     # print("Process {}: a={}".format(rank, a))
-#     # # * load (warm-start for rng state)
-#     # dchkpt.load(1, None, local_rng)
-#     # b = local_rng.normal()
-#     # print("Process {}: a={}, b={}, b=a? {}".format(rank, a, b, np.allclose(b, a)))
-#     # # Check a=b on all processes
-#     # local_consistency_check = np.array([np.allclose(b, a)])
-#     # global_consistency_check = np.array([False])
-#     # # Reduce "local_consistency_check" on the root
-#     # comm.Reduce(
-#     #     [local_consistency_check, MPI.C_BOOL],
-#     #     [global_consistency_check, MPI.C_BOOL],
-#     #     op=MPI.LAND,
-#     #     root=0,
-#     # )
-#     # if rank == 0:
-#     #     print("Communication correctness: {}".format(global_consistency_check))
-#     # MPI.Finalize()
-
-
-# mpiexec -n 2 python -m mpi4py dsgs/utils/checkpoint.py
diff --git a/src/dsgs/utils/communications.py b/src/dsgs/utils/communications.py
index 8c876bc..6c0078a 100644
--- a/src/dsgs/utils/communications.py
+++ b/src/dsgs/utils/communications.py
@@ -13,10 +13,6 @@ from collections import deque
 import numpy as np
 from mpi4py import MPI
 
-# TODO: investigate
-# [persistent communications](https://mpi4py.readthedocs.io/en/stable/overview.
-# html#persistent-communications)
-
 
 def split_range(nchunks, N, overlap=0, backward=True):
     r"""Tessellates :math:`\{ 0, \dotsc , N-1 \}` into multiple subsets.
@@ -206,131 +202,6 @@ def local_split_range_nd(nchunks, N, index, overlap=None, backward=True):
     return np.squeeze(rg)
 
 
-# ! WARNING: may result in empty tiles, thus changing the total number of
-# ! facets
-def rebalance_tile_nd(
-    tile_indices, ranknd, N, grid_size, overlap_size
-):  # pragma: no cover
-    r""" "Resize the local tiles to rebalance data sizes across the grid.
-
-    Resize the local tiles across the grid to compensate for the increase in
-    the data size for border facets.
-
-    Parameters
-    ----------
-    tile_indices : numpy.ndarray[int]
-        Indices of the pixels from the full array composing the tile (first
-        point, last point). Shape: ``(ndims, 2)``.
-    ranknd : numpy.ndarray[int]
-        Rank of the current process (multiple dimensions).
-    N : numpy.ndarray[int]
-        Total number of indices along each dimension.
-    grid_size : numpy.ndarray[int]
-        Size of the process grid.
-    overlap_size : numpy.ndarray[int]
-        Overlap size between consecutive facets across the grid along each
-        dimension.
-
-    Returns
-    -------
-    numpy.ndarray[int]
-        Updated indices defining the local tile: shape ``(ndims, 2)``.
-    """
-
-    # safeguard: trigger rebalance only if the data chunk associated with the
-    # last facet is expected to be twice as large as a tile
-    ndims = ranknd.size
-    ideal_size = np.floor(N / grid_size + overlap_size)
-    tile_size = tile_indices[:, 1] - tile_indices[:, 0] + 1
-
-    for d in range(ndims):
-        if (
-            overlap_size[d] > 0
-            and grid_size[d] > 1
-            and ideal_size[d] > 2 * tile_size[d]
-        ):
-            if overlap_size[d] < grid_size[d]:
-                offset = 1
-                # increase size of the first overlap_size[d] tiles along dim. d
-                if ranknd[d] < overlap_size[d] - 1:
-                    tile_indices[d, 0] += ranknd[d] * offset
-                    tile_indices[d, 1] += (ranknd[d] + 1) * offset
-            else:
-                # increase size of the first grid_size[d]-1 along dim. d
-                offset = np.floor(overlap_size[d] / grid_size[d])
-                if ranknd[d] < grid_size[d] - 1:
-                    tile_indices[d, 0] += ranknd[d] * offset
-                    tile_indices[d, 1] += (ranknd[d] + 1) * offset
-                else:
-                    tile_indices[d, 0] += (
-                        ranknd[d] * offset
-                    )  # ! only modify starting point (last facet)
-
-    return tile_indices
-
-
-def local_split_range_symmetric_nd(
-    nchunks, N, index, overlap_pre=np.array([0]), overlap_post=np.array([0])
-):
-    r"""Return the portion of :math:`\{ 0, \dotsc , N-1 \}` (nD range of
-    indices) handled by a process (symmetric version).
-
-    Return the portion of :math:`\{ 0, \dotsc , N-1 \}`, tesselated into
-    (non-overlapping subsets along each dimension, owned by a process.
-
-    Parameters
-    ----------
-    nchunks : numpy.ndarray[int]
-        Number of nd-segments along each dimension.
-    N : numpy.ndarray[int]
-        Total number of indices along each dimension.
-    index : numpy.ndarray[int]
-        Index of the process.
-    overlap_pre : numpy.ndarray[int], optional
-        Defines overlap with preceding segment, by default ``np.array([0])``.
-    overlap_post : numpy.ndarray[int], optional
-        Defines overlap with following segment, by default ``np.array([0])``.
-
-    Returns
-    -------
-    numpy.ndarray[int]
-        Start and end index of the nD segment along each dimension. Shape:
-        ``(ndims, 2)``.
-
-    Raises
-    ------
-    ValueError
-        Error if any index is greater than ``nchunks-1``.
-    ValueError
-        Error if any overlap is greater than the size of a segment.
-    """
-
-    # TODO: see how to make it compatible with a 1D implementation
-    if np.any(nchunks <= index):
-        raise ValueError(
-            r"Index should be taken in [0, ..., nchunks-1], with nchunks={0}".format(
-                nchunks
-            )
-        )
-    step = N / nchunks
-    if np.any(overlap_pre > np.floor(step)) or np.any(overlap_post > np.floor(step)):
-        raise ValueError(r"More than 100% overlap between two consecutive segments")
-    start = -1 + index * step
-    stop = (start + step).astype(np.int64)
-    start = start.astype(np.int64)
-    rg = np.concatenate(((start + 1)[:, None], stop[:, None]), axis=1)
-    rg[np.logical_and(index > 0, overlap_pre > 0), 0] = (
-        rg[np.logical_and(index > 0, overlap_pre > 0), 0]
-        - overlap_pre[np.logical_and(index > 0, overlap_pre > 0)]
-    )
-
-    rg[np.logical_and(index < nchunks - 1, overlap_post > 0), 1] = (
-        rg[np.logical_and(index < nchunks - 1, overlap_post > 0), 1]
-        + overlap_post[np.logical_and(index < nchunks - 1, overlap_post > 0)]
-    )
-    return np.squeeze(rg)
-
-
 def split_range_interleaved(nchunks, N):
     r"""Tessellates :math:`\{ 0, \dotsc , N-1 \}` into interleaved subsets.
 
@@ -439,7 +310,6 @@ def get_neighbour(ranknd, grid_size, disp):
     return np.ravel_multi_index(ranknd + disp, grid_size)
 
 
-# TODO: check changes for circular convolutions
 def slice_valid_coefficients(ranknd, grid_size, overlap_size):
     r"""Helper elements to extract the valid local convolution coefficients.
 
@@ -550,121 +420,6 @@ def get_local_slice(ranknd, grid_size, overlap_size, backward=True):
     return tuple(local_slice)
 
 
-def get_local_slice_border(ranknd, grid_size, overlap_size, border_size, backward=True):
-    r"""Slice to extract the pixels specific to a given worker.
-
-    Get the slice corresponding to the elements exclusively handled by the
-    current process (i.e., remove the overlap from overlapping facets).
-
-    Parameters
-    ----------
-    ranknd : numpy.ndarray[int]
-        Rank of the current process in the nD Cartesian grid of MPI processes.
-    grid_size : numpy.ndarray[int]
-        Size of the process grid
-    overlap_size : numpy.ndarray[int]
-        Size of the overlap between contiguous facets.
-    border_size : numpy.ndarray[int]
-        Size of the border added around the overall image (boundary extension).
-    backward : bool, optional
-        Orientation of the overlap along the dimensions, by default True.
-
-    Returns
-    -------
-    tuple[slice]
-        Slice to extract the coefficients specifically handled by the current
-        process.
-
-    Raises
-    ------
-    AssertionError
-        `ranknd`, `grid_size` and `overlap_size` must all have the save shape.
-    """
-
-    ndims = ranknd.size
-
-    if not (grid_size.size == ndims and overlap_size.size == ndims):
-        raise AssertionError(
-            r"`ranknd`, `grid_size` and `overlap_size` must have the save \
-                shape"
-        )
-
-    local_slice = ndims * [np.s_[:]]
-    isvalid_splitting = np.logical_and(grid_size > 1, border_size > 0)
-
-    # ! need to adjust offest due to boundary extension
-    if backward:
-        for d in range(ndims):
-            if isvalid_splitting[d]:
-                if ranknd[d] == 0:
-                    local_slice[d] = np.s_[border_size[d] :]
-                elif ranknd[d] < grid_size[d] - 1:
-                    local_slice[d] = np.s_[overlap_size[d] :]
-                else:  # ranknd[d] == grid_size[d] - 1
-                    local_slice[d] = np.s_[overlap_size[d] : -border_size[d]]
-    else:
-        for d in range(ndims):
-            if isvalid_splitting[d]:
-                if ranknd[d] == grid_size[d] - 1:
-                    local_slice[d] = np.s_[: -border_size[d]]
-                elif ranknd[d] > 0:
-                    local_slice[d] = np.s_[: -overlap_size[d]]
-                else:  # ranknd[d] == 0
-                    local_slice[d] = np.s_[border_size[d] : -overlap_size[d]]
-
-    return tuple(local_slice)
-
-
-# def get_border_slice(ranknd, grid_size, overlap_size, backward=True):
-#     r"""Slice to extract the border pixels on a given worker.
-
-#     Get the slice corresponding to the elements on the borders of the current process (i.e., retain only the overlap area from the facets).
-
-#     Parameters
-#     ----------
-#     ranknd : numpy.ndarray[int]
-#         Rank of the current process in the nD Cartesian grid of MPI processes.
-#     grid_size : numpy.ndarray[int]
-#         Size of the process grid
-#     overlap_size : numpy.ndarray[int]
-#         Size of the overlap between contiguous facets.
-#     backward : bool, optional
-#         Orientation of the overlap along the dimensions, by default True.
-
-#     Returns
-#     -------
-#     tuple[slice]
-#         Slice to extract the coefficients overlapping with neighbour processes.
-
-#     Raises
-#     ------
-#     AssertionError
-#         `ranknd`, `grid_size` and `overlap_size` must all have the save shape.
-#     """
-
-#     ndims = ranknd.size
-
-#     if not (grid_size.size == ndims and overlap_size.size == ndims):
-#         raise AssertionError(
-#             r"`ranknd`, `grid_size` and `overlap_size` must have the save \
-#                 shape"
-#         )
-
-#     border_slice = ndims * [np.s_[:]]
-#     isvalid_splitting = np.logical_and(grid_size > 1, overlap_size > 0)
-
-#     if backward:
-#         for d in range(ndims):
-#             if ranknd[d] > 0 and isvalid_splitting[d]:
-#                 border_slice[d] = np.s_[: overlap_size[d]]
-#     else:
-#         for d in range(ndims):
-#             if ranknd[d] < grid_size[d] - 1 and isvalid_splitting[d]:
-#                 border_slice[d] = np.s_[-overlap_size[d] :]
-
-#     return tuple(border_slice)
-
-
 def isvalid_communication(ranknd, grid_size, overlap_size, N, backward=True):
     r"""Check which of the possible communications, along each axis or
     combination of axes ("diagonal"), are valid.
@@ -935,413 +690,6 @@ def setup_border_update(
     return dest, src, resizedsendsubarray, resizedrecvsubarray
 
 
-def setup_border_circular_update(
-    cartcomm, ndims, itemsize, facet_size, overlap_size, ranknd, backward=True
-):
-    r"""Source, destination types and ranks to update facet borders (for
-    `double` format data).
-
-    Set-up destination and source data types and process ranks to communicate
-    facet borders within an nD Cartesian communicator. Diagonal communications
-    (involving more than a single dimension) are not separated from the other
-    communications.
-
-    Parameters
-    ----------
-    cartcomm : mpi4py.MPI.Cartcomm
-        Cartesian topology intracommunicator.
-    ndims : int
-        Number of dimensions of the Cartesian grid.
-    itemsize : int
-        Size in bytes of an item from the array to be sent.
-    facet_size : numpy.ndarray[int]
-        Size of the overlapping facets.
-    overlap_size : numpy.ndarray[int]
-        Overlap size between contiguous facets.
-    backward : bool, optional
-        Direction of the overlap along each axis, by default True.
-
-    Returns
-    -------
-    dest : list[int]
-        List of process ranks to which the current process sends data.
-    src : list[int]
-        List of process ranks from which the current process receives data.
-    resizedsendsubarray : list[MPI subarray]
-        Custom MPI subarray type describing the data array sent by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
-    resizedrecvsubarray : list[MPI subarray]
-        Custom MPI subarray type describing the data array sent by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
-
-    Note
-    ----
-    Function appropriate only for subarrays of type float (``numpy.float64``).
-    Will trigger a segfault error otherwise.
-    """
-
-    # * defining custom types to communicate non-contiguous arrays in the
-    # directions considered
-    sendsubarray = []
-    recvsubarray = []
-    resizedsendsubarray = []
-    resizedrecvsubarray = []
-
-    border_sendsubarray = []
-    border_recvsubarray = []
-    border_resizedsendsubarray = []
-    border_resizedrecvsubarray = []
-
-    sizes = facet_size  # size of local array
-    sM = sizes - overlap_size
-
-    # * rank of processes involved in the communications
-    src = ndims * [MPI.PROC_NULL]
-    dest = ndims * [MPI.PROC_NULL]
-
-    # * comm. along each dimension
-    # TODO: backward option to be adapted to the setting considered
-    if backward:
-        for k in range(ndims):
-            if overlap_size[k] > 0:
-                # ! if there is no overlap along a dimension, make sure there
-                # ! is no communication
-                [src[k], dest[k]] = cartcomm.Shift(k, 1)
-
-                # send buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                # ! change send area for first worker along the dimension (will
-                # ! communicate with the last worker for border exchange)
-                if ranknd[k] == cartcomm.dims[k] - 1:
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        sM[k] - overlap_size[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                else:
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        sM[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                sendsubarray.append(
-                    MPI.DOUBLE.Create_subarray(
-                        sizes, subsizes, starts, order=MPI.ORDER_C
-                    )
-                )
-                resizedsendsubarray.append(
-                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )  # ! see if extent is still fine for more than 2 dimensions
-                resizedsendsubarray[-1].Commit()
-
-                # recv buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.zeros(ndims, dtype="d")
-                recvsubarray.append(
-                    MPI.DOUBLE.Create_subarray(
-                        sizes, subsizes, starts, order=MPI.ORDER_C
-                    )
-                )
-                resizedrecvsubarray.append(
-                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )
-                resizedrecvsubarray[-1].Commit()
-
-                # add one more communication between first and last worker
-                # (along each dimension) for border exchange (last receives
-                # from last)
-                if ranknd[k] == cartcomm.dims[k] - 1:
-                    # recv
-                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        sM[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                    border_recvsubarray.append(
-                        MPI.DOUBLE.Create_subarray(
-                            sizes, subsizes, starts, order=MPI.ORDER_C
-                        )
-                    )
-                    border_resizedrecvsubarray.append(
-                        border_recvsubarray[-1].Create_resized(
-                            0, overlap_size[k] * itemsize
-                        )
-                    )
-                    border_resizedrecvsubarray[-1].Commit()
-
-                    # send
-                    border_resizedsendsubarray.append(None)
-
-                elif ranknd[k] == 0:
-                    # recv
-                    border_resizedrecvsubarray.append(None)
-
-                    # send
-                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        overlap_size[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                    border_sendsubarray.append(
-                        MPI.DOUBLE.Create_subarray(
-                            sizes, subsizes, starts, order=MPI.ORDER_C
-                        )
-                    )
-                    border_resizedsendsubarray.append(
-                        border_sendsubarray[-1].Create_resized(
-                            0, overlap_size[k] * itemsize
-                        )
-                    )
-                    border_resizedsendsubarray[-1].Commit()
-            else:
-                resizedsendsubarray.append(None)
-                resizedrecvsubarray.append(None)
-    else:
-        for k in range(ndims):
-            if overlap_size[k] > 0:
-                # ! if there is no overlap along a dimension, make sure there
-                # ! is no communication
-                [src[k], dest[k]] = cartcomm.Shift(k, -1)
-
-                # recv buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.r_[
-                    np.zeros(k, dtype="d"),
-                    sM[k],
-                    np.zeros(ndims - k - 1, dtype="d"),
-                ]
-                recvsubarray.append(
-                    MPI.DOUBLE.Create_subarray(
-                        sizes, subsizes, starts, order=MPI.ORDER_C
-                    )
-                )
-                resizedrecvsubarray.append(
-                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )  # ! see if extent is still fine for more than 2 dimensions
-                resizedrecvsubarray[-1].Commit()
-
-                # send buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                # ! change send area for first worker along the dimension (will
-                # ! communicate with the last worker for border exchange)
-                if ranknd[k] == 0:
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        overlap_size[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                else:
-                    starts = np.zeros(ndims, dtype="d")
-                sendsubarray.append(
-                    MPI.DOUBLE.Create_subarray(
-                        sizes, subsizes, starts, order=MPI.ORDER_C
-                    )
-                )
-                resizedsendsubarray.append(
-                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )
-                resizedsendsubarray[-1].Commit()
-
-                # add one more communication between first and last worker
-                # (along each dimension) for border exchange (first receives
-                # from last)
-                if ranknd[k] == 0:
-                    # recv
-                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                    starts = np.zeros(ndims, dtype="d")
-                    border_recvsubarray.append(
-                        MPI.DOUBLE.Create_subarray(
-                            sizes, subsizes, starts, order=MPI.ORDER_C
-                        )
-                    )
-                    border_resizedrecvsubarray.append(
-                        border_recvsubarray[-1].Create_resized(
-                            0, overlap_size[k] * itemsize
-                        )
-                    )
-                    border_resizedrecvsubarray[-1].Commit()
-
-                    # send
-                    border_resizedsendsubarray.append(None)
-
-                elif ranknd[k] == cartcomm.dims[k] - 1:
-                    # recv
-                    border_resizedrecvsubarray.append(None)
-
-                    # send
-                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                    starts = np.r_[
-                        np.zeros(k, dtype="d"),
-                        sM[k] - overlap_size[k],
-                        np.zeros(ndims - k - 1, dtype="d"),
-                    ]
-                    border_sendsubarray.append(
-                        MPI.DOUBLE.Create_subarray(
-                            sizes, subsizes, starts, order=MPI.ORDER_C
-                        )
-                    )
-                    border_resizedsendsubarray.append(
-                        border_sendsubarray[-1].Create_resized(
-                            0, overlap_size[k] * itemsize
-                        )
-                    )
-                    border_resizedsendsubarray[-1].Commit()
-                else:
-                    # find a way not to receive sth !!
-                    border_resizedsendsubarray.append(None)
-                    border_resizedrecvsubarray.append(None)
-
-            else:
-                resizedsendsubarray.append(None)
-                resizedrecvsubarray.append(None)
-                border_resizedsendsubarray.append(None)
-                border_resizedrecvsubarray.append(None)
-
-    return (
-        dest,
-        src,
-        resizedsendsubarray,
-        resizedrecvsubarray,
-        border_resizedsendsubarray,
-        border_resizedrecvsubarray,
-    )
-
-
-def setup_border_update_int(
-    cartcomm, ndims, itemsize, facet_size, overlap_size, backward=True
-):  # pragma: no cover
-    r"""Source and destination types and ranks to update facet borders (for
-    `int` format data). Only serving as a relevant MPI-based example.
-
-    Set-up destination and source data types and process ranks to communicate
-    facet borders within an nD Cartesian communicator. Diagonal communications
-    (involving more than a single dimension) are not separated from the other
-    communications.
-
-    Parameters
-    ----------
-    cartcomm : mpi4py.MPI.Cartcomm
-        Cartesian topology intracommunicator.
-    ndims : int
-        Number of dimensions of the Cartesian grid.
-    itemsize : int
-        Size in bytes of an item from the array to be sent.
-    facet_size : numpy.ndarray[int]
-        Size of the overlapping facets.
-    overlap_size : numpy.ndarray[int]
-        Overlap size between contiguous facets.
-    backward : bool, optional
-        Direction of the overlap along each axis, by default True.
-
-    Returns
-    -------
-    dest : list[int]
-        List of process ranks to which the current process sends data.
-    src : list[int]
-        List of process ranks from which the current process receives data.
-    resizedsendsubarray : list[MPI subarray]
-        Custom MPI subarray type describing the data array sent by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
-    resizedrecvsubarray : list[MPI subarray]
-        Custom MPI subarray type describing the data array sent by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
-
-    Note
-    ----
-    Function appropriate only for subarrays of type int (``numpy.int32``).
-    Will trigger a segfault error otherwise.
-    """
-
-    # * defining custom types to communicate non-contiguous arrays
-    sendsubarray = []
-    recvsubarray = []
-    resizedsendsubarray = []
-    resizedrecvsubarray = []
-
-    sizes = facet_size  # size of local array
-    sM = sizes - overlap_size
-
-    # * rank of processes involved in the communications
-    src = ndims * [MPI.PROC_NULL]
-    dest = ndims * [MPI.PROC_NULL]
-
-    # * comm. along each dimension
-    if backward:
-        for k in range(ndims):
-            if overlap_size[k] > 0:
-                # ! if there is no overlap along a dimension, make sure there is no
-                # ! communication
-                [src[k], dest[k]] = cartcomm.Shift(k, 1)
-
-                # send buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.r_[
-                    np.zeros(k, dtype="i"),
-                    sM[k],
-                    np.zeros(ndims - k - 1, dtype="i"),
-                ]
-                sendsubarray.append(
-                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
-                )
-                resizedsendsubarray.append(
-                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )  # ! see if extent is still fine for more than 2 dimensions
-                resizedsendsubarray[-1].Commit()
-
-                # recv buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.zeros(ndims, dtype="i")
-                recvsubarray.append(
-                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
-                )
-                resizedrecvsubarray.append(
-                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )
-                resizedrecvsubarray[-1].Commit()
-            else:
-                resizedsendsubarray.append(None)
-                resizedrecvsubarray.append(None)
-    else:
-        for k in range(ndims):
-            if overlap_size[k] > 0:
-                # ! if there is no overlap along a dimension, make sure there
-                # ! is no communication
-                [src[k], dest[k]] = cartcomm.Shift(k, -1)
-
-                # recv buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.r_[
-                    np.zeros(k, dtype="d"),
-                    sM[k],
-                    np.zeros(ndims - k - 1, dtype="d"),
-                ]
-                recvsubarray.append(
-                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
-                )
-                resizedrecvsubarray.append(
-                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )  # ! see if extent is still fine for more than 2 dimensions
-                resizedrecvsubarray[-1].Commit()
-
-                # send buffer
-                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
-                starts = np.zeros(ndims, dtype="d")
-                sendsubarray.append(
-                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
-                )
-                resizedsendsubarray.append(
-                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
-                )
-                resizedsendsubarray[-1].Commit()
-            else:
-                resizedsendsubarray.append(None)
-                resizedrecvsubarray.append(None)
-
-    return dest, src, resizedsendsubarray, resizedrecvsubarray
-
-
 def setup_border_update_tv(
     cartcomm, ndims, itemsize, facet_size, overlap_size, backward=True
 ):
@@ -1535,181 +883,3 @@ def mpi_update_borders(
         )
 
     return
-
-
-def mpi_circular_update_borders(
-    comm,
-    local_array,
-    dest,
-    src,
-    resizedsendsubarray,
-    resizedrecvsubarray,
-    border_resizedsendsubarray,
-    border_resizedrecvsubarray,
-    ranknd,
-    grid_size,
-    backward=True,
-):
-    r"""Update borders of the overlapping facets.
-
-    Parameters
-    ----------
-    comm : mpi4py.MPI.Comm
-        Communicator object.
-    local_array : array
-        Local buffer, from which data data is sent to (resp. from) workers
-        whose rank is given in the list `dest` (resp. `src`).
-    dest : list[int]
-        List of process ranks to which the current process sends data.
-    src : list[int]
-        List of process ranks from which the current process receives data.
-    resizedsendsubarray : MPI subarray
-        Custom MPI subarray type describing the data sent by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_.
-    resizedrecvsubarray : MPI subarray
-        Custom MPI subarray type describing the data received by the current
-        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
-    border_resizedsendsubarray : MPI subarray
-        Custom MPI subarray type describing the data received by the first
-        worker to the last along each axis (border exchange).
-    border_resizedrecvsubarray : MPI subarray
-        Custom MPI subarray type describing the data sent by the last worker to
-        the first along each axis (border exchange).
-    ranknd : numpy.ndarray of int
-        nD rank of the current process in the Cartesian process grid.
-    grid_size : numpy.ndarray of int
-        Dimension of the MPI process grid (Cartesian communicator)..
-
-    Note
-    ----
-    The input array ``local_array`` is updated in-place.
-    """
-
-    ndims = len(dest)
-    requests = []
-
-    for d in range(ndims):
-        comm.Sendrecv(
-            [local_array, 1, resizedsendsubarray[d]],
-            dest[d],
-            recvbuf=[local_array, 1, resizedrecvsubarray[d]],
-            source=src[d],
-        )
-
-        # ! additional communication (between first and last worker along each
-        # ! axis)
-        if backward:
-            if ranknd[d] == 0:
-                requests.append(
-                    comm.Isend(
-                        [local_array, 1, border_resizedsendsubarray[d]],
-                        dest[d],
-                    )
-                )
-
-            if ranknd[d] == grid_size[d] - 1:
-                requests.append(
-                    comm.Irecv(
-                        [local_array, 1, border_resizedrecvsubarray[d]],
-                        src[d],
-                    )
-                )
-        else:
-            if ranknd[d] == 0:
-                requests.append(
-                    comm.Irecv(
-                        [local_array, 1, border_resizedrecvsubarray[d]],
-                        src[d],
-                    )
-                )
-
-            if ranknd[d] == grid_size[d] - 1:
-                requests.append(
-                    comm.Isend(
-                        [local_array, 1, border_resizedsendsubarray[d]],
-                        dest[d],
-                    )
-                )
-
-    MPI.Request.Waitall(requests)
-
-    return
-
-
-# if __name__ == "__main__":
-#     # TODO: example to be reactivated / cleansed
-#     N = 10
-#     nchunks = 3
-#     overlap = 1  # number of pixels in the overlap
-#     index = 0
-
-#     # no overlap
-#     rg0 = split_range(nchunks, N)
-#     # check that 2 consecutive entries are distant from 1
-#     test1 = np.all(rg0[1:, 0] - rg0[:-1, 1] == 1)
-#     # check size of each chunk (uniform in this case)
-#     test2 = np.all(np.diff(rg0, n=1, axis=1) + 1 == 3)
-
-#     # # overlap
-#     # rg_overlap = split_range(nchunks, N, overlap=overlap)
-#     # test1 = np.all(np.abs(rg_overlap[:-1, 1] - rg_overlap[1:, 0] + 1) == overlap)
-
-#     # rgo_r = split_range(nchunks, N, overlap=overlap, backward=False)
-#     # test2 = np.all(np.abs(rg_overlap[:-1, 1] - rg_overlap[1:, 0] + 1) == overlap)
-#     # rgo_r2 = local_split_range_nd(np.array(2*[nchunks], dtype="i"), np.array(2*[N], dtype="i"), np.array(2*[0], dtype="i"), overlap=np.array(2*[overlap], dtype="i"), backward=False)
-
-#     # # interleaved
-#     # rg_interleaved = split_range_interleaved(nchunks, N)
-#     # x = np.arange(N)
-
-#     # # start all slices start by k
-#     # test_start = np.all(
-#     #     [rg_interleaved[k].start == k for k in range(len(rg_interleaved))]
-#     # )
-#     # test_stop = np.all(
-#     #     [rg_interleaved[k].stop == N for k in range(len(rg_interleaved))]
-#     # )
-#     # test_step = np.all(
-#     #     [rg_interleaved[k].step == nchunks for k in range(len(rg_interleaved))]
-#     # )
-
-#     # # checking indices for a single process
-#     # rg_local = local_split_range(nchunks, N, index, overlap=overlap)
-#     # print(rg_local)
-#     # rg_local1 = local_split_range_nd(
-#     #     np.array([nchunks]),
-#     #     np.array([N]),
-#     #     np.array([index]),
-#     #     overlap=np.array([overlap]),
-#     # )
-#     # print(np.allclose(rg_local, rg_local1))
-
-#     # index2 = np.array(np.unravel_index(index, 2 * [nchunks]))
-#     # rg_local2 = local_split_range_nd(
-#     #     np.array(2 * [nchunks]),
-#     #     np.array(2 * [N]),
-#     #     index2,
-#     #     overlap=np.array(2 * [overlap]),
-#     # )
-
-#     # rg_local2_sym = local_split_range_symmetric_nd(
-#     #     np.array(2 * [nchunks]),
-#     #     np.array(2 * [N]),
-#     #     index2,
-#     #     overlap_pre=np.array(2 * [overlap]),
-#     #     overlap_post=np.array(2 * [overlap]),
-#     # )
-
-#     # # check consistency between split_range and local version
-#     # global_rg = np.concatenate(
-#     #     [
-#     #         local_split_range(nchunks, N, k, overlap=overlap)[None, :]
-#     #         for k in range(nchunks)
-#     #     ],
-#     #     axis=0,
-#     # )
-#     # print(global_rg)
-#     # err = np.allclose(global_rg, rg_overlap)
-#     # print(err)
-
-#     pass
diff --git a/src/dsgs/utils/communicators.py b/src/dsgs/utils/communicators.py
index 538d39d..55622bb 100644
--- a/src/dsgs/utils/communicators.py
+++ b/src/dsgs/utils/communicators.py
@@ -230,20 +230,6 @@ class SyncCartesianCommunicator(BaseCommunicator):
             backward=direction,
         )
 
-        # (
-        #     self.dest,
-        #     self.src,
-        #     self.resizedsendsubarray,
-        #     self.resizedrecvsubarray,
-        # ) = libcomm.setup_border_update_int(
-        #     cartcomm,
-        #     self.ndims,
-        #     self.itemsize,
-        #     self.facet_size,
-        #     self.overlap_size,
-        #     backward=direction,
-        # )
-
         # setup finalizer
         self.isvalid_comm = self.overlap_size > 0
         # np.logical_and(self.overlap_size > 0, self.grid_size > 1)
diff --git a/tests/mpi/test_mpi_inpainting_operator.py b/tests/mpi/test_mpi_inpainting_operator.py
deleted file mode 100644
index fb70a6b..0000000
--- a/tests/mpi/test_mpi_inpainting_operator.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""Test MPI model objects (SyncInpainting).
-"""
-
-# author: pthouvenin (pierre-antoine.thouvenin@centralelille.fr)
-#
-# reference: P.-A. Thouvenin, A. Repetti, P. Chainais - **A distributed Gibbs
-# Sampler with Hypergraph Structure for High-Dimensional Inverse Problems**,
-# [arxiv preprint 2210.02341](http://arxiv.org/abs/2210.02341), October 2022.
-
-import h5py
-import numpy as np
-import pytest
-from mpi4py import MPI
-
-import dsgs.utils.communications as ucomm
-from dsgs.operators.data import generate_random_mask
-from dsgs.operators.inpainting import SyncInpainting
-
-pytestmark = pytest.mark.mpi
-
-
-@pytest.fixture
-def comm():
-    return MPI.COMM_WORLD
-
-
-@pytest.fixture
-def size(comm):
-    return comm.Get_size()
-
-
-@pytest.fixture
-def rank(comm):
-    return comm.Get_rank()
-
-
-@pytest.fixture
-def seed():
-    return 123
-
-
-@pytest.fixture
-def ndims():
-    return 2
-
-
-@pytest.fixture
-def grid_size(ndims, size):
-    return MPI.Compute_dims(size, ndims)
-
-
-@pytest.fixture
-def ranknd(comm, rank, grid_size, ndims):
-    # * Cartesian communicator and nd rank
-    cartcomm = comm.Create_cart(
-        dims=grid_size,
-        periods=ndims * [False],
-        reorder=False,
-    )
-    return np.array(cartcomm.Get_coords(rank), dtype="i")
-
-
-@pytest.fixture
-def image_size(ndims):
-    """Full image size."""
-    return np.array(ndims * [20], dtype="i")
-
-
-@pytest.fixture
-def tile_pixels(image_size, grid_size, ranknd):
-    """Index of the pixels from the full image involved in the local image tile."""
-    return ucomm.local_split_range_nd(grid_size, image_size, ranknd)
-
-
-@pytest.fixture
-def tile_size(tile_pixels):
-    """Size of local image tiles."""
-    return tile_pixels[:, 1] - tile_pixels[:, 0] + 1
-
-
-@pytest.fixture
-def local_rng(comm, rank, size, seed):
-    child_seed = None
-    if rank == 0:
-        ss = np.random.SeedSequence(seed)
-        child_seed = ss.spawn(size)
-    local_seed = comm.scatter(child_seed, root=0)
-    return np.random.default_rng(local_seed)
-
-
-@pytest.fixture
-def local_mask(tile_size, local_rng):
-    """Local part of the full inpainting mask."""
-    return generate_random_mask(tile_size, 0.4, local_rng)
-
-
-def test_SyncInpainting_throws_exceptions(comm, size, ndims, image_size, grid_size):
-    """Testing errors thrown by the object."""
-
-    # the local mask must have the same size as the local image tile
-    m_ = np.ones(1)
-    g_ = np.array(grid_size, dtype="i")
-    with pytest.raises(ValueError) as excinfo:
-        SyncInpainting(image_size, m_, comm, g_)
-    assert "local mask and image tile should have the same size" in str(excinfo.value)
-
-
-def test_SyncInpainting_2d(
-    comm, rank, size, ndims, image_size, grid_size, local_mask, tile_pixels, local_rng
-):
-    """Testing the 2d distributed inpainting model."""
-    g_ = np.array(grid_size, dtype="i")
-
-    # * distributed inpainting model
-    inpainting_model = SyncInpainting(image_size, local_mask, comm, g_)
-
-    # * setup useful slices
-    # indexing into global arrays
-    global_slice_tile = tuple(
-        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
-    )
-
-    # * local image
-    local_image = local_rng.standard_normal(size=inpainting_model.tile_size)
-
-    # * applying distributed direct operator
-    local_data = inpainting_model.forward(local_image)
-
-    # * save to an h5 test file (parallel writing)
-    f = h5py.File("inpainting_test.h5", "w", driver="mpio", comm=comm)
-
-    dset_image = f.create_dataset("x", image_size, dtype="d")
-    dset_image[global_slice_tile] = local_image
-
-    # TODO: see if this step can be simplified
-    dset_data = f.create_dataset("y", image_size, dtype="d")
-    local_data_facet = np.zeros(inpainting_model.tile_size, dtype=local_data.dtype)
-    local_data_facet[inpainting_model.mask_id] = local_data
-    dset_data[global_slice_tile] = local_data_facet
-
-    dset_mask = f.create_dataset("mask", image_size, dtype="d")
-    dset_mask[global_slice_tile] = local_mask
-    f.close()
-
-    del f, dset_image, dset_data, dset_mask
-
-    # * compare to full inpainting operator
-    g = h5py.File("inpainting_test.h5", "r+", driver="mpio", comm=comm)
-    if rank == 0:
-        y0 = g["y"]
-        x0 = g["x"][()]
-        mask = g["mask"][()]
-        y0 = y0[mask]
-        y = x0[mask]
-        assert np.allclose(y, y0)
-    g.close()
-
-
-def test_adjoint_distributed_inpainting_2d(
-    comm, rank, size, ndims, image_size, grid_size, local_mask, tile_pixels, local_rng
-):
-    """Testing adjoint distributed convolution with backward facet
-    overlap for the direct operator."""
-    g_ = np.array(grid_size, dtype="i")
-
-    # * distributed inpainting model
-    inpainting_model = SyncInpainting(image_size, local_mask, comm, g_)
-
-    # * setup useful slices
-    # indexing into global arrays
-    global_slice_tile = tuple(
-        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
-    )
-
-    # * local data
-    local_data = local_rng.standard_normal(size=inpainting_model.local_data_size)
-
-    # * applying distributed adjoint operator
-    local_image = inpainting_model.adjoint(local_data)
-
-    # * save to an h5 test file (parallel writing)
-    f = h5py.File("inpainting_adjoint_test.h5", "w", driver="mpio", comm=comm)
-
-    dset_image = f.create_dataset("x", image_size, dtype="d")
-    dset_image[global_slice_tile] = local_image
-
-    dset_data = f.create_dataset("y", image_size, dtype="d")
-    local_data_facet = np.zeros(inpainting_model.tile_size, dtype=local_data.dtype)
-    local_data_facet[inpainting_model.mask_id] = local_data
-    dset_data[global_slice_tile] = local_data_facet
-
-    dset_mask = f.create_dataset("mask", image_size, dtype="d")
-    dset_mask[global_slice_tile] = local_mask
-    f.close()
-
-    del f, dset_image, dset_data, dset_mask
-
-    # * compare to full inpainting operator
-    g = h5py.File("inpainting_adjoint_test.h5", "r+", driver="mpio", comm=comm)
-    if rank == 0:
-        y0 = g["y"][()]
-        x0 = g["x"][()]
-        mask = g["mask"][()]
-
-        y0 = y0[mask]
-        y = x0[mask]
-        assert np.allclose(y, y0)
-    g.close()
-
-
-# mpiexec -n 2 python -m pytest tests/mpi/test_mpi_inpainting_operator.py
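The write-then-verify pattern used throughout this deleted test (each rank writes its tile into a single file through h5py's mpio driver, rank 0 then checks the assembled arrays) reduces to the sketch below; it assumes h5py built against parallel HDF5:

    import h5py
    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    with h5py.File("demo.h5", "w", driver="mpio", comm=comm) as f:
        dset = f.create_dataset("x", (size, 4), dtype="d")
        dset[rank, :] = rank  # each rank writes only its own row
    with h5py.File("demo.h5", "r", driver="mpio", comm=comm) as g:
        if rank == 0:  # independent read and check on the root rank
            assert np.allclose(g["x"][()], np.arange(size)[:, None])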
diff --git a/tests/operators/test_inpainting_model.py b/tests/operators/test_inpainting_model.py
deleted file mode 100644
index f50a21c..0000000
--- a/tests/operators/test_inpainting_model.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""Test the functionalities of the SerialInpainting object (errors and
-correctness of the direct and adjoint operators)."""
-
-# author: pthouvenin (pierre-antoine.thouvenin@centralelille.fr)
-#
-# reference: P.-A. Thouvenin, A. Repetti, P. Chainais - **A distributed Gibbs
-# Sampler with Hypergraph Structure for High-Dimensional Inverse Problems**,
-# [arxiv preprint 2210.02341](http://arxiv.org/abs/2210.02341), October 2022.
-
-import numpy as np
-import pytest
-
-from dsgs.operators.data import generate_random_mask
-from dsgs.operators.inpainting import SerialInpainting
-
-
-@pytest.fixture
-def rng():
-    return np.random.default_rng(1234)
-
-
-@pytest.fixture
-def image_size():
-    return np.array([10, 7], dtype="i")
-
-
-@pytest.fixture
-def mask(rng, image_size):
-    """Inpainting mask."""
-    return generate_random_mask(image_size, 0.4, rng)
-
-
-@pytest.fixture
-def x(image_size, rng):
-    """Random image."""
-    return rng.normal(loc=0.0, scale=1.0, size=image_size)
-
-
-@pytest.fixture
-def inpainting_model(image_size, mask):
-    """Inpainting model."""
-    return SerialInpainting(image_size, mask)
-
-
-def test_SerialInpainting_throws_exceptions(image_size, mask):
-    # mask and image sizes are inconsistent
-    with pytest.raises(ValueError) as excinfo:
-        SerialInpainting(image_size[1:], mask)
-    assert "mask and image should have the same size" in str(excinfo.value)
-
-
-def test_SerialInpaintingModel_adjoint(x, inpainting_model, rng):
-    """Check the correctness of the adjoint operator (inner-product test)."""
-    y = rng.standard_normal(inpainting_model.data_size)
-    Hx = inpainting_model.forward(x)
-    Hadj_y = inpainting_model.adjoint(y)
-    sp1 = np.sum(Hx * y)  # usual scalar product
-    sp2 = np.sum(x * Hadj_y)
-    assert np.isclose(sp1, sp2)
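The deleted adjoint test is the standard inner-product check: for a linear operator H, <Hx, y> must equal <x, H*y> up to floating-point error. For a pure masking operator, a hypothetical stand-in for SerialInpainting rather than the package API, the same check reads:

    import numpy as np

    rng = np.random.default_rng(1234)
    mask = rng.uniform(size=(10, 7)) < 0.4  # boolean inpainting mask
    x = rng.standard_normal((10, 7))
    y = rng.standard_normal(np.count_nonzero(mask))

    Hx = x[mask]  # forward: keep the observed pixels only
    Hadj_y = np.zeros_like(x)
    Hadj_y[mask] = y  # adjoint: zero-fill, then scatter the data back

    assert np.isclose(np.sum(Hx * y), np.sum(x * Hadj_y))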
diff --git a/tests/utils/test_communications.py b/tests/utils/test_communications.py
index 63aaa99..c3f6c81 100644
--- a/tests/utils/test_communications.py
+++ b/tests/utils/test_communications.py
@@ -109,24 +109,6 @@ class TestCommunications(unittest.TestCase):
         )
         self.assertTrue(np.allclose(rg, rg2))
 
-    # TODO: to be expanded (see if additional elements can be added to the unit-test)
-    def test_rebalance_tile_n(self):
-        array_size = np.array([20], dtype="i")
-        grid_size = np.array([self.nchunks], dtype="i")
-        overlap_size = np.array([self.overlap], dtype="i")
-
-        rg0 = np.zeros((self.nchunks, 2), dtype="i")
-        rg = np.zeros((self.nchunks, 2), dtype="i")
-
-        for k in range(self.nchunks):
-            ranknd = np.array([k], dtype="i")
-            rg[k, :] = ucomm.local_split_range_nd(grid_size, array_size, ranknd)
-            rg0[k, :] = rg[k, :]
-            ucomm.rebalance_tile_nd(
-                rg[k, :][None, :], ranknd, array_size, grid_size, overlap_size
-            )
-        pass
-
     def test_split_range_interleaved_error(self):
         with self.assertRaises(ValueError):
             ucomm.split_range_interleaved(self.N + 1, self.N)
@@ -440,13 +422,6 @@ class TestCommunications(unittest.TestCase):
         with self.assertRaises(AssertionError):
             ucomm.get_local_slice(ranknd, grid_size, overlap_size)
 
-    # def test_slice_valid_coefficients(self):
-
-    #     ranknd = np.zeros(2, dtype="i")
-    #     grid_size = np.full(2, fill_value=2, dtype="i")
-    #     overlap_size = np.array([1, 0], dtype="i")
-    #     select_y = ucomm.slice_valid_coefficients(ranknd, grid_size, overlap_size)
-
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/utils/test_communicators.py b/tests/utils/test_communicators.py
index c7e4181..fe5496c 100644
--- a/tests/utils/test_communicators.py
+++ b/tests/utils/test_communicators.py
@@ -318,7 +318,6 @@ class TestCommunicators(unittest.TestCase):
 
             H0 = np.fft.rfftn(h0, data_size)
             y = uconv.fft_conv(x0, H0, data_size)
-            # print(np.allclose(y, y0))
             self.assertTrue(np.allclose(y, y0))
         g.close()
 
-- 
GitLab