diff --git a/.docstr.yaml b/.docstr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3112b067580bf57b8ca7304450306a4986d5e61
--- /dev/null
+++ b/.docstr.yaml
@@ -0,0 +1,28 @@
+paths: # list or string
+  - docstr_coverage
+badge: docs # Path
+exclude: .*/test # regex
+verbose: 1 # int (0-3)
+skip_magic: True # Boolean
+skip_file_doc: True # Boolean
+skip_init: True # Boolean
+skip_class_def: True # Boolean
+skip_private: True # Boolean
+follow_links: True # Boolean
+accept_empty: True # Boolean
+ignore_names_file: ./notes/* /matlab .*/drafts/* */archive/* tests/* sandbox/* # regex
+fail_under: 90 # int
+percentage_only: True # Boolean
+ignore_patterns: # Dict with key/value pairs of file-pattern/node-pattern
+  .*: method_to_ignore_in_all_files
+  FileWhereWeWantToIgnoreAllSpecialMethods: "__.+__"
+  SomeFile:
+    - method_to_ignore1
+    - method_to_ignore2
+    - method_to_ignore3
+  a_very_important_view_file:
+    - "^get$"
+    - "^set$"
+    - "^post$"
+  detect_.*:
+    - "get_val.*"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0528efbc0fb4ccba78e115e0ac5cf1cf2b9c3329
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,94 @@
+# https://pre-commit.com/
+# poetry add pre-commit --dev
+# poetry run pre-commit install
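+# poetry run pre-commit run --all-files  # one-off check of the whole tree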
+repos:
+  # using custom command in pre-commit
+  # https://stackoverflow.com/questions/59499061/how-to-run-custom-shell-script-file-before-pre-commit-hook
+  # code formatting
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.2.0
+    hooks:
+      - id: check-case-conflict
+      - id: check-docstring-first
+      # - id: check-json
+      #   exclude: ^(.vscode/|other/folder/) # weird errors with .vscode/*.json
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+        exclude: ^((.*)\.(ipynb|otherext)) # avoid reformatting notebooks every time
+      - id: mixed-line-ending
+      - id: trailing-whitespace
+  - repo: https://github.com/psf/black
+    rev: 22.3.0
+    hooks:
+      - id: black
+      - id: black-jupyter
+  - repo: https://github.com/asottile/blacken-docs
+    rev: v1.12.1
+    hooks:
+      - id: blacken-docs
+        additional_dependencies: [black==22.3.0] # pin to the same black version as the hook above
+  # making isort compatible with black
+  # see https://github.com/PyCQA/isort/issues/1518
+  - repo: https://github.com/pycqa/isort
+    rev: 5.10.1
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]  # to be set in pyproject.toml
+  # - repo: https://github.com/myint/rstcheck
+  #   rev: 3f92957478422df87bd730abde66f089cc1ee19b
+  #   hooks:
+  #     - id: rstcheck
+  #       args: [
+  #           # "--report",
+  #           # "info",
+  #           "--ignore-directives",
+  #           "autofunction,automodule,bibliography,math,numfig,plot,testcode,testoutput",
+  #           "--ignore-roles",
+  #           "eq,cite",
+  #         ]
+  - repo: https://github.com/executablebooks/mdformat
+    rev: 0.7.14
+    hooks:
+      - id: mdformat
+        additional_dependencies:
+        - mdformat-gfm
+        - mdformat-black
+  # linter: flake8
+  - repo: https://github.com/PyCQA/flake8
+    rev: 4.0.1
+    hooks:
+      - id: flake8
+        args: # arguments to configure flake8
+          # making flake8's line length compatible with black
+          - "--max-line-length=88"
+          - "--max-complexity=24"
+          - "--select=B,C,E,F,W,T4,B9"
+          - "--exclude=drafts/*, docs/*, sandbox/*, src/aaxda/samplers/parallel/archive/*, src/aaxda/samplers/serial/archive/*, src/aaxda/models/prox.py, src/aaxda/utils/custom_log.py, src/aaxda/samplers/parallel/axda_sync_debug.py, src/aaxda/samplers/parallel/spa_psgla_sync_gaussian_inpainting.py"
+
+          # these are errors that will be ignored by flake8
+          # check out their meaning here
+          # https://flake8.pycqa.org/en/latest/user/error-codes.html
+          # F841
+          - "--ignore=E203,E266,E501,W503,F403,F401,E402"
+  # notebooks
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.3.1
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-isort
+        args: ["-black"]
+  # - repo: https://github.com/kynan/nbstripout
+  #   rev: 0.5.0
+  #   hooks:
+  #     - id: nbstripout
+  # local regression monitoring with wily
+  # https://stackoverflow.com/questions/59499061/how-to-run-custom-shell-script-file-before-pre-commit-hook
+  # - repo: local
+  #   hooks:
+  #   - id: wily
+  #     name: wily
+  #     entry: bash -c 'wily report aaxda -f HTML -o docs/build/wily_report.html'
+  #     verbose: true
+  #     language: python
+  #     additional_dependencies: [wily]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/README.md b/README.md
index 38131cd564eefd31a48f61c4a6f7ca5df239c79f..894dab8eb5100a15086907202785f19e00c9773f 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,7 @@ conda install -c anaconda conda-build
 source ~/.bashrc # or .zshrc, depending on your shell
 
 # Cloning the repo
-git clone https://gitlab.com/pthouvenin/dspa.git
+git clone https://gitlab.cristal.univ-lille.fr/pthouven/dspa.git
 cd dspa
 
 # Create anaconda environment
diff --git a/conda-linux-64.lock b/conda-linux-64.lock
new file mode 100644
index 0000000000000000000000000000000000000000..e2a6cd8e3d88fc9d2f5cf4a28740f3898bb30ba6
--- /dev/null
+++ b/conda-linux-64.lock
@@ -0,0 +1,269 @@
+# Generated by conda-lock.
+# platform: linux-64
+# input_hash: 11e751d2f7755f93aa218d92351d70ac61426688b2aa488cc33e570c2ddb5cc8
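+# To recreate the environment from this explicit lock file (a sketch; the
+# environment name is an assumption):
+#   conda create --name dspa --file conda-linux-64.lock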
+@EXPLICIT
+https://repo.anaconda.com/pkgs/main/linux-64/_libgcc_mutex-0.1-main.conda#c3473ff8bdb3d124ed5ff11ec380d6f9
+https://repo.anaconda.com/pkgs/main/linux-64/blas-1.0-mkl.conda#9a7a051e9bd41da46523acb017d8a517
+https://repo.anaconda.com/pkgs/main/linux-64/ca-certificates-2022.4.26-h06a4308_0.conda#fc9c0bf2e7893f5407ff74289dbcf295
+https://repo.anaconda.com/pkgs/main/linux-64/intel-openmp-2021.4.0-h06a4308_3561.conda#15d9e331499432d8e22935c1d3b5d2a1
+https://repo.anaconda.com/pkgs/main/linux-64/ld_impl_linux-64-2.38-h1181459_1.conda#68eedfd9c06f2b0e6888d8db345b7f5b
+https://repo.anaconda.com/pkgs/main/linux-64/libgfortran4-7.5.0-ha8ba4b0_17.conda#e3883581cbf0a98672250c3e80d292bf
+https://repo.anaconda.com/pkgs/main/linux-64/libstdcxx-ng-11.2.0-h1234567_0.conda#ce541c2473bd2d56da84ec8f241a8574
+https://repo.anaconda.com/pkgs/main/linux-64/mpi-1.0-mpich.conda#3310b0d36925b82ad3bd800758f4bf9f
+https://repo.anaconda.com/pkgs/main/linux-64/libgfortran-ng-7.5.0-ha8ba4b0_17.conda#ecb35c8952579d5c8dc56c6e076ba948
+https://repo.anaconda.com/pkgs/main/linux-64/libgomp-11.2.0-h1234567_0.conda#c8acb8d9aff1ead1b273ace299ca12d2
+https://repo.anaconda.com/pkgs/main/linux-64/mkl-2021.2.0-h06a4308_296.conda#06c81ed0b0c637506b4b0305cf59d121
+https://repo.anaconda.com/pkgs/main/linux-64/_openmp_mutex-5.1-1_gnu.conda#71d281e9c2192cb3fa425655a8defb85
+https://repo.anaconda.com/pkgs/main/linux-64/libgcc-ng-11.2.0-h1234567_0.conda#83c045906d7d785252a34846348d16c6
+https://repo.anaconda.com/pkgs/main/linux-64/brotli-1.0.9-he6710b0_2.conda#6513a2c97bd29ec2978d163c7e1f7932
+https://repo.anaconda.com/pkgs/main/linux-64/bzip2-1.0.8-h7b6447c_0.conda#9303f4af7c004e069bae22bde8d800ee
+https://repo.anaconda.com/pkgs/main/linux-64/c-ares-1.18.1-h7f8727e_0.conda#677634b170f698ba548ca8d57aa3fd1a
+https://repo.anaconda.com/pkgs/main/linux-64/charls-2.2.0-h2531618_0.conda#abcace262ab5673ba2b89c658b7bc846
+https://repo.anaconda.com/pkgs/main/linux-64/expat-2.4.4-h295c915_0.conda#f9930c60940181cf06d0bd0b8095063c
+https://repo.anaconda.com/pkgs/main/linux-64/giflib-5.2.1-h7b6447c_0.conda#c2583ad8de5051f19479580c58336f15
+https://repo.anaconda.com/pkgs/main/linux-64/icu-58.2-he6710b0_3.conda#48cc14d5ad1a9bcd8dac17211a8deb8b
+https://repo.anaconda.com/pkgs/main/linux-64/jpeg-9e-h7f8727e_0.conda#a0571bd2254b360aef526307a17f3526
+https://repo.anaconda.com/pkgs/main/linux-64/jxrlib-1.1-h7b6447c_2.conda#3cc305f3788177c8ea28088590ab75a1
+https://repo.anaconda.com/pkgs/main/linux-64/lerc-3.0-h295c915_0.conda#b97309770412f10bed8d9448f6f98f87
+https://repo.anaconda.com/pkgs/main/linux-64/libaec-1.0.4-he6710b0_1.conda#95e3b23fe7c0108bce3b6826749bb94d
+https://repo.anaconda.com/pkgs/main/linux-64/libdeflate-1.8-h7f8727e_5.conda#6942d65edab9a800900f43e750b3ad1f
+https://repo.anaconda.com/pkgs/main/linux-64/libev-4.33-h7f8727e_1.conda#5065620db4393fb549f30114a33897d1
+https://repo.anaconda.com/pkgs/main/linux-64/libffi-3.3-he6710b0_2.conda#88a54b8f50e351c650e16f4ee781440c
+https://repo.anaconda.com/pkgs/main/linux-64/libsodium-1.0.18-h7b6447c_0.conda#c8783b20f0e14bc1d701352c26c264d5
+https://repo.anaconda.com/pkgs/main/linux-64/libuuid-1.0.3-h7f8727e_2.conda#6c4c9e96bfa4744d4839b9ed128e1114
+https://repo.anaconda.com/pkgs/main/linux-64/libwebp-base-1.2.2-h7f8727e_0.conda#162451b4884cfc7db8400580c711e83a
+https://repo.anaconda.com/pkgs/main/linux-64/libxcb-1.15-h7f8727e_0.conda#ada518dcadd6aaee9aae47ba9a671553
+https://repo.anaconda.com/pkgs/main/linux-64/libzopfli-1.0.3-he6710b0_0.conda#8671895f71c9046f38b814f8662226f5
+https://repo.anaconda.com/pkgs/main/linux-64/lz4-c-1.9.3-h295c915_1.conda#d9bd18f73ff566e08add10a54a3463cf
+https://repo.anaconda.com/pkgs/main/linux-64/lzo-2.10-h7b6447c_2.conda#65722a7644f424de73fea6e87edd7653
+https://repo.anaconda.com/pkgs/main/linux-64/mpich-3.3.2-hc856adb_0.conda#3ca2b609c8caabd6ebe6122f60069fda
+https://repo.anaconda.com/pkgs/main/linux-64/ncurses-6.3-h7f8727e_2.conda#4edf660a09cc7adcb21120464b2a1783
+https://repo.anaconda.com/pkgs/main/linux-64/openssl-1.1.1o-h7f8727e_0.conda#dff07c1e2347fed6e5a3afbbcd5bddcc
+https://repo.anaconda.com/pkgs/main/linux-64/pcre-8.45-h295c915_0.conda#b32ccc24d1d9808618c1e898da60f68d
+https://repo.anaconda.com/pkgs/main/linux-64/snappy-1.1.9-h295c915_0.conda#807eab3ad2fe697e8013dd6b9ea830f2
+https://repo.anaconda.com/pkgs/main/linux-64/tbb-2020.3-hfd86e86_0.conda#7d06fdc8b4f3e389f26f67311c7ccf5f
+https://repo.anaconda.com/pkgs/main/linux-64/xz-5.2.5-h7f8727e_1.conda#5d01fcf310bf465237f6b95a019d73bc
+https://repo.anaconda.com/pkgs/main/linux-64/yaml-0.2.5-h7b6447c_0.conda#39fdbf4db769e494ffb06d95680c83d8
+https://repo.anaconda.com/pkgs/main/linux-64/zfp-0.5.5-h295c915_6.conda#a20971c5ed1ae5a1ebb442b75edb48df
+https://repo.anaconda.com/pkgs/main/linux-64/zlib-1.2.12-h7f8727e_2.conda#4f4080e9939f082332cd8be7fedad087
+https://repo.anaconda.com/pkgs/main/linux-64/brunsli-0.1-h2531618_0.conda#6777d1b10f8e02143f9708699f7ab354
+https://repo.anaconda.com/pkgs/main/linux-64/glib-2.69.1-h4ff587b_1.conda#4c3eae7c0b8b1c8fb3046a0740313bbf
+https://repo.anaconda.com/pkgs/main/linux-64/libedit-3.1.20210910-h7f8727e_0.conda#cf16006f8f24e4224ddce196471d2509
+https://repo.anaconda.com/pkgs/main/linux-64/libllvm10-10.0.1-hbcb73fb_5.conda#198e840fc17a5bff7f1ee543ee1981b2
+https://repo.anaconda.com/pkgs/main/linux-64/libnghttp2-1.46.0-hce63b2e_0.conda#bf09e3bce5afd6b281f6e3722c4b7f7b
+https://repo.anaconda.com/pkgs/main/linux-64/libpng-1.6.37-hbc83047_0.conda#689f903925dcf6c5ab7bc1de0f58b67b
+https://repo.anaconda.com/pkgs/main/linux-64/libssh2-1.10.0-h8f2d780_0.conda#dede0f0061d9891642f640c2c4ea442e
+https://repo.anaconda.com/pkgs/main/linux-64/libxml2-2.9.12-h74e7548_2.conda#eff5ba91c84a8329c2a1117bee13cd68
+https://repo.anaconda.com/pkgs/main/linux-64/readline-8.1.2-h7f8727e_1.conda#ea33f478fea12406f394944e7e4f3d20
+https://repo.anaconda.com/pkgs/main/linux-64/tk-8.6.11-h1ccaba5_1.conda#5d7d7abe559370a7a8519177929dd338
+https://repo.anaconda.com/pkgs/main/linux-64/zeromq-4.3.4-h2531618_0.conda#45ce422428d9c98f354aec4b5cbd1046
+https://repo.anaconda.com/pkgs/main/linux-64/zstd-1.4.9-haebb681_0.conda#2e81424da35919b0f552b9e5ba0a37ba
+https://repo.anaconda.com/pkgs/main/linux-64/blosc-1.21.0-h8c45485_0.conda#34cd84f63d186aa8c921317683523a25
+https://repo.anaconda.com/pkgs/main/linux-64/dbus-1.13.18-hb2f20db_0.conda#6a6a6f1391f807847404344489ef6cf4
+https://repo.anaconda.com/pkgs/main/linux-64/freetype-2.11.0-h70c0345_0.conda#b767874a6273e1058027cb2e300d00ac
+https://repo.anaconda.com/pkgs/main/linux-64/gstreamer-1.14.0-h28cd5cc_2.conda#6af5d0cbd7310e1cd8a6a5c1c99649b2
+https://repo.anaconda.com/pkgs/main/linux-64/krb5-1.19.2-hac12032_0.conda#62a43976b48799377103390c340a3824
+https://repo.anaconda.com/pkgs/main/linux-64/libtiff-4.2.0-h85742a9_0.conda#a70887f6e46ea21d5e4e27685bd59ff9
+https://repo.anaconda.com/pkgs/main/linux-64/sqlite-3.38.3-hc218d9a_0.conda#94e50b233f796aa4e0b7cf38611c0852
+https://repo.anaconda.com/pkgs/main/linux-64/fontconfig-2.13.1-h6c09931_0.conda#fa04e89166d4b44326c6d76e2f708715
+https://repo.anaconda.com/pkgs/main/linux-64/gst-plugins-base-1.14.0-h8213a91_2.conda#838648422452405b86699e780e293c1d
+https://repo.anaconda.com/pkgs/main/linux-64/lcms2-2.12-h3be6417_0.conda#719db47afba9f6586eecb5eacac70bff
+https://repo.anaconda.com/pkgs/main/linux-64/libcurl-7.82.0-h0b77cf5_0.conda#1a3f73bece51854cd6afd18238a983c6
+https://repo.anaconda.com/pkgs/main/linux-64/libwebp-1.2.2-h55f646e_0.conda#c9ed6bddefc09dbfc246301c3ce3ca14
+https://repo.anaconda.com/pkgs/main/linux-64/openjpeg-2.4.0-h3ad879b_0.conda#86baecb47ecaa7f7ff2657a1f03b90c9
+https://repo.anaconda.com/pkgs/main/linux-64/python-3.8.13-h12debd9_0.conda#edc17980bae484b711e090f0a0cbbaef
+https://repo.anaconda.com/pkgs/main/noarch/alabaster-0.7.12-pyhd3eb1b0_0.tar.bz2#21ad3b69a5ce6c22e724e9dbb4cffa65
+https://repo.anaconda.com/pkgs/main/noarch/appdirs-1.4.4-pyhd3eb1b0_0.conda#5673d98d06171cb6eed03a6736845c4d
+https://repo.anaconda.com/pkgs/main/noarch/asciitree-0.3.3-py_2.conda#88e5fad50e595d527acfc96b782261cb
+https://repo.anaconda.com/pkgs/main/noarch/attrs-21.4.0-pyhd3eb1b0_0.conda#3bc977a57587a7964921e3e1e2e31f9e
+https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2#b2aa5503875aba2f1d88cae9df9a96d5
+https://repo.anaconda.com/pkgs/main/linux-64/certifi-2022.5.18.1-py38h06a4308_0.conda#dee2837b4ce535119636eb15ab312fd2
+https://conda.anaconda.org/conda-forge/noarch/cfgv-3.3.1-pyhd8ed1ab_0.tar.bz2#ebb5f5f7dc4f1a3780ef7ea7738db08c
+https://repo.anaconda.com/pkgs/main/linux-64/cfitsio-3.470-hf0d0db6_6.conda#f590184d85f5ba8564bcfd052c09862d
+https://repo.anaconda.com/pkgs/main/noarch/charset-normalizer-2.0.4-pyhd3eb1b0_0.conda#e7a441d94234b2b5fafee06e25dbf076
+https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-2.0.0-pyhd3eb1b0_0.conda#8e38585c33e6c659e0e5b0b18e6bf3e2
+https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda#f550604d18b83878f647a491b2b343d6
+https://repo.anaconda.com/pkgs/main/linux-64/coverage-5.5-py38h27cfd23_2.conda#42741b84cb7f6cfd80c5093f26d75659
+https://repo.anaconda.com/pkgs/main/noarch/cycler-0.11.0-pyhd3eb1b0_0.conda#f5e365d2cdb66d547eb8c3ab93843aab
+https://repo.anaconda.com/pkgs/main/linux-64/debugpy-1.5.1-py38h295c915_0.conda#15761a758f4788b5e068c0b2288afab5
+https://repo.anaconda.com/pkgs/main/noarch/decorator-5.1.1-pyhd3eb1b0_0.conda#4d969aac32a0faf84af90c797bfc7fec
+https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda#d912068b0729930972adcaac338882c0
+https://repo.anaconda.com/pkgs/main/noarch/distlib-0.3.2-pyhd3eb1b0_0.conda#86c256c16d9b416ffee75a4cfccf6c9a
+https://repo.anaconda.com/pkgs/main/linux-64/docutils-0.17.1-py38h06a4308_1.conda#d90ea0ab17a922c881d967ef5b239752
+https://repo.anaconda.com/pkgs/main/linux-64/entrypoints-0.4-py38h06a4308_0.conda#3cdf167326744187efd003c24ab77c99
+https://repo.anaconda.com/pkgs/main/noarch/executing-0.8.3-pyhd3eb1b0_0.conda#7be61d1c3c555fb37682b28d7a53d622
+https://repo.anaconda.com/pkgs/main/noarch/filelock-3.6.0-pyhd3eb1b0_0.conda#527be2ebbc60c0de6533ce33132ce303
+https://repo.anaconda.com/pkgs/main/linux-64/fsspec-2022.3.0-py38h06a4308_0.conda#45df02eb7d28e318d96e44caa6294626
+https://conda.anaconda.org/conda-forge/linux-64/hdf5-1.10.5-mpi_mpich_hc41e3f9_1011.tar.bz2#fbff5732eb8a94e4107e3bb8a1ec14c8
+https://repo.anaconda.com/pkgs/main/noarch/idna-3.3-pyhd3eb1b0_0.conda#8f43a528cf83b43af38a4d142fa38b8a
+https://repo.anaconda.com/pkgs/main/noarch/imagesize-1.3.0-pyhd3eb1b0_0.conda#306855b2038e489d01dff5b343a8adb9
+https://repo.anaconda.com/pkgs/main/noarch/iniconfig-1.1.1-pyhd3eb1b0_0.tar.bz2#e40edff2c5708f342cef43c7f280c507
+https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda#553832c0b872a28088a0001fa2ba3822
+https://repo.anaconda.com/pkgs/main/noarch/isort-5.9.3-pyhd3eb1b0_0.conda#75f2497fe01a9ac6208d72e26066b76a
+https://repo.anaconda.com/pkgs/main/noarch/joblib-1.1.0-pyhd3eb1b0_0.conda#cae25b839f3b24686e683addde01b742
+https://repo.anaconda.com/pkgs/main/noarch/json5-0.9.6-pyhd3eb1b0_0.conda#4e721ee2dbfa20069719d2ee19185031
+https://repo.anaconda.com/pkgs/main/linux-64/kiwisolver-1.4.2-py38h295c915_0.conda#00e5f5a50b547c8c31d1a559828f3251
+https://repo.anaconda.com/pkgs/main/linux-64/llvmlite-0.36.0-py38h612dafd_4.conda#0272f086c1709b7b8be5accdc8c3c9e9
+https://repo.anaconda.com/pkgs/main/linux-64/locket-1.0.0-py38h06a4308_0.conda#9075221bb581a3c2fc5a079efc183784
+https://repo.anaconda.com/pkgs/main/linux-64/markupsafe-2.0.1-py38h27cfd23_0.conda#50dcb2135a5ded2e9f60a74bf5093e58
+https://repo.anaconda.com/pkgs/main/linux-64/mccabe-0.6.1-py38_1.conda#5dec11e3e49e564e6c3a50877767444e
+https://repo.anaconda.com/pkgs/main/linux-64/mistune-0.8.4-py38h7b6447c_1000.conda#5b985f581fab6e8aff8157976b0868a9
+https://repo.anaconda.com/pkgs/main/noarch/mock-4.0.3-pyhd3eb1b0_0.conda#e30b674f018b25357c076ae407d769b9
+https://repo.anaconda.com/pkgs/main/noarch/more-itertools-8.12.0-pyhd3eb1b0_0.conda#ac1210cc005fb8bd631ea8beb8343332
+https://repo.anaconda.com/pkgs/main/linux-64/mpi4py-3.0.3-py38h028fd6f_0.conda#e5479dc898e61765deee83aba69afe7c
+https://repo.anaconda.com/pkgs/main/linux-64/msgpack-python-1.0.3-py38hd09550d_0.conda#c64b1bcb40a89d090ea5cf08360b4f57
+https://repo.anaconda.com/pkgs/main/noarch/munkres-1.1.4-py_0.conda#148362ba07f92abab76999a680c80084
+https://repo.anaconda.com/pkgs/main/linux-64/nest-asyncio-1.5.5-py38h06a4308_0.conda#bbf9623dd1f0b2d8091f8b5a6eb63da5
+https://repo.anaconda.com/pkgs/main/noarch/networkx-2.7.1-pyhd3eb1b0_0.conda#6c97a8687676de8dac42bd8373892397
+https://repo.anaconda.com/pkgs/main/noarch/pandocfilters-1.5.0-pyhd3eb1b0_0.conda#5547ced9e3bb4c513405998957b52c7b
+https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.3-pyhd3eb1b0_0.conda#c6f0f6219bf5ce2b510ef4b75cbc3e01
+https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda#4a6363fd8dda664b95f99f7c5aa95abc
+https://repo.anaconda.com/pkgs/main/linux-64/pillow-9.0.1-py38h22f2fdc_0.conda#13c7b8b727dc6af99e9f6d75b3ec18f3
+https://repo.anaconda.com/pkgs/main/linux-64/pluggy-0.13.1-py38h06a4308_0.conda#4e25e8a00605b6b286f96900d6e8caa9
+https://conda.anaconda.org/conda-forge/noarch/prefixed-0.3.2-pyhd8ed1ab_0.tar.bz2#101a437c0ab238eaa1736dd665b33fa2
+https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.13.1-pyhd3eb1b0_0.conda#05275f89084c4ce7f9b0bc1e258b3e9e
+https://repo.anaconda.com/pkgs/main/noarch/ptyprocess-0.7.0-pyhd3eb1b0_2.conda#7441d2827d4bfbcc1fa308875a146246
+https://repo.anaconda.com/pkgs/main/noarch/pure_eval-0.2.2-pyhd3eb1b0_0.conda#a87d6d9827e5dff68d34d69971f8a9b1
+https://repo.anaconda.com/pkgs/main/noarch/py-1.11.0-pyhd3eb1b0_0.conda#7205a898ed2abbf6e9b903dff6abe08e
+https://repo.anaconda.com/pkgs/main/noarch/pycodestyle-2.7.0-pyhd3eb1b0_0.conda#30e8cdd78a0754c2d789d53fa465cd30
+https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.21-pyhd3eb1b0_0.conda#135a72ff2a31150a3a3ff0b1edd41ca9
+https://repo.anaconda.com/pkgs/main/noarch/pyflakes-2.3.1-pyhd3eb1b0_0.conda#eaecb0dee9d296e2ba1dadf6902149f3
+https://repo.anaconda.com/pkgs/main/noarch/pygments-2.11.2-pyhd3eb1b0_0.conda#eff55c770961f459a734cf86768aac98
+https://repo.anaconda.com/pkgs/main/noarch/pyparsing-3.0.4-pyhd3eb1b0_0.conda#6bca2ae9c9aae9ccdebcb8cf2aa87cb3
+https://repo.anaconda.com/pkgs/main/linux-64/pyrsistent-0.18.0-py38heee7806_0.conda#d350240d35e34c9e250cbb1b9aa6460b
+https://repo.anaconda.com/pkgs/main/linux-64/pysocks-1.7.1-py38h06a4308_0.conda#21c67581f3a81ffbb02728eb2178d693
+https://repo.anaconda.com/pkgs/main/noarch/python-fastjsonschema-2.15.1-pyhd3eb1b0_0.conda#ad1b2f7b33a45d0d68979ca2ad84b6a9
+https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.8-2_cp38.tar.bz2#bfbb29d517281e78ac53e48d21e6e860
+https://repo.anaconda.com/pkgs/main/noarch/pytz-2021.3-pyhd3eb1b0_0.conda#76415b791ffd2007687ac5f0665aa7af
+https://repo.anaconda.com/pkgs/main/linux-64/pyyaml-6.0-py38h7f8727e_1.conda#2bd06f71e7a66ee11a748fd2f3a49aa9
+https://repo.anaconda.com/pkgs/main/linux-64/pyzmq-22.3.0-py38h295c915_2.conda#4b7508c58d9552487e5730f55c5cd5ac
+https://repo.anaconda.com/pkgs/main/linux-64/qt-5.9.7-h5867ecd_1.conda#05507dbc35c46ac5a7066fc860a62341
+https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.8.0-pyhd3eb1b0_1.conda#bfa3c5c61a5a91e528a1d2d1e3cae6c9
+https://repo.anaconda.com/pkgs/main/linux-64/sip-4.19.13-py38h295c915_0.conda#2046e66b7d12f7c0cda5687e4c27b692
+https://repo.anaconda.com/pkgs/main/noarch/six-1.16.0-pyhd3eb1b0_1.conda#34586824d411d36af2fa40e799c172d0
+https://repo.anaconda.com/pkgs/main/linux-64/sniffio-1.2.0-py38h06a4308_1.conda#95d3411f0f78c324a6655a2b370df2c5
+https://repo.anaconda.com/pkgs/main/noarch/snowballstemmer-2.2.0-pyhd3eb1b0_0.conda#c8c10f2cd854c0a27630760958bba60c
+https://repo.anaconda.com/pkgs/main/noarch/sphinx_rtd_theme-0.4.3-pyhd3eb1b0_0.conda#0c60976249f116d5aa21fd50f0f94990
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-applehelp-1.0.2-pyhd3eb1b0_0.tar.bz2#ac923499f97b9a9ab7c672b27cb2a1a8
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-devhelp-1.0.2-pyhd3eb1b0_0.tar.bz2#bc39c2b70430734b5879d6b504e3311f
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-htmlhelp-2.0.0-pyhd3eb1b0_0.conda#2af558ca8b56151110c7a3639a1ea348
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-jsmath-1.0.1-pyhd3eb1b0_0.tar.bz2#e43f8de7d6a717935ab220a0c957771d
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-qthelp-1.0.3-pyhd3eb1b0_0.tar.bz2#08d67f73f640b4d1e5e8890a324b60e3
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-serializinghtml-1.1.5-pyhd3eb1b0_0.conda#0440b84dfd478f340cf14c2d7c24f6c7
+https://repo.anaconda.com/pkgs/main/noarch/testpath-0.5.0-pyhd3eb1b0_0.conda#bd2a5c664c982e8637ae17b1662bd9a4
+https://repo.anaconda.com/pkgs/main/noarch/threadpoolctl-2.2.0-pyh0d69192_0.conda#bbfdbae4934150b902f97daaf287efe2
+https://repo.anaconda.com/pkgs/main/noarch/toml-0.10.2-pyhd3eb1b0_0.conda#cda05f5f6d8509529d1a2743288d197a
+https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.2-pyhd3eb1b0_0.conda#9fedc09c1ff4c9bc22695093c1ecd335
+https://repo.anaconda.com/pkgs/main/linux-64/tornado-6.1-py38h27cfd23_0.conda#d2d3043f631807af72b0fde504baf625
+https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.62.2-pyhd3eb1b0_1.conda#9e0c24d3f7c51fbd42a2ebeb50b5c0fa
+https://repo.anaconda.com/pkgs/main/noarch/traitlets-5.1.1-pyhd3eb1b0_0.conda#675f60e84f695e63749b09f9ed464eda
+https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-4.1.1-pyh06a4308_0.conda#8d4303f11560fe9621c962e87cf64d27
+https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-pyhd3eb1b0_0.conda#ffa649340272c3f6466ba01da254c3b0
+https://repo.anaconda.com/pkgs/main/linux-64/webencodings-0.5.1-py38_1.conda#5c9a80af48919815917612e58474a391
+https://repo.anaconda.com/pkgs/main/noarch/wheel-0.37.1-pyhd3eb1b0_0.conda#ab85e96e26da8d5797c2458232338b86
+https://repo.anaconda.com/pkgs/main/linux-64/zipp-3.8.0-py38h06a4308_0.conda#256d0b94f739fe6c4bd23670dc30da68
+https://repo.anaconda.com/pkgs/main/linux-64/anyio-3.5.0-py38h06a4308_0.conda#85440472f52d5b17de2ca8d27d41d21d
+https://repo.anaconda.com/pkgs/main/noarch/asttokens-2.0.5-pyhd3eb1b0_0.conda#140486e2ce4f3931b44aa5f7ff8d88da
+https://repo.anaconda.com/pkgs/main/noarch/babel-2.9.1-pyhd3eb1b0_0.conda#61575e8b70e18ebc54e65da5e441b861
+https://conda.anaconda.org/conda-forge/linux-64/blessed-1.19.1-py38h578d9bd_1.tar.bz2#fd18c5d7ca06563889b1da307eaca453
+https://repo.anaconda.com/pkgs/main/linux-64/cffi-1.15.0-py38hd667e15_1.conda#7b12fe728b28de7b8851af1eb1ba1d38
+https://repo.anaconda.com/pkgs/main/linux-64/cytoolz-0.11.0-py38h7b6447c_0.conda#674167bf0c35566f9fb70653cfc196c0
+https://repo.anaconda.com/pkgs/main/noarch/fasteners-0.16.3-pyhd3eb1b0_0.conda#335fdb99580fb176808d42ccd3c332e1
+https://repo.anaconda.com/pkgs/main/noarch/fonttools-4.25.0-pyhd3eb1b0_0.conda#bb9c5b5a6d892fca5efe4bf0203b6a48
+https://repo.anaconda.com/pkgs/main/linux-64/importlib-metadata-4.11.3-py38h06a4308_0.conda#423bacee14c9d4174efc8cd379f6c2dc
+https://repo.anaconda.com/pkgs/main/noarch/importlib_resources-5.2.0-pyhd3eb1b0_1.conda#3e7caf9dbd3b4771e9b951ffc7cdad80
+https://repo.anaconda.com/pkgs/main/linux-64/jedi-0.18.1-py38h06a4308_1.conda#d3629035a1f2c61553814cc20fa2ec11
+https://repo.anaconda.com/pkgs/main/noarch/jinja2-3.0.3-pyhd3eb1b0_0.conda#a5b0429ead9704cd1ad0b044c97e728f
+https://repo.anaconda.com/pkgs/main/linux-64/jupyter_core-4.10.0-py38h06a4308_0.conda#dbb24641c111c02f0620a3c339517d74
+https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda#af46aff4922ca45df6ba19b313df6070
+https://repo.anaconda.com/pkgs/main/noarch/matplotlib-inline-0.1.2-pyhd3eb1b0_2.conda#47e865f8b884de7c5d516349e83457a7
+https://repo.anaconda.com/pkgs/main/linux-64/mkl-service-2.4.0-py38h7f8727e_0.conda#d44a183b4c54b461a8fa24805c53179a
+https://repo.anaconda.com/pkgs/main/noarch/packaging-21.3-pyhd3eb1b0_0.conda#07bbfbb961db7fa329cc42716943ea62
+https://repo.anaconda.com/pkgs/main/noarch/partd-1.2.0-pyhd3eb1b0_1.conda#d02d8b6ea30c680d3fafe4ac50cc4b18
+https://repo.anaconda.com/pkgs/main/noarch/pexpect-4.8.0-pyhd3eb1b0_3.conda#765b2562d6cdd14bb6d44fc170a04331
+https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.20-pyhd3eb1b0_0.conda#19fa1fa6a03645e39e7dce3bdbe9d72f
+https://repo.anaconda.com/pkgs/main/linux-64/pyqt-5.9.2-py38h05f1152_4.conda#d3e6b8e1a634125414f87f231c755dae
+https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.2-pyhd3eb1b0_0.conda#211ee00320b08a1ac9fea6677649f6c9
+https://repo.anaconda.com/pkgs/main/linux-64/setuptools-61.2.0-py38h06a4308_0.conda#c5e94bc1e8e443ae6d53b191a351591e
+https://repo.anaconda.com/pkgs/main/linux-64/snakeviz-2.0.1-py38h06a4308_0.conda#106357aab51b20535ac3a59a48035875
+https://repo.anaconda.com/pkgs/main/linux-64/terminado-0.13.1-py38h06a4308_0.conda#3fc07a4bdf64fbf00766564f1aaa5618
+https://repo.anaconda.com/pkgs/main/noarch/typing-extensions-4.1.1-hd3eb1b0_0.conda#0b535dfd0618653dd772c78c9c2b56a8
+https://repo.anaconda.com/pkgs/main/linux-64/virtualenv-20.4.6-py38h06a4308_1.conda#41648f5a5f164a1dd02d394600a67200
+https://repo.anaconda.com/pkgs/main/linux-64/websocket-client-0.58.0-py38h06a4308_4.conda#f717b8646e57dfa782479480407f6647
+https://repo.anaconda.com/pkgs/main/linux-64/argon2-cffi-bindings-21.2.0-py38h7f8727e_0.conda#933eaa8ad180b8fb3b0e2ca36fb6954e
+https://repo.anaconda.com/pkgs/main/noarch/bleach-4.1.0-pyhd3eb1b0_0.conda#256eb7e384e35f993ef8ccd6c4f45e58
+https://repo.anaconda.com/pkgs/main/linux-64/brotlipy-0.7.0-py38h27cfd23_1003.conda#e881c8ee8a4048f29da5d20f0330fe37
+https://repo.anaconda.com/pkgs/main/linux-64/cryptography-37.0.1-py38h9ce1e76_0.conda#16d301ed789096eb9881a25ed7a1155e
+https://repo.anaconda.com/pkgs/main/linux-64/dask-core-2022.5.0-py38h06a4308_0.conda#3d9e2917bed5f51aeb2ea2ea70f8e552
+https://conda.anaconda.org/conda-forge/noarch/enlighten-1.10.1-pyhd8ed1ab_0.tar.bz2#f5c404e6c73888f69932895043ea5938
+https://repo.anaconda.com/pkgs/main/noarch/flake8-3.9.2-pyhd3eb1b0_0.conda#04cb15847ce1ae281bac8eb5d67da440
+https://repo.anaconda.com/pkgs/main/linux-64/jsonschema-4.4.0-py38h06a4308_0.conda#877ad2da45101c06e0e779df2abcaf30
+https://repo.anaconda.com/pkgs/main/linux-64/jupyter_client-7.2.2-py38h06a4308_0.conda#6252e586fe1056d82d0cdeac2f46fec1
+https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.6.0-pyhd8ed1ab_0.tar.bz2#0941325bf48969e2b3b19d0951740950
+https://repo.anaconda.com/pkgs/main/linux-64/numpy-base-1.20.1-py38h7d8b39e_0.conda#b790b4a82a538017f30fd4ca9c0e2301
+https://repo.anaconda.com/pkgs/main/linux-64/pip-21.2.4-py38h06a4308_0.conda#4cc887ba625309097ff29d8ce96329d9
+https://repo.anaconda.com/pkgs/main/linux-64/pytest-6.2.4-py38h06a4308_2.conda#62967f3cfdf22d8f7fc89826cc72f312
+https://repo.anaconda.com/pkgs/main/noarch/stack_data-0.2.0-pyhd3eb1b0_0.conda#6212968e73726f6da42e5ffcd2bea92d
+https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py38h43d8883_2.tar.bz2#3f6ce81c7d28563fe2af763d9ff43e62
+https://repo.anaconda.com/pkgs/main/noarch/argon2-cffi-21.3.0-pyhd3eb1b0_0.conda#f00b851bc61b4c313903d31c7daecb09
+https://conda.anaconda.org/conda-forge/noarch/identify-2.5.1-pyhd8ed1ab_0.tar.bz2#6f41e3056fcd3061fbc2b49b3309fe0c
+https://repo.anaconda.com/pkgs/main/linux-64/ipython-8.3.0-py38h06a4308_0.conda#770f26af389c5103c26addcae7fe755b
+https://repo.anaconda.com/pkgs/main/linux-64/nbformat-5.3.0-py38h06a4308_0.conda#a9c19f2c0244559b292acc2ce458e871
+https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-22.0.0-pyhd3eb1b0_0.conda#1dbbf9422269cd62c7094960d9b43f36
+https://repo.anaconda.com/pkgs/main/linux-64/ipykernel-6.9.1-py38h06a4308_0.conda#64a5bfbc32f1e68b0eda430a1e3e2c11
+https://repo.anaconda.com/pkgs/main/linux-64/nbclient-0.5.13-py38h06a4308_0.conda#a0606d4de8c491a8381e72f7925000ca
+https://conda.anaconda.org/conda-forge/linux-64/pre-commit-2.15.0-py38h578d9bd_1.tar.bz2#82341297a478aaca561a2db20b037aab
+https://repo.anaconda.com/pkgs/main/linux-64/urllib3-1.26.9-py38h06a4308_0.conda#40c1c6f5e634ec77344a822ab3aa84cc
+https://repo.anaconda.com/pkgs/main/linux-64/nbconvert-6.1.0-py38h06a4308_0.conda#17bbbaa994689cb955db812d4f8f594d
+https://repo.anaconda.com/pkgs/main/noarch/requests-2.27.1-pyhd3eb1b0_0.conda#9b593f86737e69140c47c2107ecf277c
+https://repo.anaconda.com/pkgs/main/noarch/jupyter_server-1.13.5-pyhd3eb1b0_0.conda#303eb09f873fde3c13abaaed542d54e0
+https://repo.anaconda.com/pkgs/main/linux-64/notebook-6.4.11-py38h06a4308_0.conda#2e58e04bd54880bea66b833c68f5f8c0
+https://repo.anaconda.com/pkgs/main/noarch/sphinx-4.2.0-pyhd3eb1b0_1.conda#8f65a307ecef80b3afd979777cc5b549
+https://repo.anaconda.com/pkgs/main/linux-64/jupyterlab_server-2.12.0-py38h06a4308_0.conda#ae60c37f4ef571584563c5cbd6ac9466
+https://repo.anaconda.com/pkgs/main/noarch/nbclassic-0.3.5-pyhd3eb1b0_0.conda#22683be353228acd015cae8a4676b462
+https://repo.anaconda.com/pkgs/main/noarch/jupyterlab-3.1.7-pyhd3eb1b0_0.conda#9292f2b7ad621d8a6d9a9a7f7338664d
+https://repo.anaconda.com/pkgs/main/linux-64/bottleneck-1.3.4-py38hce1f21e_0.conda#9ca7b8cff6a7f97cd2395f20dd46bc90
+https://conda.anaconda.org/conda-forge/linux-64/h5py-2.10.0-mpi_mpich_py38haaae0f6_2.tar.bz2#d1d5e76f488eea5e325ec02c8375cc3d
+https://conda.anaconda.org/conda-forge/linux-64/hdf5plugin-2.3.0-py38h5235d98_0.tar.bz2#32d5f10a85464bc063594b91d1fd2b12
+https://repo.anaconda.com/pkgs/main/linux-64/imagecodecs-2021.8.26-py38h4cda21f_0.conda#4cfb229b623dd26c7cca3bfa18638a98
+https://repo.anaconda.com/pkgs/main/noarch/imageio-2.9.0-pyhd3eb1b0_0.conda#4f1d37bdc3afdb2d237fd9b6b920ec3d
+https://repo.anaconda.com/pkgs/main/linux-64/matplotlib-3.5.1-py38h06a4308_1.conda#d7a15b57f9ba4a2bad00be3fbda25206
+https://repo.anaconda.com/pkgs/main/linux-64/matplotlib-base-3.5.1-py38ha18d171_1.conda#61037444fe5aef294f20fadac4204392
+https://repo.anaconda.com/pkgs/main/linux-64/mkl_fft-1.3.0-py38h42c9631_2.conda#466c36695155a88248c4cde9871309e2
+https://repo.anaconda.com/pkgs/main/linux-64/mkl_random-1.2.2-py38h51133e4_0.conda#1e3b7251e474e099a4bfd672fdca17d8
+https://repo.anaconda.com/pkgs/main/linux-64/numpy-1.20.1-py38h93e21f0_0.conda#e38538afb376c0f772ce8f09db54a6ae
+https://repo.anaconda.com/pkgs/main/linux-64/numba-0.53.1-py38ha9443f7_0.conda#8660e6f10449493daebc4ecd0a744ace
+https://repo.anaconda.com/pkgs/main/linux-64/numcodecs-0.8.0-py38h2531618_0.conda#1f60b0f58011d77643f2fafb9c764d1c
+https://repo.anaconda.com/pkgs/main/linux-64/numexpr-2.7.3-py38h22e1b3c_1.conda#fdd4dd744e6409461fbc9543e30da04f
+https://repo.anaconda.com/pkgs/main/noarch/opt_einsum-3.3.0-pyhd3eb1b0_1.conda#53205b8b5762c06f85b6bb7abd4f496e
+https://repo.anaconda.com/pkgs/main/linux-64/pywavelets-1.3.0-py38h7f8727e_0.conda#e82c8fa9f3829ecd054bf24a35932d1d
+https://repo.anaconda.com/pkgs/main/linux-64/scipy-1.6.2-py38had2a1c9_1.conda#e6c2e8a210cbd7d6a361d3bf6c70093a
+https://repo.anaconda.com/pkgs/main/noarch/tifffile-2021.7.2-pyhd3eb1b0_2.conda#5a265e3b9694c13bcfb8c40a3b8e3d8f
+https://repo.anaconda.com/pkgs/main/linux-64/pandas-1.3.3-py38h8c16a72_0.conda#f32e088c2b8e54889f2e0580a92fb73e
+https://conda.anaconda.org/conda-forge/linux-64/pytables-3.6.1-py38h9f153d1_1.tar.bz2#2b8a8a6f30674b3272b4755b21030c02
+https://repo.anaconda.com/pkgs/main/linux-64/scikit-image-0.19.2-py38h51133e4_0.conda#cbe42091efd07c1dbdd5f14031a03ea7
+https://repo.anaconda.com/pkgs/main/linux-64/scikit-learn-0.24.2-py38ha9443f7_0.conda#0afaea75ccd4254c7683c0813579c9f4
+https://repo.anaconda.com/pkgs/main/noarch/zarr-2.8.1-pyhd3eb1b0_0.conda#7df763b90dcefae1c6039911fc72b694
+https://repo.anaconda.com/pkgs/main/noarch/seaborn-0.11.2-pyhd3eb1b0_0.conda#36b64fb4e3b76ded59d6388c9582de69
+# pip click @ https://files.pythonhosted.org/packages/d2/3d/fa76db83bf75c4f8d338c2fd15c8d33fdd7ad23a9b5e57eb6c5de26b430e/click-7.1.2-py2.py3-none-any.whl#md5=None
+# pip colorlog @ https://files.pythonhosted.org/packages/51/62/61449c6bb74c2a3953c415b2cdb488e4f0518ac67b35e2b03a6d543035ca/colorlog-4.8.0-py2.py3-none-any.whl#md5=None
+# pip fastjsonschema @ https://files.pythonhosted.org/packages/e6/0b/24795939622d60f4b453aa7040f23c6a6f8b44c7c026c3b42d9842e6cc31/fastjsonschema-2.15.3-py3-none-any.whl#md5=None
+# pip future @ https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz#md5=None
+# pip lazy-object-proxy @ https://files.pythonhosted.org/packages/45/9f/405023669e74d96d3c221832fdea58fdd4a6faaef569146c34bf4072813e/lazy_object_proxy-1.7.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#md5=None
+# pip progress @ https://files.pythonhosted.org/packages/e9/ff/7871f3736dc6707435b2a2f217c46b5a5bc6ea7e0a9a443cd69146a1afd1/progress-1.4.tar.gz#md5=None
+# pip smmap @ https://files.pythonhosted.org/packages/6d/01/7caa71608bc29952ae09b0be63a539e50d2484bc37747797a66a60679856/smmap-5.0.0-py3-none-any.whl#md5=None
+# pip tabulate @ https://files.pythonhosted.org/packages/ca/80/7c0cad11bd99985cfe7c09427ee0b4f9bd6b048bd13d4ffb32c6db237dfb/tabulate-0.8.9-py3-none-any.whl#md5=None
+# pip unidecode @ https://files.pythonhosted.org/packages/f9/5b/7603add7f192252916b85927263b598c74585f82389e6e42318a6278159b/Unidecode-1.3.4-py3-none-any.whl#md5=None
+# pip wrapt @ https://files.pythonhosted.org/packages/36/ee/944dc7e5462662270e8a379755bcc543fc8f09029866288060dc163ed5b4/wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl#md5=None
+# pip astroid @ https://files.pythonhosted.org/packages/94/58/6f1bbfd88b6ba5271b4a9be99cb15cb2fe369794ba410390f0d672c6ad39/astroid-2.11.5-py3-none-any.whl#md5=None
+# pip docstr-coverage @ https://files.pythonhosted.org/packages/ef/97/80f5de5ab716ece99fec79ce1ae51821ef4fcd6ccd64902b4481991fbba4/docstr_coverage-2.1.1-py3-none-any.whl#md5=None
+# pip flake8-polyfill @ https://files.pythonhosted.org/packages/86/b5/a43fed6fd0193585d17d6faa7b85317d4461f694aaed546098c69f856579/flake8_polyfill-1.0.2-py2.py3-none-any.whl#md5=None
+# pip genbadge @ https://files.pythonhosted.org/packages/20/b8/61d32e888fdcced280813ec871c50c6d0ef17fc266fe56d600fd77201566/genbadge-1.0.6-py2.py3-none-any.whl#md5=None
+# pip gitdb @ https://files.pythonhosted.org/packages/a3/7c/5d747655049bfbf75b5fcec57c8115896cb78d6fafa84f6d3ef4c0f13a98/gitdb-4.0.9-py3-none-any.whl#md5=None
+# pip mando @ https://files.pythonhosted.org/packages/e6/cc/f6e25247c1493a654785e68cd975e479c311e99dafedd49ed17f8d300e0c/mando-0.6.4-py2.py3-none-any.whl#md5=None
+# pip retrying @ https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz#md5=None
+# pip gitpython @ https://files.pythonhosted.org/packages/83/32/ce68915670da6fd6b1e3fb4b3554b4462512f6441dddd194fc0f4f6ec653/GitPython-3.1.27-py3-none-any.whl#md5=None
+# pip plotly @ https://files.pythonhosted.org/packages/1f/f6/bd3c17c8003b6641df1228e80e1acac97ed8402635e46c2571f8e1ef63af/plotly-4.14.3-py2.py3-none-any.whl#md5=None
+# pip radon @ https://files.pythonhosted.org/packages/cf/fe/c400dbbbbde6649ad0164ef2ffef3672baefc62ecb676f58d0f25d8f83b0/radon-4.0.0-py2.py3-none-any.whl#md5=None
+# pip sphinx-autoapi @ https://files.pythonhosted.org/packages/5e/67/249380ade22a7efaa8a335f45a9b87f2fdda499c9fdc53913096dec5d1fe/sphinx_autoapi-1.8.4-py2.py3-none-any.whl#md5=None
+# pip wily @ https://files.pythonhosted.org/packages/e7/2c/53638ade80511eee70c29bcc52e90ca017836feecba1762c935112249aea/wily-1.20.0-py3-none-any.whl#md5=None
diff --git a/conda-lock.yml b/conda-lock.yml
new file mode 100644
index 0000000000000000000000000000000000000000..05d7917b213b681fbeea56fa09b4994c87a4fa86
--- /dev/null
+++ b/conda-lock.yml
@@ -0,0 +1,7096 @@
+# This lock file was generated by conda-lock (https://github.com/conda-incubator/conda-lock). DO NOT EDIT!
+#
+# A "lock file" contains a concrete list of package versions (with checksums) to be installed. Unlike
+# e.g. `conda env create`, the resulting environment will not change as new package versions become
+# available, unless you explicitly update the lock file.
+#
+# Install this environment as "YOURENV" with:
+#     conda-lock install -n YOURENV --file conda-lock.yml
+# To update a single package to the latest version compatible with the version constraints in the source:
+#     conda-lock lock --lockfile conda-lock.yml --update PACKAGE
+# To re-solve the entire environment, e.g. after changing a version constraint in the source file:
+#     conda-lock -f environment.yml --lockfile conda-lock.yml
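+#
+# As a sketch (assuming a recent conda-lock; exact flags may vary between versions),
+# per-platform explicit lockfiles (e.g. conda-linux-64.lock) can be rendered from
+# this unified file with:
+#     conda-lock render --kind explicit conda-lock.yml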
+metadata:
+  channels:
+  - url: defaults
+    used_env_vars: []
+  - url: conda-forge
+    used_env_vars: []
+  - url: anaconda
+    used_env_vars: []
+  content_hash:
+    linux-64: 11e751d2f7755f93aa218d92351d70ac61426688b2aa488cc33e570c2ddb5cc8
+    osx-64: b5d1f81dc1316eb7babb5d0e38665c10fdfd704a7ebe538ada1201badd2269fc
+  platforms:
+  - osx-64
+  - linux-64
+  sources:
+  - environment.yml
+package:
+- category: main
+  dependencies: {}
+  hash:
+    md5: c3473ff8bdb3d124ed5ff11ec380d6f9
+    sha256: 476626712f60e5ef0fe04c354727152b1ee5285d57ccd3575c7be930122bd051
+  manager: conda
+  name: _libgcc_mutex
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/_libgcc_mutex-0.1-main.conda
+  version: '0.1'
+- category: main
+  dependencies: {}
+  hash:
+    md5: 9a7a051e9bd41da46523acb017d8a517
+    sha256: 05f806e9b610f80e15f2c8d7b4e9a7a2817ed4870b85427acb2a89a1db44ec0e
+  manager: conda
+  name: blas
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/blas-1.0-mkl.conda
+  version: '1.0'
+- category: main
+  dependencies: {}
+  hash:
+    md5: fc9c0bf2e7893f5407ff74289dbcf295
+    sha256: cd75d93dfee17a55d1a05fa2a791432649cf6e7b6ee240c7707310ec75e70880
+  manager: conda
+  name: ca-certificates
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/ca-certificates-2022.4.26-h06a4308_0.conda
+  version: 2022.4.26
+- category: main
+  dependencies: {}
+  hash:
+    md5: 15d9e331499432d8e22935c1d3b5d2a1
+    sha256: 6a2d668a0a33fddbf047f8ef55c29ba5f803ef2c6fed923b7c753a836f4ce002
+  manager: conda
+  name: intel-openmp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/intel-openmp-2021.4.0-h06a4308_3561.conda
+  version: 2021.4.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: 68eedfd9c06f2b0e6888d8db345b7f5b
+    sha256: 0c7a6f340f4a9e15cc99b3c7e11f07c7670a9b881161739edd77753e5530fe31
+  manager: conda
+  name: ld_impl_linux-64
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/ld_impl_linux-64-2.38-h1181459_1.conda
+  version: '2.38'
+- category: main
+  dependencies: {}
+  hash:
+    md5: e3883581cbf0a98672250c3e80d292bf
+    sha256: 66d9ef6bbf134d7d39b39d79b48b7abccc042405880ab9c2a1799dabc78cf590
+  manager: conda
+  name: libgfortran4
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libgfortran4-7.5.0-ha8ba4b0_17.conda
+  version: 7.5.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: ce541c2473bd2d56da84ec8f241a8574
+    sha256: 75b0814dd95da20035c99f2bf494e5f77135f0e2b01eacda3f7fa8ae80f2a36e
+  manager: conda
+  name: libstdcxx-ng
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libstdcxx-ng-11.2.0-h1234567_0.conda
+  version: 11.2.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: 3310b0d36925b82ad3bd800758f4bf9f
+    sha256: 03438633485604de5e53d7ab47da533e85ce229099014dd7b20b1fe284d2937a
+  manager: conda
+  name: mpi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mpi-1.0-mpich.conda
+  version: '1.0'
+- category: main
+  dependencies:
+    __glibc: '>=2.17'
+    libgfortran4: 7.5.0.*
+  hash:
+    md5: ecb35c8952579d5c8dc56c6e076ba948
+    sha256: f962258281d978f7b0e8207304531a60860c2e54cf2f2e59ae380d22fe10f1d2
+  manager: conda
+  name: libgfortran-ng
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libgfortran-ng-7.5.0-ha8ba4b0_17.conda
+  version: 7.5.0
+- category: main
+  dependencies:
+    _libgcc_mutex: 0.1 main
+  hash:
+    md5: c8acb8d9aff1ead1b273ace299ca12d2
+    sha256: af5e140f4545d54aaa12c263879b0f9c0d6a30b5d1e7735d85a1eb70bc64d50a
+  manager: conda
+  name: libgomp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libgomp-11.2.0-h1234567_0.conda
+  version: 11.2.0
+- category: main
+  dependencies:
+    intel-openmp: 2021.*
+  hash:
+    md5: 06c81ed0b0c637506b4b0305cf59d121
+    sha256: 19cd4c513867c2fb79962122a6fb38fd36b49b3d08402907a389f3ce14b17f9c
+  manager: conda
+  name: mkl
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mkl-2021.2.0-h06a4308_296.conda
+  version: 2021.2.0
+- category: main
+  dependencies:
+    _libgcc_mutex: 0.1 main
+    libgomp: '>=7.5.0'
+  hash:
+    md5: 71d281e9c2192cb3fa425655a8defb85
+    sha256: 576011048d23f2e03372263493c5529f802286ff53e8426df99a5b11cc2572f3
+  manager: conda
+  name: _openmp_mutex
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/_openmp_mutex-5.1-1_gnu.conda
+  version: '5.1'
+- category: main
+  dependencies:
+    _libgcc_mutex: '* main'
+    _openmp_mutex: ''
+  hash:
+    md5: 83c045906d7d785252a34846348d16c6
+    sha256: 834dcb50b7574a072755541bc9c0264b214ae7a8ad6b63f3ce24a90bf079f004
+  manager: conda
+  name: libgcc-ng
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libgcc-ng-11.2.0-h1234567_0.conda
+  version: 11.2.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 6513a2c97bd29ec2978d163c7e1f7932
+    sha256: 9e936304be2da4ca78b208d52c09a3511a36c07ce2e2de7108f29da35bc36753
+  manager: conda
+  name: brotli
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/brotli-1.0.9-he6710b0_2.conda
+  version: 1.0.9
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 9303f4af7c004e069bae22bde8d800ee
+    sha256: d3fa503351a53692866182a3819fb28d105e32c25d177bf6a9444c4153215e0d
+  manager: conda
+  name: bzip2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/bzip2-1.0.8-h7b6447c_0.conda
+  version: 1.0.8
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 677634b170f698ba548ca8d57aa3fd1a
+    sha256: b4fac6b42ba3e06457b70723e88f43d7a7905288e5f8e3226687b0b9bdfc62f4
+  manager: conda
+  name: c-ares
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/c-ares-1.18.1-h7f8727e_0.conda
+  version: 1.18.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: abcace262ab5673ba2b89c658b7bc846
+    sha256: f4ed7440c21b469e10b84a6bd5a3bc9a59147526e9b6ec06b51b9d92724db169
+  manager: conda
+  name: charls
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/charls-2.2.0-h2531618_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: f9930c60940181cf06d0bd0b8095063c
+    sha256: e5ee99a475f5bebbdc3fe5773cec890cbcdf8c28c08ccab088ed0d8c1d8bff54
+  manager: conda
+  name: expat
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/expat-2.4.4-h295c915_0.conda
+  version: 2.4.4
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: c2583ad8de5051f19479580c58336f15
+    sha256: 6026e9f1bd54bbff846d938a7e060121bf941a3d82ebddc88de01d7f0af7b6dd
+  manager: conda
+  name: giflib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/giflib-5.2.1-h7b6447c_0.conda
+  version: 5.2.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 48cc14d5ad1a9bcd8dac17211a8deb8b
+    sha256: fdf7a480fcb598d164cbd28d3ce015d7de52db30f76faf7a15fbb8bbc7b8c5af
+  manager: conda
+  name: icu
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/icu-58.2-he6710b0_3.conda
+  version: '58.2'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: a0571bd2254b360aef526307a17f3526
+    sha256: 4a1bc1d3d77df3a4dfc9a6de450e304e7bb2bc7e5a37140352b7497474fbcfe6
+  manager: conda
+  name: jpeg
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jpeg-9e-h7f8727e_0.conda
+  version: 9e
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 3cc305f3788177c8ea28088590ab75a1
+    sha256: 26d8bdd124ba25a45425c18b41d9765db810deba870d55082104e011a4c6d7be
+  manager: conda
+  name: jxrlib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jxrlib-1.1-h7b6447c_2.conda
+  version: '1.1'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: b97309770412f10bed8d9448f6f98f87
+    sha256: e9b84d986a46d22c6a1713b267f98ed3adbd486e423d2be74a3b42416e5b3e60
+  manager: conda
+  name: lerc
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/lerc-3.0-h295c915_0.conda
+  version: '3.0'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 95e3b23fe7c0108bce3b6826749bb94d
+    sha256: f3c5042e35d675d1bec54e14d5f6c45fd4147446f8486dfc897daf603c81c8e2
+  manager: conda
+  name: libaec
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libaec-1.0.4-he6710b0_1.conda
+  version: 1.0.4
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 6942d65edab9a800900f43e750b3ad1f
+    sha256: f0a3d0a69ee162bba747e81b0770d3a14c0551600c41d607ac7b3e08e0a3bff4
+  manager: conda
+  name: libdeflate
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libdeflate-1.8-h7f8727e_5.conda
+  version: '1.8'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 5065620db4393fb549f30114a33897d1
+    sha256: 75f04cf201848d58df127caf9f316f71e1103b28e00b5add9b0c8025e52d7569
+  manager: conda
+  name: libev
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libev-4.33-h7f8727e_1.conda
+  version: '4.33'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 88a54b8f50e351c650e16f4ee781440c
+    sha256: 2cef7c80db19e83a38b6e02110f0e9828d3ef4045c38791d5b01006c44529093
+  manager: conda
+  name: libffi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libffi-3.3-he6710b0_2.conda
+  version: '3.3'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: c8783b20f0e14bc1d701352c26c264d5
+    sha256: be446d634c6f366fce72b77c7a3b2709f2fd29dd8eedab05df253e99ce08650a
+  manager: conda
+  name: libsodium
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libsodium-1.0.18-h7b6447c_0.conda
+  version: 1.0.18
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 6c4c9e96bfa4744d4839b9ed128e1114
+    sha256: 78e174d87231225fd813bd13b281cf249e59d21cc32fb7717473c4f65163af1b
+  manager: conda
+  name: libuuid
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libuuid-1.0.3-h7f8727e_2.conda
+  version: 1.0.3
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 162451b4884cfc7db8400580c711e83a
+    sha256: 200b4ae8d83efca7dda8a3855a380a602509a031678af5cf08b7aa3c62d56c08
+  manager: conda
+  name: libwebp-base
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libwebp-base-1.2.2-h7f8727e_0.conda
+  version: 1.2.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: ada518dcadd6aaee9aae47ba9a671553
+    sha256: 9feb197125e4803d702cfdf10116c856e381b59f845b3346f50cb10e9bb8c643
+  manager: conda
+  name: libxcb
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libxcb-1.15-h7f8727e_0.conda
+  version: '1.15'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 8671895f71c9046f38b814f8662226f5
+    sha256: b7d6dee99369d2e602bb0901e7d468db3d6567a4af3666b75213ef268c560d39
+  manager: conda
+  name: libzopfli
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libzopfli-1.0.3-he6710b0_0.conda
+  version: 1.0.3
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: d9bd18f73ff566e08add10a54a3463cf
+    sha256: e37c0efa15afddeaa329a76add1c657787efba334002b31449a3996175215330
+  manager: conda
+  name: lz4-c
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/lz4-c-1.9.3-h295c915_1.conda
+  version: 1.9.3
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 65722a7644f424de73fea6e87edd7653
+    sha256: 99dc28dd81f7d94a363a242e56fb9ebdaf350ca6445a8f1b31dfe78be474a5df
+  manager: conda
+  name: lzo
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/lzo-2.10-h7b6447c_2.conda
+  version: '2.10'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libgfortran-ng: '>=7,<8.0a0'
+    libstdcxx-ng: '>=7.3.0'
+    mpi: 1.0 mpich
+  hash:
+    md5: 3ca2b609c8caabd6ebe6122f60069fda
+    sha256: 62adbd580e9b7956d5292abb3afacaff8fe8714a2e93c49fd0efacbcfea2f4c7
+  manager: conda
+  name: mpich
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mpich-3.3.2-hc856adb_0.conda
+  version: 3.3.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 4edf660a09cc7adcb21120464b2a1783
+    sha256: c1a4de3a57e4ed4f2a02b71dc722a0dde18fda80838a7d4aaeaefd834f8fbea2
+  manager: conda
+  name: ncurses
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/ncurses-6.3-h7f8727e_2.conda
+  version: '6.3'
+- category: main
+  dependencies:
+    ca-certificates: ''
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: dff07c1e2347fed6e5a3afbbcd5bddcc
+    sha256: ad0413946d9dd5e56a99f01a241a9125462f6c23ebc4f11550191fefeac0d936
+  manager: conda
+  name: openssl
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/openssl-1.1.1o-h7f8727e_0.conda
+  version: 1.1.1o
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: b32ccc24d1d9808618c1e898da60f68d
+    sha256: 2bf0144c02007ade42d6a5305987974cc751c7f2e2107d6f88c23d43ce354d5a
+  manager: conda
+  name: pcre
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pcre-8.45-h295c915_0.conda
+  version: '8.45'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: 807eab3ad2fe697e8013dd6b9ea830f2
+    sha256: d073a1e9f4cb5d86aae341338aee24b5dd4e7ea658892f28dfab4c05766aa6d2
+  manager: conda
+  name: snappy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/snappy-1.1.9-h295c915_0.conda
+  version: 1.1.9
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 7d06fdc8b4f3e389f26f67311c7ccf5f
+    sha256: a8985c2c4dd9d08f335eeb2d8de3326b27c68d48d1c8d02025b7f71482f70497
+  manager: conda
+  name: tbb
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/tbb-2020.3-hfd86e86_0.conda
+  version: '2020.3'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 5d01fcf310bf465237f6b95a019d73bc
+    sha256: 3aebdee7910426ecbecd5e67c3759dfb68d21c9d24c0f093ccbdda42cc23b23c
+  manager: conda
+  name: xz
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/xz-5.2.5-h7f8727e_1.conda
+  version: 5.2.5
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 39fdbf4db769e494ffb06d95680c83d8
+    sha256: e22753e19432d606139f7a604757839d265dff93345226ba0732676526870e28
+  manager: conda
+  name: yaml
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/yaml-0.2.5-h7b6447c_0.conda
+  version: 0.2.5
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+  hash:
+    md5: a20971c5ed1ae5a1ebb442b75edb48df
+    sha256: a4487c46599640c69bda69747fdc110c49799c15d56c3e421401d2e17821b783
+  manager: conda
+  name: zfp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/zfp-0.5.5-h295c915_6.conda
+  version: 0.5.5
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+  hash:
+    md5: 4f4080e9939f082332cd8be7fedad087
+    sha256: d4aa8bdf122d3a53845db459d343c8b21ba51907d3c8dc5d2df3ff22dc98c6ca
+  manager: conda
+  name: zlib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/zlib-1.2.12-h7f8727e_2.conda
+  version: 1.2.12
+- category: main
+  dependencies:
+    brotli: '>=1.0.9,<2.0a0'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 6777d1b10f8e02143f9708699f7ab354
+    sha256: 931b2dfe51265fa8c1d240a91fd4b763302ec3a550e7ff0b0ab5ed68ff17b6c7
+  manager: conda
+  name: brunsli
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/brunsli-0.1-h2531618_0.conda
+  version: '0.1'
+- category: main
+  dependencies:
+    libffi: '>=3.3,<3.4.0a0'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    pcre: '>=8.45,<9.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 4c3eae7c0b8b1c8fb3046a0740313bbf
+    sha256: ff780e0972134a6395d7e17cd74094a275c11d67538a99fc0cac31b8e09f266b
+  manager: conda
+  name: glib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/glib-2.69.1-h4ff587b_1.conda
+  version: 2.69.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    ncurses: '>=6.3,<7.0a0'
+  hash:
+    md5: cf16006f8f24e4224ddce196471d2509
+    sha256: 6bfecab0a81cd806c353cfe60bc7528ed93f047e6355476d0db131b9d1e1ba31
+  manager: conda
+  name: libedit
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libedit-3.1.20210910-h7f8727e_0.conda
+  version: 3.1.20210910
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 198e840fc17a5bff7f1ee543ee1981b2
+    sha256: 58e06c97453d1dd486d35274fa5c60eb2a666c9b99df4b3594f4758ac1eda984
+  manager: conda
+  name: libllvm10
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libllvm10-10.0.1-hbcb73fb_5.conda
+  version: 10.0.1
+- category: main
+  dependencies:
+    c-ares: '>=1.7.5'
+    libev: '>=4.33,<4.34.0a0'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    openssl: '>=1.1.1l,<1.1.2a'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: bf09e3bce5afd6b281f6e3722c4b7f7b
+    sha256: 026a81c38532e2009b45343002303115c6af4d1ad830f66abdf392796a26f3d7
+  manager: conda
+  name: libnghttp2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libnghttp2-1.46.0-hce63b2e_0.conda
+  version: 1.46.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 689f903925dcf6c5ab7bc1de0f58b67b
+    sha256: 4f99638c8da0abe841c2b4bc337839f2a01363d40cfc334c0084e79dc0fd0a0d
+  manager: conda
+  name: libpng
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libpng-1.6.37-hbc83047_0.conda
+  version: 1.6.37
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    openssl: '>=1.1.1n,<1.1.2a'
+  hash:
+    md5: dede0f0061d9891642f640c2c4ea442e
+    sha256: d462b8a03cfa2599b8d8711c63109997ee555b6db63a411d7632c1b087229f40
+  manager: conda
+  name: libssh2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libssh2-1.10.0-h8f2d780_0.conda
+  version: 1.10.0
+- category: main
+  dependencies:
+    icu: '>=58.2,<59.0a0'
+    libgcc-ng: '>=7.5.0'
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.12,<1.3.0a0'
+  hash:
+    md5: eff5ba91c84a8329c2a1117bee13cd68
+    sha256: f09c6452b6bd81cf0d7ea05325a7c5aaaa99a10ec12b8be7a68982e3e95dfd1b
+  manager: conda
+  name: libxml2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libxml2-2.9.12-h74e7548_2.conda
+  version: 2.9.12
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    ncurses: '>=6.3,<7.0a0'
+  hash:
+    md5: ea33f478fea12406f394944e7e4f3d20
+    sha256: ab74641903ecb92f45c392cffe5bbb712208854c6ea20ab86b65dd4dfc1b1beb
+  manager: conda
+  name: readline
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/readline-8.1.2-h7f8727e_1.conda
+  version: 8.1.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    zlib: '>=1.2.12,<1.3.0a0'
+  hash:
+    md5: 5d7d7abe559370a7a8519177929dd338
+    sha256: 7b3e90bdd6be37143e32047c0a2f22cd1fee609c3802c5f0affe9d469817a7e8
+  manager: conda
+  name: tk
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/tk-8.6.11-h1ccaba5_1.conda
+  version: 8.6.11
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libsodium: '>=1.0.18,<1.0.19.0a0'
+    libstdcxx-ng: '>=7.3.0'
+  hash:
+    md5: 45ce422428d9c98f354aec4b5cbd1046
+    sha256: 3d338592b4515c84764249b5a7be8ebfdd82d1e79953ceb0c23352bf2f6817b6
+  manager: conda
+  name: zeromq
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/zeromq-4.3.4-h2531618_0.conda
+  version: 4.3.4
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 2e81424da35919b0f552b9e5ba0a37ba
+    sha256: 5ebb3e77ad431e24226cdee8884fb39f511d36fae598d1ce1f50081545a046a3
+  manager: conda
+  name: zstd
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/zstd-1.4.9-haebb681_0.conda
+  version: 1.4.9
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.5,<1.5.0a0'
+  hash:
+    md5: 34cd84f63d186aa8c921317683523a25
+    sha256: a176a2b7b1ea9f30c5e11925dddc151690173f38bcde1386433291291737056e
+  manager: conda
+  name: blosc
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/blosc-1.21.0-h8c45485_0.conda
+  version: 1.21.0
+- category: main
+  dependencies:
+    expat: '>=2.2.10,<3.0a0'
+    glib: ''
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 6a6a6f1391f807847404344489ef6cf4
+    sha256: 33cffa33d77d6bfa4e157322e188d82deb904efe1bc867d1ee08fcee9275b8ab
+  manager: conda
+  name: dbus
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/dbus-1.13.18-hb2f20db_0.conda
+  version: 1.13.18
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: b767874a6273e1058027cb2e300d00ac
+    sha256: cb29bcdeabea6bb96f2b6339f2ecdbeeb6d4683ff3ddb1b05a490138511419a2
+  manager: conda
+  name: freetype
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/freetype-2.11.0-h70c0345_0.conda
+  version: 2.11.0
+- category: main
+  dependencies:
+    glib: '>=2.66.1,<3.0a0'
+    libgcc-ng: '>=7.3.0'
+  hash:
+    md5: 6af5d0cbd7310e1cd8a6a5c1c99649b2
+    sha256: 254b4fe8b685fc293c250ca239d0be03e95f3d909972827fc472129ca2590d85
+  manager: conda
+  name: gstreamer
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/gstreamer-1.14.0-h28cd5cc_2.conda
+  version: 1.14.0
+- category: main
+  dependencies:
+    libedit: '>=3.1.20210216,<4.0a0'
+    libgcc-ng: '>=7.5.0'
+    openssl: '>=1.1.1k,<1.1.2a'
+  hash:
+    md5: 62a43976b48799377103390c340a3824
+    sha256: b8bddb4e3cb60400d5d43f020ff8c233f3fe963fe11876967de01f205d6a45e6
+  manager: conda
+  name: krb5
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/krb5-1.19.2-hac12032_0.conda
+  version: 1.19.2
+- category: main
+  dependencies:
+    jpeg: '>=9b,<10a'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    libwebp-base: ''
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.5,<1.5.0a0'
+  hash:
+    md5: a70887f6e46ea21d5e4e27685bd59ff9
+    sha256: abef7c57f548dd2b9dac487db1a9ffaf949b9505e94af5566778ed8116babbe7
+  manager: conda
+  name: libtiff
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libtiff-4.2.0-h85742a9_0.conda
+  version: 4.2.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    readline: '>=8.0,<9.0a0'
+    zlib: '>=1.2.12,<1.3.0a0'
+  hash:
+    md5: 94e50b233f796aa4e0b7cf38611c0852
+    sha256: 2778a98881e694c4cc602f14da045feb20d4c3680776935e252732f9ef223852
+  manager: conda
+  name: sqlite
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/sqlite-3.38.3-hc218d9a_0.conda
+  version: 3.38.3
+- category: main
+  dependencies:
+    freetype: '>=2.10.4,<3.0a0'
+    libgcc-ng: '>=7.3.0'
+    libuuid: '>=1.0.3,<2.0a0'
+    libxml2: '>=2.9.10,<2.10.0a0'
+  hash:
+    md5: fa04e89166d4b44326c6d76e2f708715
+    sha256: fa9d98e9653b68717f4cf09f3abfd364c84ac89e2190b976a8f1bb8771b80115
+  manager: conda
+  name: fontconfig
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/fontconfig-2.13.1-h6c09931_0.conda
+  version: 2.13.1
+- category: main
+  dependencies:
+    glib: '>=2.66.1,<3.0a0'
+    gstreamer: '>=1.14.0,<2.0a0'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    libxcb: '>=1.14,<2.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 838648422452405b86699e780e293c1d
+    sha256: 61394888b29e57742a477f2134f735fbd4c39ae867c111badd7d8d410046bc7a
+  manager: conda
+  name: gst-plugins-base
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/gst-plugins-base-1.14.0-h8213a91_2.conda
+  version: 1.14.0
+- category: main
+  dependencies:
+    jpeg: '>=9b,<10a'
+    libgcc-ng: '>=7.3.0'
+    libtiff: '>=4.1.0,<5.0a0'
+  hash:
+    md5: 719db47afba9f6586eecb5eacac70bff
+    sha256: c7fdebc0954fe6d272c2a477a1067dc96ebc36fb43109b33378e0e9c2732a847
+  manager: conda
+  name: lcms2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/lcms2-2.12-h3be6417_0.conda
+  version: '2.12'
+- category: main
+  dependencies:
+    krb5: '>=1.19.2,<1.20.0a0'
+    libgcc-ng: '>=7.5.0'
+    libnghttp2: '>=1.46.0,<2.0a0'
+    libssh2: '>=1.9.0,<2.0a0'
+    openssl: '>=1.1.1n,<1.1.2a'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 1a3f73bece51854cd6afd18238a983c6
+    sha256: 57464474ae0515249d1d3fdd8a5c62b34e9478b6e67a71bbf26f4f335c2e653d
+  manager: conda
+  name: libcurl
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libcurl-7.82.0-h0b77cf5_0.conda
+  version: 7.82.0
+- category: main
+  dependencies:
+    giflib: '>=5.2.1,<5.3.0a0'
+    jpeg: '>=9d,<10a'
+    libgcc-ng: '>=7.5.0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp-base: ''
+  hash:
+    md5: c9ed6bddefc09dbfc246301c3ce3ca14
+    sha256: 7d5eb348332133225e5d39eaa143a964239bcd1c4044cf55fdf0dfcbdd8c5853
+  manager: conda
+  name: libwebp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/libwebp-1.2.2-h55f646e_0.conda
+  version: 1.2.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libstdcxx-ng: '>=7.5.0'
+    libtiff: '>=4.1.0,<5.0a0'
+  hash:
+    md5: 86baecb47ecaa7f7ff2657a1f03b90c9
+    sha256: 7db5ad4d3119105b26a023702b24eb4d190700e9393df04aec3ad1e0f07c7354
+  manager: conda
+  name: openjpeg
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/openjpeg-2.4.0-h3ad879b_0.conda
+  version: 2.4.0
+- category: main
+  dependencies:
+    ld_impl_linux-64: ''
+    libffi: '>=3.3,<3.4.0a0'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    ncurses: '>=6.3,<7.0a0'
+    openssl: '>=1.1.1n,<1.1.2a'
+    readline: '>=8.0,<9.0a0'
+    sqlite: '>=3.38.0,<4.0a0'
+    tk: '>=8.6.11,<8.7.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: edc17980bae484b711e090f0a0cbbaef
+    sha256: cd1517bcec4137ceff1441221ace858266b33189c56115538c097a6c87e5b03c
+  manager: conda
+  name: python
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/python-3.8.13-h12debd9_0.conda
+  version: 3.8.13
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 21ad3b69a5ce6c22e724e9dbb4cffa65
+    sha256: 014761192dcba8728848de354b9bbde31dbecdc966fbe3622c495ac50dfa834e
+  manager: conda
+  name: alabaster
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/alabaster-0.7.12-pyhd3eb1b0_0.tar.bz2
+  version: 0.7.12
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 5673d98d06171cb6eed03a6736845c4d
+    sha256: 4d849f6c8a4b60166ec21c7716de9589c083c74416a64876dd9ac6c613520a08
+  manager: conda
+  name: appdirs
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/appdirs-1.4.4-pyhd3eb1b0_0.conda
+  version: 1.4.4
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 88e5fad50e595d527acfc96b782261cb
+    sha256: 1d4334a37f237428b8ab4319dbc8d24fa2bf949da3b86715989be7384e243490
+  manager: conda
+  name: asciitree
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/asciitree-0.3.3-py_2.conda
+  version: 0.3.3
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 3bc977a57587a7964921e3e1e2e31f9e
+    sha256: daf213916e7797c3493db7bbe43b3235d0606a18dd152af129a14bec8e5f56a2
+  manager: conda
+  name: attrs
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/attrs-21.4.0-pyhd3eb1b0_0.conda
+  version: 21.4.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: b2aa5503875aba2f1d88cae9df9a96d5
+    sha256: 09f2cf5b30825a39257320204b08146367f4346655197515b2cabcd363736488
+  manager: conda
+  name: backcall
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2
+  version: 0.2.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: dee2837b4ce535119636eb15ab312fd2
+    sha256: d34416942dee94a56eb2e1a66205194e093ec7dcb197c19c1a0fdb016f750640
+  manager: conda
+  name: certifi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/certifi-2022.5.18.1-py38h06a4308_0.conda
+  version: 2022.5.18.1
+- category: main
+  dependencies:
+    python: '>=3.6.1'
+  hash:
+    md5: ebb5f5f7dc4f1a3780ef7ea7738db08c
+    sha256: fbc03537a27ef756162c49b1d0608bf7ab12fa5e38ceb8563d6f4859e835ac5c
+  manager: conda
+  name: cfgv
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.3.1-pyhd8ed1ab_0.tar.bz2
+  version: 3.3.1
+- category: main
+  dependencies:
+    bzip2: '>=1.0.8,<2.0a0'
+    libcurl: '>=7.71.1,<8.0a0'
+    libgcc-ng: '>=7.3.0'
+    libgfortran-ng: '>=7,<8.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: f590184d85f5ba8564bcfd052c09862d
+    sha256: 4d63abb7c67aed8eca1d887433b829c2195911ab4ad492261216dd3fbba6caba
+  manager: conda
+  name: cfitsio
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/cfitsio-3.470-hf0d0db6_6.conda
+  version: '3.470'
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: e7a441d94234b2b5fafee06e25dbf076
+    sha256: b39aea12bf02654cdd0094c79bfa6edbc8d054787f6e2d0b96d403cd4ba4cc0d
+  manager: conda
+  name: charset-normalizer
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/charset-normalizer-2.0.4-pyhd3eb1b0_0.conda
+  version: 2.0.4
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 8e38585c33e6c659e0e5b0b18e6bf3e2
+    sha256: 1fef66d73901bd403bd5f1631e686d98e15d0b6ec7d59dc59f819bffc755b832
+  manager: conda
+  name: cloudpickle
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-2.0.0-pyhd3eb1b0_0.conda
+  version: 2.0.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: f550604d18b83878f647a491b2b343d6
+    sha256: 2224b228e6d511f11b6d76ed95107b4ea79f3d58939b8638b78485d205d38140
+  manager: conda
+  name: colorama
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda
+  version: 0.4.4
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 42741b84cb7f6cfd80c5093f26d75659
+    sha256: 7f671e792020ae90eb382ffb9fa85d6f6b42bc5b6b62e917f4d5bf8a460e96b8
+  manager: conda
+  name: coverage
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/coverage-5.5-py38h27cfd23_2.conda
+  version: '5.5'
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: f5e365d2cdb66d547eb8c3ab93843aab
+    sha256: dda35af3a1f92960aa01fee1e3c3587da67a23f186a09a2c313fc325392216c3
+  manager: conda
+  name: cycler
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/cycler-0.11.0-pyhd3eb1b0_0.conda
+  version: 0.11.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 15761a758f4788b5e068c0b2288afab5
+    sha256: 06b2eae48400bff98062ded7708e53310e0d4ef56763ee78e59f089e3bfeace6
+  manager: conda
+  name: debugpy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/debugpy-1.5.1-py38h295c915_0.conda
+  version: 1.5.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 4d969aac32a0faf84af90c797bfc7fec
+    sha256: 094d553ca784fb55afdb969a028ec1de177b152f14359e998a6ba94394a09d7f
+  manager: conda
+  name: decorator
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/decorator-5.1.1-pyhd3eb1b0_0.conda
+  version: 5.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: d912068b0729930972adcaac338882c0
+    sha256: d5ccad2e614ba3f953c202a42270fe0cfdaf6c5071311a3accf28446c49a6c5b
+  manager: conda
+  name: defusedxml
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda
+  version: 0.7.1
+- category: main
+  dependencies:
+    python: 2.7|>=3.6
+  hash:
+    md5: 86c256c16d9b416ffee75a4cfccf6c9a
+    sha256: 5cc5c5d866dc7dce069e27a430bd9ae7668d56b8a5814552fd0b52930679cfa3
+  manager: conda
+  name: distlib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/distlib-0.3.2-pyhd3eb1b0_0.conda
+  version: 0.3.2
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: d90ea0ab17a922c881d967ef5b239752
+    sha256: 6c7f5675341c0ffdffd73801c7b4a1c01dd1d421768a7411823120460b221769
+  manager: conda
+  name: docutils
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/docutils-0.17.1-py38h06a4308_1.conda
+  version: 0.17.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 3cdf167326744187efd003c24ab77c99
+    sha256: 5fa28fb8902784fd0382a7fcb7743dced5d6055a12420447b0741b69b89e23dd
+  manager: conda
+  name: entrypoints
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/entrypoints-0.4-py38h06a4308_0.conda
+  version: '0.4'
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: 7be61d1c3c555fb37682b28d7a53d622
+    sha256: 19861fd553ee36da352401ece4564271be8a958726a527b9731f229fa6131c5d
+  manager: conda
+  name: executing
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/executing-0.8.3-pyhd3eb1b0_0.conda
+  version: 0.8.3
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: 527be2ebbc60c0de6533ce33132ce303
+    sha256: c73632a2f1b916a7f21a0aac4e8bed971bff9baa665f46e0674f3f5d8c304dc1
+  manager: conda
+  name: filelock
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/filelock-3.6.0-pyhd3eb1b0_0.conda
+  version: 3.6.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 45df02eb7d28e318d96e44caa6294626
+    sha256: 5d2f6b43566fce7db18ce34a300ff0ec2722fefe74203967d364de3b03932ddb
+  manager: conda
+  name: fsspec
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/fsspec-2022.3.0-py38h06a4308_0.conda
+  version: 2022.3.0
+- category: main
+  dependencies:
+    libcurl: '>=7.71.1,<8.0a0'
+    libgcc-ng: '>=7.5.0'
+    libgfortran-ng: ''
+    libgfortran4: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    mpich: '>=3.3,<5.0.0a0'
+    openssl: '>=1.1.1h,<1.1.2a'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: fbff5732eb8a94e4107e3bb8a1ec14c8
+    sha256: def503388d9c3ca569887982fafa03675d44fa9185e66a14191394844ea2a062
+  manager: conda
+  name: hdf5
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/hdf5-1.10.5-mpi_mpich_hc41e3f9_1011.tar.bz2
+  version: 1.10.5
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 8f43a528cf83b43af38a4d142fa38b8a
+    sha256: 2214ad8a3c5f0afc3c13ced28dd1961b98ff780b4e8562357a85c243e7fe678e
+  manager: conda
+  name: idna
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/idna-3.3-pyhd3eb1b0_0.conda
+  version: '3.3'
+- category: main
+  dependencies:
+    python: '>=3.4'
+  hash:
+    md5: 306855b2038e489d01dff5b343a8adb9
+    sha256: 53067081ee63245250c34ce8224c43acafffd4b4d661302594203d54a9ba02b2
+  manager: conda
+  name: imagesize
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/imagesize-1.3.0-pyhd3eb1b0_0.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: e40edff2c5708f342cef43c7f280c507
+    sha256: d288f67c03b20a885464056a215baef626d29f95df56e1974379a17f8e55a24e
+  manager: conda
+  name: iniconfig
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/iniconfig-1.1.1-pyhd3eb1b0_0.tar.bz2
+  version: 1.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 553832c0b872a28088a0001fa2ba3822
+    sha256: 57b26ed1992a45f5c3c1261001956cd31c05ffe7c57c452af07bc3417c57d143
+  manager: conda
+  name: ipython_genutils
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda
+  version: 0.2.0
+- category: main
+  dependencies:
+    python: '>=3.6.1,<4.0'
+  hash:
+    md5: 75f2497fe01a9ac6208d72e26066b76a
+    sha256: c20bf1d70180ea452b16b89b9a62aab2192a8b8ad71d63c46beabc1fbb9b2451
+  manager: conda
+  name: isort
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/isort-5.9.3-pyhd3eb1b0_0.conda
+  version: 5.9.3
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: cae25b839f3b24686e683addde01b742
+    sha256: fba9ae67546481614bc575c5be838184f3856110a0d6a948aff34872da54ef67
+  manager: conda
+  name: joblib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/joblib-1.1.0-pyhd3eb1b0_0.conda
+  version: 1.1.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 4e721ee2dbfa20069719d2ee19185031
+    sha256: 595b96c7e20703125ff4865ee55ba282357b57438a609dd7660610318127ca2e
+  manager: conda
+  name: json5
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/json5-0.9.6-pyhd3eb1b0_0.conda
+  version: 0.9.6
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 00e5f5a50b547c8c31d1a559828f3251
+    sha256: 2d6a427df3ce57ddced40c49409354ee6cdb4f7e264b8fc1ff0a9076e5e6eef7
+  manager: conda
+  name: kiwisolver
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/kiwisolver-1.4.2-py38h295c915_0.conda
+  version: 1.4.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libllvm10: '>=10.0.1,<10.1.0a0'
+    libstdcxx-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 0272f086c1709b7b8be5accdc8c3c9e9
+    sha256: e90d13bbb2d72f079ad8b3d93b51206eb3a7c9d358f806f6582a50085b147e8c
+  manager: conda
+  name: llvmlite
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/llvmlite-0.36.0-py38h612dafd_4.conda
+  version: 0.36.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 9075221bb581a3c2fc5a079efc183784
+    sha256: 28593c590ee13208124cbba08fe01c3d607a657b2e32881cb6df1575ecc7d138
+  manager: conda
+  name: locket
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/locket-1.0.0-py38h06a4308_0.conda
+  version: 1.0.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 50dcb2135a5ded2e9f60a74bf5093e58
+    sha256: 80c0b3f965568e071d3dbf7ca454da285fcd58702bd56deb05359890e10d03f0
+  manager: conda
+  name: markupsafe
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/markupsafe-2.0.1-py38h27cfd23_0.conda
+  version: 2.0.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 5dec11e3e49e564e6c3a50877767444e
+    sha256: 3349b74a44d698b5a1f8444c19710df4529497581056c1ccc75c27418413ad15
+  manager: conda
+  name: mccabe
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mccabe-0.6.1-py38_1.conda
+  version: 0.6.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 5b985f581fab6e8aff8157976b0868a9
+    sha256: 6f6e48e137e32f74d12f8dffcd1a68bb73882ed33213b5d941b6a8dbeb5b6a74
+  manager: conda
+  name: mistune
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mistune-0.8.4-py38h7b6447c_1000.conda
+  version: 0.8.4
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: e30b674f018b25357c076ae407d769b9
+    sha256: 0924c0efe3d2b48a34664533b14b2df85e979c56e2d578db9cadd58bb490b815
+  manager: conda
+  name: mock
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/mock-4.0.3-pyhd3eb1b0_0.conda
+  version: 4.0.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: ac1210cc005fb8bd631ea8beb8343332
+    sha256: 96aefa911a4022f832148e09df4cecb3a0e62ac353e68f7b27018b5c594c9491
+  manager: conda
+  name: more-itertools
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/more-itertools-8.12.0-pyhd3eb1b0_0.conda
+  version: 8.12.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    mpich: '>=3.3,<3.4.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e5479dc898e61765deee83aba69afe7c
+    sha256: 58dfade77824f38d25b38159e7f920c015e519fffc3b6cca386b0a49bc2ef491
+  manager: conda
+  name: mpi4py
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mpi4py-3.0.3-py38h028fd6f_0.conda
+  version: 3.0.3
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: c64b1bcb40a89d090ea5cf08360b4f57
+    sha256: 7c80723c9fd7e19365789c2b2ff0d534ac7376dd9a602ba1a232046f9ed2f285
+  manager: conda
+  name: msgpack-python
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/msgpack-python-1.0.3-py38hd09550d_0.conda
+  version: 1.0.3
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 148362ba07f92abab76999a680c80084
+    sha256: f3f473a60a6766a7f64447c0c7e49d0640db7f995255951e3477b5d21d144dc7
+  manager: conda
+  name: munkres
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/munkres-1.1.4-py_0.conda
+  version: 1.1.4
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: bbf9623dd1f0b2d8091f8b5a6eb63da5
+    sha256: 61157d251407aa8ae6f4f1979a3dd8d1702aad78bdd186943a4efbbf5e629513
+  manager: conda
+  name: nest-asyncio
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/nest-asyncio-1.5.5-py38h06a4308_0.conda
+  version: 1.5.5
+- category: main
+  dependencies:
+    python: '>=3.8'
+  hash:
+    md5: 6c97a8687676de8dac42bd8373892397
+    sha256: def7767107e59907dc1a7ccfbf47657770b1fa0ddfb735a6b4e6e10833d0cc0a
+  manager: conda
+  name: networkx
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/networkx-2.7.1-pyhd3eb1b0_0.conda
+  version: 2.7.1
+- category: main
+  dependencies:
+    python: '!=3.0,!=3.1,!=3.2,!=3.3'
+  hash:
+    md5: 5547ced9e3bb4c513405998957b52c7b
+    sha256: 39f9d46b6334bd0492ca89c095aea10de7d0976dc96d20013a4ef028e21e6b2a
+  manager: conda
+  name: pandocfilters
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pandocfilters-1.5.0-pyhd3eb1b0_0.conda
+  version: 1.5.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: c6f0f6219bf5ce2b510ef4b75cbc3e01
+    sha256: e6be6d3a4fec00fc3699a716bbbf48779ef4ab9149fa92df71d9a03d69a66a84
+  manager: conda
+  name: parso
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.3-pyhd3eb1b0_0.conda
+  version: 0.8.3
+- category: main
+  dependencies:
+    python: '>=3'
+  hash:
+    md5: 4a6363fd8dda664b95f99f7c5aa95abc
+    sha256: c4c974ac5bab1628bc472eb271903206ebc349f30cf590740560b6284118852a
+  manager: conda
+  name: pickleshare
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda
+  version: 0.7.5
+- category: main
+  dependencies:
+    freetype: '>=2.10.4,<3.0a0'
+    jpeg: ''
+    lcms2: '>=2.12,<3.0a0'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp: '>=1.2.0,<1.3.0a0'
+    python: '>=3.8,<3.9.0a0'
+    tk: ''
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 13c7b8b727dc6af99e9f6d75b3ec18f3
+    sha256: 2bf283b58146c8c59d93351ae83ca78c770ee135b983f9a405aae3d03cee2985
+  manager: conda
+  name: pillow
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pillow-9.0.1-py38h22f2fdc_0.conda
+  version: 9.0.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 4e25e8a00605b6b286f96900d6e8caa9
+    sha256: da03b35b906cd0e2ef293bd634fa084083702dcbbfbe9a4265b884776335f654
+  manager: conda
+  name: pluggy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pluggy-0.13.1-py38h06a4308_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    python: '>=2.7,!=3.0,!=3.1,!=3.2,!=3.3,!=3.4,<4'
+  hash:
+    md5: 101a437c0ab238eaa1736dd665b33fa2
+    sha256: 691d29b4dd9784b9cb753e312cb336ef7a47c9f2c2eb5682cbcbc94eff976f8f
+  manager: conda
+  name: prefixed
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/noarch/prefixed-0.3.2-pyhd8ed1ab_0.tar.bz2
+  version: 0.3.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 05275f89084c4ce7f9b0bc1e258b3e9e
+    sha256: 765445941cb6bebab31b3a10f7f16e17f7c04f71c39c4e908da4855856f487f8
+  manager: conda
+  name: prometheus_client
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.13.1-pyhd3eb1b0_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 7441d2827d4bfbcc1fa308875a146246
+    sha256: 664254ab6de7f14d4077bdaeceda2bf0144fd841e257d07bb70427fadf08c588
+  manager: conda
+  name: ptyprocess
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/ptyprocess-0.7.0-pyhd3eb1b0_2.conda
+  version: 0.7.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: a87d6d9827e5dff68d34d69971f8a9b1
+    sha256: e579ae0a1205e5706dbf00deacbebc87b889f11c48e2f12323dfec7d2d15b27c
+  manager: conda
+  name: pure_eval
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pure_eval-0.2.2-pyhd3eb1b0_0.conda
+  version: 0.2.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 7205a898ed2abbf6e9b903dff6abe08e
+    sha256: 4dd7cbf9d6a95aa9fd1ae74db2fabc7cf904e9d61a479f169c8e118a800fcdb3
+  manager: conda
+  name: py
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/py-1.11.0-pyhd3eb1b0_0.conda
+  version: 1.11.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 30e8cdd78a0754c2d789d53fa465cd30
+    sha256: 250377acee595ce4de41ee3a81bd76a6cdf7f5a75c7dcf243921ff780c7afd00
+  manager: conda
+  name: pycodestyle
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pycodestyle-2.7.0-pyhd3eb1b0_0.conda
+  version: 2.7.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 135a72ff2a31150a3a3ff0b1edd41ca9
+    sha256: 4405b5aeff26863972c82e8b54d09f88cd084f70e01e4343107b2676ffbeab57
+  manager: conda
+  name: pycparser
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.21-pyhd3eb1b0_0.conda
+  version: '2.21'
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: eaecb0dee9d296e2ba1dadf6902149f3
+    sha256: 03e904ba20d625cc9177b97b4f8d5461427c9d0bd4098a0b179d471522edba0d
+  manager: conda
+  name: pyflakes
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyflakes-2.3.1-pyhd3eb1b0_0.conda
+  version: 2.3.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: eff55c770961f459a734cf86768aac98
+    sha256: c764e23bddaa42add41931581cb97f6c5857b4075ec4f3470c59dd8d84954a68
+  manager: conda
+  name: pygments
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pygments-2.11.2-pyhd3eb1b0_0.conda
+  version: 2.11.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 6bca2ae9c9aae9ccdebcb8cf2aa87cb3
+    sha256: 283f6336e6c02b1fb0310b10a609628631b97f280d30320655f31219d7baf568
+  manager: conda
+  name: pyparsing
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyparsing-3.0.4-pyhd3eb1b0_0.conda
+  version: 3.0.4
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: d350240d35e34c9e250cbb1b9aa6460b
+    sha256: 9681fb4b3b26dda8ccbddf81c5a88ea8d1c7be51541590e801d7acae849eb16d
+  manager: conda
+  name: pyrsistent
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pyrsistent-0.18.0-py38heee7806_0.conda
+  version: 0.18.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 21c67581f3a81ffbb02728eb2178d693
+    sha256: 59e37131acb2f5125e0942bc0f28c8ee58d034a078ab136d3f4ede64ef7bbc21
+  manager: conda
+  name: pysocks
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pysocks-1.7.1-py38h06a4308_0.conda
+  version: 1.7.1
+- category: main
+  dependencies:
+    python: '>=3.3'
+  hash:
+    md5: ad1b2f7b33a45d0d68979ca2ad84b6a9
+    sha256: b723a1b02d5c8ba54b10003008930bf815f7ca4c9a01c283a05fe24b7fe0eb9b
+  manager: conda
+  name: python-fastjsonschema
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/python-fastjsonschema-2.15.1-pyhd3eb1b0_0.conda
+  version: 2.15.1
+- category: main
+  dependencies:
+    python: 3.8.*
+  hash:
+    md5: bfbb29d517281e78ac53e48d21e6e860
+    sha256: 8535eaa9225ce212f8ed437c4a7341409903a4c005ecb6d36c63af2793b59689
+  manager: conda
+  name: python_abi
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.8-2_cp38.tar.bz2
+  version: '3.8'
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 76415b791ffd2007687ac5f0665aa7af
+    sha256: a82bbd32ba9470ef8ccf9ba17751cafc4dff6f1b3f55136d3e16a11766ccaed5
+  manager: conda
+  name: pytz
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pytz-2021.3-pyhd3eb1b0_0.conda
+  version: '2021.3'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+    yaml: '>=0.2.5,<0.3.0a0'
+  hash:
+    md5: 2bd06f71e7a66ee11a748fd2f3a49aa9
+    sha256: 4002258b9fcb9c295d227a4f869bc2afad1d7620ab00070f4024119993bee76b
+  manager: conda
+  name: pyyaml
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pyyaml-6.0-py38h7f8727e_1.conda
+  version: '6.0'
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+    zeromq: '>=4.3.4,<4.4.0a0'
+  hash:
+    md5: 4b7508c58d9552487e5730f55c5cd5ac
+    sha256: 8b7355e07f5bf5adcb9e49859fc83033a4e6cd92951e39c09aea7997d5d5477c
+  manager: conda
+  name: pyzmq
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pyzmq-22.3.0-py38h295c915_2.conda
+  version: 22.3.0
+- category: main
+  dependencies:
+    dbus: '>=1.13.2,<2.0a0'
+    expat: '>=2.2.6,<3.0a0'
+    fontconfig: '>=2.13.0,<3.0a0'
+    freetype: '>=2.9.1,<3.0a0'
+    glib: '>=2.56.2,<3.0a0'
+    gst-plugins-base: '>=1.14.0,<1.15.0a0'
+    gstreamer: '>=1.14.0,<1.15.0a0'
+    icu: '>=58.2,<59.0a0'
+    jpeg: '>=9b,<10a'
+    libgcc-ng: '>=7.3.0'
+    libpng: '>=1.6.35,<1.7.0a0'
+    libstdcxx-ng: '>=7.3.0'
+    libxcb: '>=1.13,<2.0a0'
+    libxml2: '>=2.9.8,<2.10.0a0'
+    openssl: 1.1.*
+    sqlite: '>=3.25.3,<4.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 05507dbc35c46ac5a7066fc860a62341
+    sha256: 5085610d80131ae721bd206af12857acaa4bc1b7b0eda6bc07c52f2277e9db25
+  manager: conda
+  name: qt
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/qt-5.9.7-h5867ecd_1.conda
+  version: 5.9.7
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: bfa3c5c61a5a91e528a1d2d1e3cae6c9
+    sha256: 81f8bc2fa5d8ffe03fa591777e06c424959d2599252374d08d4e1bf77136d8d9
+  manager: conda
+  name: send2trash
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.8.0-pyhd3eb1b0_1.conda
+  version: 1.8.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 2046e66b7d12f7c0cda5687e4c27b692
+    sha256: 5d5a0cbf127295a717ef24b3bcecff6c1485909fa7c72afc66e27885c4ab878e
+  manager: conda
+  name: sip
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/sip-4.19.13-py38h295c915_0.conda
+  version: 4.19.13
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 34586824d411d36af2fa40e799c172d0
+    sha256: 71c97b4ddc3d19ed41bfa1a2d40f620f96b4d46f097dc48ab115b36640f7df0a
+  manager: conda
+  name: six
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/six-1.16.0-pyhd3eb1b0_1.conda
+  version: 1.16.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 95d3411f0f78c324a6655a2b370df2c5
+    sha256: 32718ccaffe34f63aa69ec1343ed5f992a16f510feed9223b0784c898643b527
+  manager: conda
+  name: sniffio
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/sniffio-1.2.0-py38h06a4308_1.conda
+  version: 1.2.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: c8c10f2cd854c0a27630760958bba60c
+    sha256: 4bf87350cb5e65746802eade943b5fa9decfa922f76ca8ca8ae3aa91ab967852
+  manager: conda
+  name: snowballstemmer
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/snowballstemmer-2.2.0-pyhd3eb1b0_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 0c60976249f116d5aa21fd50f0f94990
+    sha256: d351c9c0dab19d3aa5ae4530a98f5333c6e7f35d752b4f45e4b16d96c568ed17
+  manager: conda
+  name: sphinx_rtd_theme
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinx_rtd_theme-0.4.3-pyhd3eb1b0_0.conda
+  version: 0.4.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: ac923499f97b9a9ab7c672b27cb2a1a8
+    sha256: d4b89871d0b01c20d9aa253b2f46fb1c78cd75c91c20d63ff8cf2afd68ae3fca
+  manager: conda
+  name: sphinxcontrib-applehelp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-applehelp-1.0.2-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: bc39c2b70430734b5879d6b504e3311f
+    sha256: 8edb87f6abbb11d93df4b51037f6073415fe6f0ee6693944cd25f05e9cd6a052
+  manager: conda
+  name: sphinxcontrib-devhelp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-devhelp-1.0.2-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 2af558ca8b56151110c7a3639a1ea348
+    sha256: fed2fc45ac491aa059a129e5b3b63090ab2d23327fd2a68b441a7e5c92a809cd
+  manager: conda
+  name: sphinxcontrib-htmlhelp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-htmlhelp-2.0.0-pyhd3eb1b0_0.conda
+  version: 2.0.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: e43f8de7d6a717935ab220a0c957771d
+    sha256: 20c858fa8f7f9cda1eb9bb026d045f4eec29e5ba5f360affcb373931a62e31c8
+  manager: conda
+  name: sphinxcontrib-jsmath
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-jsmath-1.0.1-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 08d67f73f640b4d1e5e8890a324b60e3
+    sha256: a2594974c21898df67fceeeac7f12e5dd3c07fb7aae826eba2203b8812b801c8
+  manager: conda
+  name: sphinxcontrib-qthelp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-qthelp-1.0.3-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 0440b84dfd478f340cf14c2d7c24f6c7
+    sha256: b0dcd6f5a1707742e189a377821e75c1cc73d7859d887b8924832eeb13bd02d4
+  manager: conda
+  name: sphinxcontrib-serializinghtml
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-serializinghtml-1.1.5-pyhd3eb1b0_0.conda
+  version: 1.1.5
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: bd2a5c664c982e8637ae17b1662bd9a4
+    sha256: 2b0bd8d29b8142a9fad8b70059576d22434144cff59f46c25e8e3b60fe9b8fac
+  manager: conda
+  name: testpath
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/testpath-0.5.0-pyhd3eb1b0_0.conda
+  version: 0.5.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: bbfdbae4934150b902f97daaf287efe2
+    sha256: 440bc067a57f888fb602339e2d1a1661ffee79244cd9c5179fc3bbc560efc56b
+  manager: conda
+  name: threadpoolctl
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/threadpoolctl-2.2.0-pyh0d69192_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: cda05f5f6d8509529d1a2743288d197a
+    sha256: c50b132439b0260e4b43649ce30171a4792e186c6f3d657e5936578d4ee3fc56
+  manager: conda
+  name: toml
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/toml-0.10.2-pyhd3eb1b0_0.conda
+  version: 0.10.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 9fedc09c1ff4c9bc22695093c1ecd335
+    sha256: 706678943282f24560c5aae936f78f061ffb289cec5cfcf545fcc64f0d7ee5b0
+  manager: conda
+  name: toolz
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.2-pyhd3eb1b0_0.conda
+  version: 0.11.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: d2d3043f631807af72b0fde504baf625
+    sha256: 6f3821fc414d6369deaaadbd2e965eb352dc04347c0bedcebbd06f09c8c6d095
+  manager: conda
+  name: tornado
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/tornado-6.1-py38h27cfd23_0.conda
+  version: '6.1'
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: 9e0c24d3f7c51fbd42a2ebeb50b5c0fa
+    sha256: 09f8311136bc5bab5f7bd070c98f0ef3ad866ee122781a13fe80210510843671
+  manager: conda
+  name: tqdm
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.62.2-pyhd3eb1b0_1.conda
+  version: 4.62.2
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: 675f60e84f695e63749b09f9ed464eda
+    sha256: e34f5af510316895712446c879fc9c43e278f52c2c61b97fa54836abcb9dd5da
+  manager: conda
+  name: traitlets
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/traitlets-5.1.1-pyhd3eb1b0_0.conda
+  version: 5.1.1
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 8d4303f11560fe9621c962e87cf64d27
+    sha256: 1620b404b9b4bb937c03276adcebfe3cffe5df65911b2680169edc9a7c5101e8
+  manager: conda
+  name: typing_extensions
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-4.1.1-pyh06a4308_0.conda
+  version: 4.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: ffa649340272c3f6466ba01da254c3b0
+    sha256: 19f0d42ae7e5ab32e558812a0025c00de31e2a3636bb051ae3527db196264532
+  manager: conda
+  name: wcwidth
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-pyhd3eb1b0_0.conda
+  version: 0.2.5
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 5c9a80af48919815917612e58474a391
+    sha256: 4960e28532cceefc6ba7fe7afa87ce4d5d00abd2cc51b5e87e9f796d2341102c
+  manager: conda
+  name: webencodings
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/webencodings-0.5.1-py38_1.conda
+  version: 0.5.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: ab85e96e26da8d5797c2458232338b86
+    sha256: d3f762f14aff275613ef8d0df2b1e608e8174960da05a1815f36e70cd62aaae9
+  manager: conda
+  name: wheel
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/wheel-0.37.1-pyhd3eb1b0_0.conda
+  version: 0.37.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 256d0b94f739fe6c4bd23670dc30da68
+    sha256: e2327e6123ec9b879dba6828e5978ec46e2a7d438f6da866d795b2f23ef2ec65
+  manager: conda
+  name: zipp
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/zipp-3.8.0-py38h06a4308_0.conda
+  version: 3.8.0
+- category: main
+  dependencies:
+    idna: '>=2.8'
+    python: '>=3.8,<3.9.0a0'
+    sniffio: '>=1.1'
+  hash:
+    md5: 85440472f52d5b17de2ca8d27d41d21d
+    sha256: 09cd1c0e0821edb34ef03418ddf1cc920001cfbbcf212c00cea13c7af5d0d007
+  manager: conda
+  name: anyio
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/anyio-3.5.0-py38h06a4308_0.conda
+  version: 3.5.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+    six: ''
+  hash:
+    md5: 140486e2ce4f3931b44aa5f7ff8d88da
+    sha256: 6d50672764e75e322db2da755378ba520d4b3c60dc60c5afcb5a539634cacda8
+  manager: conda
+  name: asttokens
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/asttokens-2.0.5-pyhd3eb1b0_0.conda
+  version: 2.0.5
+- category: main
+  dependencies:
+    python: '>=3.6'
+    pytz: '>=2015.7'
+  hash:
+    md5: 61575e8b70e18ebc54e65da5e441b861
+    sha256: c467e0ec03fb3f974cb903778b5806f4bced21534233c2e6b80bf879dc664460
+  manager: conda
+  name: babel
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/babel-2.9.1-pyhd3eb1b0_0.conda
+  version: 2.9.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+    six: '>=1.9.0'
+    wcwidth: '>=0.1.4'
+  hash:
+    md5: fd18c5d7ca06563889b1da307eaca453
+    sha256: 42a5069134510b0e57a9b07f5c76d2e2a803a3bd14a61aa6e21fdd6a2dcadf1f
+  manager: conda
+  name: blessed
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/blessed-1.19.1-py38h578d9bd_1.tar.bz2
+  version: 1.19.1
+- category: main
+  dependencies:
+    libffi: '>=3.3'
+    libgcc-ng: '>=7.5.0'
+    pycparser: ''
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 7b12fe728b28de7b8851af1eb1ba1d38
+    sha256: ec03d92948951097c46e89e378b082035dab7963fefb71a874f50eecfd7a907f
+  manager: conda
+  name: cffi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/cffi-1.15.0-py38hd667e15_1.conda
+  version: 1.15.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+    toolz: '>=0.10.0'
+  hash:
+    md5: 674167bf0c35566f9fb70653cfc196c0
+    sha256: a5fe8c238714603e65c665e49719d767a408365d7e54f49898201d752d9416dd
+  manager: conda
+  name: cytoolz
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/cytoolz-0.11.0-py38h7b6447c_0.conda
+  version: 0.11.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+    six: ''
+  hash:
+    md5: 335fdb99580fb176808d42ccd3c332e1
+    sha256: 4768f27621090559d1b62bcf7fc28bd41f1a0124995fc57a4c9113c961c839ba
+  manager: conda
+  name: fasteners
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/fasteners-0.16.3-pyhd3eb1b0_0.conda
+  version: 0.16.3
+- category: main
+  dependencies:
+    brotli: '>=1.0.1'
+    munkres: ''
+    python: '>=3.6'
+  hash:
+    md5: bb9c5b5a6d892fca5efe4bf0203b6a48
+    sha256: 6c1c48972893046fd199e2c12a694f199c74246f00b6db761bf8c375433d4164
+  manager: conda
+  name: fonttools
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/fonttools-4.25.0-pyhd3eb1b0_0.conda
+  version: 4.25.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    zipp: '>=0.5'
+  hash:
+    md5: 423bacee14c9d4174efc8cd379f6c2dc
+    sha256: eb1636fb499ffcd1d6bec45a676c40284bab9e957ed9813bc72e316fb7f44ad0
+  manager: conda
+  name: importlib-metadata
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/importlib-metadata-4.11.3-py38h06a4308_0.conda
+  version: 4.11.3
+- category: main
+  dependencies:
+    python: '>=3.6'
+    zipp: '>=3.1.0'
+  hash:
+    md5: 3e7caf9dbd3b4771e9b951ffc7cdad80
+    sha256: bdb5316b10c07956d3dc067df2e3b1c64faecdc859cc24cdab164669a80ce57b
+  manager: conda
+  name: importlib_resources
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/importlib_resources-5.2.0-pyhd3eb1b0_1.conda
+  version: 5.2.0
+- category: main
+  dependencies:
+    parso: '>=0.8.0,<0.9.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: d3629035a1f2c61553814cc20fa2ec11
+    sha256: dbc80eefca61dbdebe1d1de77f51894d4f797531261e3d90d81e49feae21594f
+  manager: conda
+  name: jedi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jedi-0.18.1-py38h06a4308_1.conda
+  version: 0.18.1
+- category: main
+  dependencies:
+    markupsafe: '>=2.0'
+    python: '>=3.6'
+  hash:
+    md5: a5b0429ead9704cd1ad0b044c97e728f
+    sha256: e15a4ea4eb6b33873f30d9644cb9a0d60cc2403bbfd3e3918ee40b24dce54cc1
+  manager: conda
+  name: jinja2
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jinja2-3.0.3-pyhd3eb1b0_0.conda
+  version: 3.0.3
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    traitlets: ''
+  hash:
+    md5: dbb24641c111c02f0620a3c339517d74
+    sha256: 26df8b8c84f46db06d365d905fd150811270f4a09f62361d89186996d50067f8
+  manager: conda
+  name: jupyter_core
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jupyter_core-4.10.0-py38h06a4308_0.conda
+  version: 4.10.0
+- category: main
+  dependencies:
+    pygments: '>=2.4.1,<3'
+    python: ''
+  hash:
+    md5: af46aff4922ca45df6ba19b313df6070
+    sha256: 3fb6380242934cad8db44dd1e77597529eb6c73c88144ee5e7f86f18e6eb70e9
+  manager: conda
+  name: jupyterlab_pygments
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda
+  version: 0.1.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+    traitlets: ''
+  hash:
+    md5: 47e865f8b884de7c5d516349e83457a7
+    sha256: 6c33d164f44eb4be2a1a303d426a6df297739458a557fa45906741552569df02
+  manager: conda
+  name: matplotlib-inline
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/matplotlib-inline-0.1.2-pyhd3eb1b0_2.conda
+  version: 0.1.2
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: d44a183b4c54b461a8fa24805c53179a
+    sha256: a4b2a68d18486f17c2c5ef5b5d268643ea2fb65e99d61c8b73fd3a89ad076ab9
+  manager: conda
+  name: mkl-service
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mkl-service-2.4.0-py38h7f8727e_0.conda
+  version: 2.4.0
+- category: main
+  dependencies:
+    pyparsing: '>=2.0.2,!=3.0.5'
+    python: '>=3.6'
+  hash:
+    md5: 07bbfbb961db7fa329cc42716943ea62
+    sha256: d3b400cd9613b5570dd5a4e788b0a2947d612b2e8ab47a82a83603d94c26959a
+  manager: conda
+  name: packaging
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/packaging-21.3-pyhd3eb1b0_0.conda
+  version: '21.3'
+- category: main
+  dependencies:
+    locket: ''
+    python: '>=3.5'
+    toolz: ''
+  hash:
+    md5: d02d8b6ea30c680d3fafe4ac50cc4b18
+    sha256: 7cb5bebcd6698effe5162a353fd7d6e0275a4ca07168f730b1917d587a134d13
+  manager: conda
+  name: partd
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/partd-1.2.0-pyhd3eb1b0_1.conda
+  version: 1.2.0
+- category: main
+  dependencies:
+    ptyprocess: '>=0.5'
+    python: ''
+  hash:
+    md5: 765b2562d6cdd14bb6d44fc170a04331
+    sha256: fb4b14fdb5e57becda5b8b88b453626ce12edb49fc8cec88ddaea40b52277494
+  manager: conda
+  name: pexpect
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pexpect-4.8.0-pyhd3eb1b0_3.conda
+  version: 4.8.0
+- category: main
+  dependencies:
+    python: ''
+    wcwidth: ''
+  hash:
+    md5: 19fa1fa6a03645e39e7dce3bdbe9d72f
+    sha256: b7251f3c678beab7ef9d7b9a8491a5d61b2b2f3f85e4a18fa4d833c61e673945
+  manager: conda
+  name: prompt-toolkit
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.20-pyhd3eb1b0_0.conda
+  version: 3.0.20
+- category: main
+  dependencies:
+    dbus: '>=1.13.12,<2.0a0'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+    qt: '>=5.9.7,<5.10.0a0'
+    sip: '>=4.19.13,<=4.19.14'
+  hash:
+    md5: d3e6b8e1a634125414f87f231c755dae
+    sha256: b8bfbb8e6b5057222716e5e5d1a29156b6ab83d69d934365b1fabd8b6f84b560
+  manager: conda
+  name: pyqt
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pyqt-5.9.2-py38h05f1152_4.conda
+  version: 5.9.2
+- category: main
+  dependencies:
+    python: ''
+    six: '>=1.5'
+  hash:
+    md5: 211ee00320b08a1ac9fea6677649f6c9
+    sha256: 01e82704b3d84c1b0b1f8823fa64259eb372a1278e6a40dddf2cefb4c96ab942
+  manager: conda
+  name: python-dateutil
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.2-pyhd3eb1b0_0.conda
+  version: 2.8.2
+- category: main
+  dependencies:
+    certifi: '>=2016.9.26'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: c5e94bc1e8e443ae6d53b191a351591e
+    sha256: 2deed396c8102c6c9400b0efec4505ec45bdaf813906f5867285dd8f8bb92b10
+  manager: conda
+  name: setuptools
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/setuptools-61.2.0-py38h06a4308_0.conda
+  version: 61.2.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=2.0'
+  hash:
+    md5: 106357aab51b20535ac3a59a48035875
+    sha256: 95aad4a4ab4b1cb88a4a4c154d5903e759593c91a6e84fdb80ca5b0aa9a2c17d
+  manager: conda
+  name: snakeviz
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/snakeviz-2.0.1-py38h06a4308_0.conda
+  version: 2.0.1
+- category: main
+  dependencies:
+    ptyprocess: ''
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=4'
+  hash:
+    md5: 3fc07a4bdf64fbf00766564f1aaa5618
+    sha256: 75471a8713e1568ec341773ad77c86355ebec4901e58dba612a6a53b3d5635b1
+  manager: conda
+  name: terminado
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/terminado-0.13.1-py38h06a4308_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    typing_extensions: 4.1.1 pyh06a4308_0
+  hash:
+    md5: 0b535dfd0618653dd772c78c9c2b56a8
+    sha256: ffd342f3df10d3690d3c8abe53f411828bcb9b55ad7eadede2ae476bc9be0a22
+  manager: conda
+  name: typing-extensions
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/typing-extensions-4.1.1-hd3eb1b0_0.conda
+  version: 4.1.1
+- category: main
+  dependencies:
+    appdirs: '>=1.4.3,<2'
+    distlib: '>=0.3.1,<1'
+    filelock: '>=3.0.0,<4'
+    python: '>=3.8,<3.9.0a0'
+    six: '>=1.9.0,<2'
+  hash:
+    md5: 41648f5a5f164a1dd02d394600a67200
+    sha256: 4ad377e6386bb0e33f86b37421180e85f749b22572e824dc0182e6525242b26c
+  manager: conda
+  name: virtualenv
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/virtualenv-20.4.6-py38h06a4308_1.conda
+  version: 20.4.6
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: f717b8646e57dfa782479480407f6647
+    sha256: 81169f26d2bee9cbcf6001f171695cb158b4fa637d6d14e5a5ac84e795355b77
+  manager: conda
+  name: websocket-client
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/websocket-client-0.58.0-py38h06a4308_4.conda
+  version: 0.58.0
+- category: main
+  dependencies:
+    cffi: '>=1.0.1'
+    libgcc-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 933eaa8ad180b8fb3b0e2ca36fb6954e
+    sha256: 375b07446fe55a74a1d23da4b4e74d542e73aa67b43fad7ebd9e2bf0f044ca15
+  manager: conda
+  name: argon2-cffi-bindings
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/argon2-cffi-bindings-21.2.0-py38h7f8727e_0.conda
+  version: 21.2.0
+- category: main
+  dependencies:
+    packaging: ''
+    python: '>=3.6'
+    six: '>=1.9.0'
+    webencodings: ''
+  hash:
+    md5: 256eb7e384e35f993ef8ccd6c4f45e58
+    sha256: 294fea02f7cdc4728ea19102d021c1f9d2c55fcfb00de26b72f54e206cd45762
+  manager: conda
+  name: bleach
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/bleach-4.1.0-pyhd3eb1b0_0.conda
+  version: 4.1.0
+- category: main
+  dependencies:
+    cffi: '>=1.0.0'
+    libgcc-ng: '>=7.3.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e881c8ee8a4048f29da5d20f0330fe37
+    sha256: 42f83ff56466f4e83ad8c29b3cf1f396cd9e1278f8158331c8ac9687b4da8052
+  manager: conda
+  name: brotlipy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/brotlipy-0.7.0-py38h27cfd23_1003.conda
+  version: 0.7.0
+- category: main
+  dependencies:
+    cffi: '>=1.12'
+    libgcc-ng: ''
+    openssl: ''
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 16d301ed789096eb9881a25ed7a1155e
+    sha256: c14b17e8e4bebbdffa04be42db74e89fb1e21f7d52c0b91cd6add0488b337133
+  manager: conda
+  name: cryptography
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/cryptography-37.0.1-py38h9ce1e76_0.conda
+  version: 37.0.1
+- category: main
+  dependencies:
+    cloudpickle: '>=1.1.1'
+    fsspec: '>=0.6.0'
+    packaging: '>=20.0'
+    partd: '>=0.3.10'
+    python: '>=3.8,<3.9.0a0'
+    pyyaml: '>=5.3.1'
+    toolz: '>=0.8.2'
+  hash:
+    md5: 3d9e2917bed5f51aeb2ea2ea70f8e552
+    sha256: 11570dd8675aadd67bf80ecd9bb66388907f0677ea2b9eba90915924d0f52ec8
+  manager: conda
+  name: dask-core
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/dask-core-2022.5.0-py38h06a4308_0.conda
+  version: 2022.5.0
+- category: main
+  dependencies:
+    blessed: '>=1.17.7'
+    prefixed: '>=0.3.2'
+    python: '>=2.7,!=3.0,!=3.1,!=3.2,!=3.3,<4'
+  hash:
+    md5: f5c404e6c73888f69932895043ea5938
+    sha256: 9bfc2fc19cf49deef5443a2f6fe76576f5dd6f486792c23bb53b8e17c3bcf424
+  manager: conda
+  name: enlighten
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/noarch/enlighten-1.10.1-pyhd8ed1ab_0.tar.bz2
+  version: 1.10.1
+- category: main
+  dependencies:
+    importlib-metadata: ''
+    mccabe: '>=0.6.0,<0.7.0'
+    pycodestyle: '>=2.7.0,<2.8.0'
+    pyflakes: '>=2.3.0,<2.4.0'
+    python: '>=3.6'
+    setuptools: '>=30.0.0'
+  hash:
+    md5: 04cb15847ce1ae281bac8eb5d67da440
+    sha256: e4eb96ba8e25646b256c87185002e832bc41a00b7bd6212b569f459a6d0bebfd
+  manager: conda
+  name: flake8
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/flake8-3.9.2-pyhd3eb1b0_0.conda
+  version: 3.9.2
+- category: main
+  dependencies:
+    attrs: '>=17.4.0'
+    importlib_resources: '>=1.4.0'
+    pyrsistent: '>=0.14.0,!=0.17.0,!=0.17.1,!=0.17.2'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 877ad2da45101c06e0e779df2abcaf30
+    sha256: 720d6023090ae6a1d732f9e570906cd4be93b348a0463fccb76b1bc8a7f00f84
+  manager: conda
+  name: jsonschema
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jsonschema-4.4.0-py38h06a4308_0.conda
+  version: 4.4.0
+- category: main
+  dependencies:
+    entrypoints: ''
+    jupyter_core: '>=4.9.2'
+    nest-asyncio: '>=1.5.4'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.8.2'
+    pyzmq: '>=22.3'
+    tornado: '>=6.0'
+    traitlets: ''
+  hash:
+    md5: 6252e586fe1056d82d0cdeac2f46fec1
+    sha256: fd1e1a3080c343bb0269bb08919f86eb379131cbb9a3300d23fbb8fe5f8abd44
+  manager: conda
+  name: jupyter_client
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jupyter_client-7.2.2-py38h06a4308_0.conda
+  version: 7.2.2
+- category: main
+  dependencies:
+    python: 2.7|>=3.6
+    setuptools: ''
+  hash:
+    md5: 0941325bf48969e2b3b19d0951740950
+    sha256: 1a419fefc3d02169844d27a4f2b27ed72a49930e91d60ed25c461fc06b5b8da5
+  manager: conda
+  name: nodeenv
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.6.0-pyhd8ed1ab_0.tar.bz2
+  version: 1.6.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libgcc-ng: '>=7.3.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: b790b4a82a538017f30fd4ca9c0e2301
+    sha256: 121d32c3b78a3db5f85ac19ad4e6927cef0a707e9c557d13fb82254b2714628b
+  manager: conda
+  name: numpy-base
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/numpy-base-1.20.1-py38h7d8b39e_0.conda
+  version: 1.20.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    setuptools: ''
+    wheel: ''
+  hash:
+    md5: 4cc887ba625309097ff29d8ce96329d9
+    sha256: c120859d1fa3da7f45b0d5ef824e6c3c8312a807aa70b94f5e11dd05ee4b026f
+  manager: conda
+  name: pip
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pip-21.2.4-py38h06a4308_0.conda
+  version: 21.2.4
+- category: main
+  dependencies:
+    attrs: '>=19.2.0'
+    iniconfig: ''
+    more-itertools: '>=4.0.0'
+    packaging: ''
+    pluggy: '>=0.12,<1.0.0a1'
+    py: '>=1.8.2'
+    python: '>=3.8,<3.9.0a0'
+    toml: ''
+  hash:
+    md5: 62967f3cfdf22d8f7fc89826cc72f312
+    sha256: 37ca2e1c9cae6116c6cf5dad969627254a483a14633dc6c5c36d0d1c1c3ddeee
+  manager: conda
+  name: pytest
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pytest-6.2.4-py38h06a4308_2.conda
+  version: 6.2.4
+- category: main
+  dependencies:
+    asttokens: ''
+    executing: ''
+    pure_eval: ''
+    python: '>=3.5'
+  hash:
+    md5: 6212968e73726f6da42e5ffcd2bea92d
+    sha256: 4e1527a4faf81f7d24c529f373c0dc432f2521480dcb528e55333ec8a0520f5a
+  manager: conda
+  name: stack_data
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/stack_data-0.2.0-pyhd3eb1b0_0.conda
+  version: 0.2.0
+- category: main
+  dependencies:
+    cffi: ''
+    libgcc-ng: '>=10.3.0'
+    libstdcxx-ng: '>=10.3.0'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+  hash:
+    md5: 3f6ce81c7d28563fe2af763d9ff43e62
+    sha256: 255b1075a58a17a931d5da546785a1db763287d4152cd65a1ece25771ad8345a
+  manager: conda
+  name: ukkonen
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py38h43d8883_2.tar.bz2
+  version: 1.0.1
+- category: main
+  dependencies:
+    argon2-cffi-bindings: ''
+    python: '>=3.6'
+    typing-extensions: ''
+  hash:
+    md5: f00b851bc61b4c313903d31c7daecb09
+    sha256: 2c9a465ef472d2b858fed1d2e20c15f99d13b56ff21bfd53ae6bb2fffd57c1d7
+  manager: conda
+  name: argon2-cffi
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/argon2-cffi-21.3.0-pyhd3eb1b0_0.conda
+  version: 21.3.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+    ukkonen: ''
+  hash:
+    md5: 6f41e3056fcd3061fbc2b49b3309fe0c
+    sha256: 856f5cf7a55116b7976a5c9b1ca4c1536d821a460b3ac047a9895221d7ed607c
+  manager: conda
+  name: identify
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/noarch/identify-2.5.1-pyhd8ed1ab_0.tar.bz2
+  version: 2.5.1
+- category: main
+  dependencies:
+    backcall: ''
+    decorator: ''
+    jedi: '>=0.16'
+    matplotlib-inline: '>=0.1.2'
+    pexpect: '>4.3'
+    pickleshare: ''
+    prompt-toolkit: '>=2.0.0,<3.1.0,!=3.0.0,!=3.0.1'
+    pygments: '>=2.4.0'
+    python: '>=3.8,<3.9.0a0'
+    setuptools: '>=18.5'
+    stack_data: ''
+    traitlets: '>=5'
+  hash:
+    md5: 770f26af389c5103c26addcae7fe755b
+    sha256: 89e739f38d68fd8fce416e2d3cc1ea9cfd07a78d995e7674b3b027937f7658e4
+  manager: conda
+  name: ipython
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/ipython-8.3.0-py38h06a4308_0.conda
+  version: 8.3.0
+- category: main
+  dependencies:
+    jsonschema: '>=2.6'
+    jupyter_core: ''
+    python: '>=3.8,<3.9.0a0'
+    python-fastjsonschema: ''
+    traitlets: '>=4.1'
+  hash:
+    md5: a9c19f2c0244559b292acc2ce458e871
+    sha256: 66b73015bc78549c1660e5d5c02facbc3d561b51525a9da9ecfc54500fe61e87
+  manager: conda
+  name: nbformat
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/nbformat-5.3.0-py38h06a4308_0.conda
+  version: 5.3.0
+- category: main
+  dependencies:
+    cryptography: '>=35.0'
+    python: '>=3.6'
+  hash:
+    md5: 1dbbf9422269cd62c7094960d9b43f36
+    sha256: 0a0629a3816bd639f1b1a94dd2fd521ad407e55bfd6532154dce22cddb875783
+  manager: conda
+  name: pyopenssl
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-22.0.0-pyhd3eb1b0_0.conda
+  version: 22.0.0
+- category: main
+  dependencies:
+    debugpy: '>=1.0.0,<2.0'
+    ipython: '>=7.23.1'
+    jupyter_client: <8.0
+    matplotlib-inline: '>=0.1.0,<0.2.0'
+    nest-asyncio: ''
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=4.2,<7.0'
+    traitlets: '>=5.1.0,<6.0'
+  hash:
+    md5: 64a5bfbc32f1e68b0eda430a1e3e2c11
+    sha256: a2e50454773714563d6384fa8b0651c30445b6d99ddd6520d36ac53aa90de6de
+  manager: conda
+  name: ipykernel
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/ipykernel-6.9.1-py38h06a4308_0.conda
+  version: 6.9.1
+- category: main
+  dependencies:
+    jupyter_client: '>=6.1.5'
+    nbformat: '>=5.0'
+    nest-asyncio: ''
+    python: '>=3.8,<3.9.0a0'
+    traitlets: '>=5.0.0'
+  hash:
+    md5: a0606d4de8c491a8381e72f7925000ca
+    sha256: 23732074c1f78570b16595b16cb1cf9b8b0f39c0dae83731bfd07bf324f4c20f
+  manager: conda
+  name: nbclient
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/nbclient-0.5.13-py38h06a4308_0.conda
+  version: 0.5.13
+- category: main
+  dependencies:
+    cfgv: '>=2.0.0'
+    identify: '>=1.0.0'
+    nodeenv: '>=0.11.1'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+    pyyaml: '>=5.1'
+    toml: ''
+    virtualenv: '>=20.0.8'
+  hash:
+    md5: 82341297a478aaca561a2db20b037aab
+    sha256: d5d786da2105733fa2385fafd929e7834d9c79e872d3d74e1eb5dcbde19e6773
+  manager: conda
+  name: pre-commit
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/pre-commit-2.15.0-py38h578d9bd_1.tar.bz2
+  version: 2.15.0
+- category: main
+  dependencies:
+    brotlipy: '>=0.6.0'
+    certifi: ''
+    cryptography: '>=1.3.4'
+    idna: '>=2.0.0'
+    pyopenssl: '>=0.14'
+    pysocks: '>=1.5.6,<2.0,!=1.5.7'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 40c1c6f5e634ec77344a822ab3aa84cc
+    sha256: 54ba00bf64aa83bcf6386b90b74bc066cb409c4a5a0a1b69bdaf01e6c9fbfa81
+  manager: conda
+  name: urllib3
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/urllib3-1.26.9-py38h06a4308_0.conda
+  version: 1.26.9
+- category: main
+  dependencies:
+    bleach: ''
+    defusedxml: ''
+    entrypoints: '>=0.2.2'
+    jinja2: '>=2.4'
+    jupyter_core: ''
+    jupyterlab_pygments: ''
+    mistune: '>=0.8.1,<2'
+    nbclient: '>=0.5.0,<0.6.0'
+    nbformat: '>=4.4'
+    pandocfilters: '>=1.4.1'
+    pygments: '>=2.4.1'
+    python: '>=3.8,<3.9.0a0'
+    testpath: ''
+    traitlets: '>=5.0'
+  hash:
+    md5: 17bbbaa994689cb955db812d4f8f594d
+    sha256: 2d29ec0e7fe19f6b5518bd5e05082404e1240c16e0a3437c70290c6e18fc5701
+  manager: conda
+  name: nbconvert
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/nbconvert-6.1.0-py38h06a4308_0.conda
+  version: 6.1.0
+- category: main
+  dependencies:
+    certifi: '>=2017.4.17'
+    charset-normalizer: '>=2.0.0,<2.1.0'
+    idna: '>=2.5,<4'
+    python: '>=3.6'
+    urllib3: '>=1.21.1,<1.27'
+  hash:
+    md5: 9b593f86737e69140c47c2107ecf277c
+    sha256: 0a87a073acd53795a878bb6cf24d1e57ecf73dd161171ed0b4d64f0be0844719
+  manager: conda
+  name: requests
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/requests-2.27.1-pyhd3eb1b0_0.conda
+  version: 2.27.1
+- category: main
+  dependencies:
+    anyio: '>=3.1.0,<4'
+    argon2-cffi: ''
+    ipython_genutils: ''
+    jinja2: ''
+    jupyter_client: '>=6.1.1'
+    jupyter_core: '>=4.6.0'
+    nbconvert: ''
+    nbformat: ''
+    packaging: ''
+    prometheus_client: ''
+    python: '>=3.7'
+    pyzmq: '>=17'
+    send2trash: ''
+    terminado: '>=0.8.3'
+    tornado: '>=6.1.0'
+    traitlets: '>=5'
+    websocket-client: ''
+  hash:
+    md5: 303eb09f873fde3c13abaaed542d54e0
+    sha256: d8890018724afc9b25e2417b3530f873559a06a23a4a78f048b276a5cdc63bc2
+  manager: conda
+  name: jupyter_server
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyter_server-1.13.5-pyhd3eb1b0_0.conda
+  version: 1.13.5
+- category: main
+  dependencies:
+    argon2-cffi: ''
+    ipykernel: ''
+    ipython_genutils: ''
+    jinja2: ''
+    jupyter_client: '>=5.3.4'
+    jupyter_core: '>=4.6.1'
+    nbconvert: '>=5'
+    nbformat: ''
+    nest-asyncio: '>=1.5'
+    prometheus_client: ''
+    python: '>=3.8,<3.9.0a0'
+    pyzmq: '>=17'
+    send2trash: '>=1.8.0'
+    terminado: '>=0.8.3'
+    tornado: '>=6.1'
+    traitlets: '>=4.2.1'
+  hash:
+    md5: 2e58e04bd54880bea66b833c68f5f8c0
+    sha256: 24dda7e43e7bb8b53e1058d3d7a9031b9cfb1067a7405a59b1d53d8a1fc70355
+  manager: conda
+  name: notebook
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/notebook-6.4.11-py38h06a4308_0.conda
+  version: 6.4.11
+- category: main
+  dependencies:
+    alabaster: '>=0.7,<0.8'
+    babel: '>=1.3'
+    colorama: '>=0.3.5'
+    docutils: '>=0.14,<0.18'
+    imagesize: ''
+    jinja2: '>=2.3'
+    packaging: ''
+    pygments: '>=2.0'
+    python: '>=3.6'
+    requests: '>=2.5.0'
+    setuptools: ''
+    snowballstemmer: '>=1.1'
+    sphinxcontrib-applehelp: ''
+    sphinxcontrib-devhelp: ''
+    sphinxcontrib-htmlhelp: ''
+    sphinxcontrib-jsmath: ''
+    sphinxcontrib-qthelp: ''
+    sphinxcontrib-serializinghtml: ''
+  hash:
+    md5: 8f65a307ecef80b3afd979777cc5b549
+    sha256: 0649bd05747f248d83d2d21b8047e42f49cce93e56116fa65aa2a234310ee4ea
+  manager: conda
+  name: sphinx
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinx-4.2.0-pyhd3eb1b0_1.conda
+  version: 4.2.0
+- category: main
+  dependencies:
+    babel: ''
+    entrypoints: '>=0.2.2'
+    jinja2: '>=3.0.3'
+    json5: ''
+    jsonschema: '>=3.0.1'
+    jupyter_server: '>=1.8,<2'
+    packaging: ''
+    python: '>=3.8,<3.9.0a0'
+    requests: ''
+  hash:
+    md5: ae60c37f4ef571584563c5cbd6ac9466
+    sha256: 91bcd81a9a4b7923285959ec09f567a3fb15f718c84f285aa4ef7f42f45b6f71
+  manager: conda
+  name: jupyterlab_server
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/jupyterlab_server-2.12.0-py38h06a4308_0.conda
+  version: 2.12.0
+- category: main
+  dependencies:
+    jupyter_server: '>=1.8.0,<2.0.0'
+    notebook: <7
+    python: '>=3.6'
+  hash:
+    md5: 22683be353228acd015cae8a4676b462
+    sha256: bb4645b792089292736e7193112d74247884f9326b93410acfbd1b18c67ac2c4
+  manager: conda
+  name: nbclassic
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/nbclassic-0.3.5-pyhd3eb1b0_0.conda
+  version: 0.3.5
+- category: main
+  dependencies:
+    ipython: ''
+    jinja2: '>=2.10'
+    jupyter_core: ''
+    jupyter_server: '>=1.4,<2'
+    jupyterlab_server: '>=2.3,<3'
+    nbclassic: '>=0.2,<1'
+    packaging: ''
+    python: '>=3.6'
+    tornado: '>=6.1'
+  hash:
+    md5: 9292f2b7ad621d8a6d9a9a7f7338664d
+    sha256: d5d48497b7b79b91e13a73ee1def9e9ea27a40a6d6324b6ab616771331f80f22
+  manager: conda
+  name: jupyterlab
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyterlab-3.1.7-pyhd3eb1b0_0.conda
+  version: 3.1.7
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 9ca7b8cff6a7f97cd2395f20dd46bc90
+    sha256: 6495ba0a480430131662421476c0d1b75d78ebe4f29a768d9e0ea693fee6ad46
+  manager: conda
+  name: bottleneck
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/bottleneck-1.3.4-py38hce1f21e_0.conda
+  version: 1.3.4
+- category: main
+  dependencies:
+    hdf5: '>=1.10.5,<1.10.6.0a0 mpi_mpich_*'
+    libgcc-ng: '>=7.3.0'
+    mpi4py: ''
+    mpich: '>=3.3.2,<5.0.0a0'
+    numpy: '>=1.14.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: d1d5e76f488eea5e325ec02c8375cc3d
+    sha256: c4614a2e58a06431665d104e1dfcfd97ff2b8c48f7693a0d6a6a8331c0910873
+  manager: conda
+  name: h5py
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/h5py-2.10.0-mpi_mpich_py38haaae0f6_2.tar.bz2
+  version: 2.10.0
+- category: main
+  dependencies:
+    h5py: ''
+    hdf5: '>=1.10.5,<1.10.6.0a0'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+  hash:
+    md5: 32d5f10a85464bc063594b91d1fd2b12
+    sha256: 475a56aca66bf3f95c2b19500a3d4c7bfd6d625b6cef509ce3bdc16a5d44291d
+  manager: conda
+  name: hdf5plugin
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/hdf5plugin-2.3.0-py38h5235d98_0.tar.bz2
+  version: 2.3.0
+- category: main
+  dependencies:
+    blosc: '>=1.21.0,<2.0a0'
+    brotli: '>=1.0.9,<2.0a0'
+    brunsli: '>=0.1,<1.0a0'
+    bzip2: '>=1.0.8,<2.0a0'
+    cfitsio: '>=3.470,<3.471.0a0'
+    charls: '>=2.2.0,<2.3.0a0'
+    giflib: '>=5.2.1,<5.3.0a0'
+    jpeg: '>=9d,<10a'
+    jxrlib: '>=1.1,<1.2.0a0'
+    lcms2: '>=2.12,<3.0a0'
+    lerc: '>=3.0,<4.0a0'
+    libaec: '>=1.0.4,<2.0a0'
+    libdeflate: '>=1.8,<1.9.0a0'
+    libgcc-ng: '>=7.5.0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libstdcxx-ng: '>=7.5.0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp: '>=1.2.0,<1.3.0a0'
+    libzopfli: '>=1.0.3,<1.1.0a0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    openjpeg: '>=2.3.0,<3.0a0'
+    python: '>=3.8,<3.9.0a0'
+    snappy: '>=1.1.8,<2.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zfp: '>=0.5.5,<1.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.9,<1.5.0a0'
+  hash:
+    md5: 4cfb229b623dd26c7cca3bfa18638a98
+    sha256: b7a811184085bb6e39ba641a52778b71c4755e3fb25d6ef617756311d435181f
+  manager: conda
+  name: imagecodecs
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/imagecodecs-2021.8.26-py38h4cda21f_0.conda
+  version: 2021.8.26
+- category: main
+  dependencies:
+    numpy: ''
+    pillow: ''
+    python: '>=3'
+  hash:
+    md5: 4f1d37bdc3afdb2d237fd9b6b920ec3d
+    sha256: 28c1a7ad62b93af09bcee1820b86660789838a40ca7be783d34bdf238d5381f8
+  manager: conda
+  name: imageio
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/imageio-2.9.0-pyhd3eb1b0_0.conda
+  version: 2.9.0
+- category: main
+  dependencies:
+    matplotlib-base: '>=3.5.1,<3.5.2.0a0'
+    pyqt: ''
+    python: '>=3.8,<3.9.0a0'
+    tornado: ''
+  hash:
+    md5: d7a15b57f9ba4a2bad00be3fbda25206
+    sha256: 82b07da744d2c9b1ea2f05e554a39ac017a54c640f34bed2bba341301e36cd51
+  manager: conda
+  name: matplotlib
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/matplotlib-3.5.1-py38h06a4308_1.conda
+  version: 3.5.1
+- category: main
+  dependencies:
+    cycler: '>=0.10'
+    fonttools: '>=4.22.0'
+    freetype: '>=2.3'
+    kiwisolver: '>=1.0.1'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    numpy: '>=1.19.2,<2.0a0'
+    packaging: '>=20.0'
+    pillow: '>=6.2.0'
+    pyparsing: '>=2.2.1'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.7'
+    tk: ''
+  hash:
+    md5: 61037444fe5aef294f20fadac4204392
+    sha256: 235612796af4985a9ffaa89e1d7d4f878b17209a2e5c8b6e7ab6138f6dca965e
+  manager: conda
+  name: matplotlib-base
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/matplotlib-base-3.5.1-py38ha18d171_1.conda
+  version: 3.5.1
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 466c36695155a88248c4cde9871309e2
+    sha256: d3a407b82a9f5c60407663f856bc24f7e2052fab1595d78c277ff8400d00ba57
+  manager: conda
+  name: mkl_fft
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mkl_fft-1.3.0-py38h42c9631_2.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    blas: '* mkl'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 1e3b7251e474e099a4bfd672fdca17d8
+    sha256: df07f9cf6ae736e8612ce8ddf28a27b7de277f5b82de402ce214650938761121
+  manager: conda
+  name: mkl_random
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/mkl_random-1.2.2-py38h51133e4_0.conda
+  version: 1.2.2
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libgcc-ng: '>=7.3.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    mkl_fft: ''
+    mkl_random: ''
+    numpy-base: 1.20.1 py38h7d8b39e_0
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e38538afb376c0f772ce8f09db54a6ae
+    sha256: ac530b0d85080bd32828a457aab152f947c8990546cbf71d0f8bf22ba5b0e82f
+  manager: conda
+  name: numpy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/numpy-1.20.1-py38h93e21f0_0.conda
+  version: 1.20.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    llvmlite: '>=0.36.0,<0.37.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    setuptools: ''
+    tbb: '>=2020.3'
+  hash:
+    md5: 8660e6f10449493daebc4ecd0a744ace
+    sha256: f0ba404a631c8b8ac99df161a6393fbfbf6b7816427520a8b559ca62dd741c7a
+  manager: conda
+  name: numba
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/numba-0.53.1-py38ha9443f7_0.conda
+  version: 0.53.1
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    msgpack-python: ''
+    numpy: '>=1.7'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 1f60b0f58011d77643f2fafb9c764d1c
+    sha256: 11fa931b7e98534cf2d6bb1845e27128c54c0ad3f2d5d85c8c2d76dc9d7d8dc8
+  manager: conda
+  name: numcodecs
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/numcodecs-0.8.0-py38h2531618_0.conda
+  version: 0.8.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: fdd4dd744e6409461fbc9543e30da04f
+    sha256: f41dc5784a6dab2fe738c61d23c22de923bc50065be563042689ba6436152dc7
+  manager: conda
+  name: numexpr
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/numexpr-2.7.3-py38h22e1b3c_1.conda
+  version: 2.7.3
+- category: main
+  dependencies:
+    numpy: ''
+    python: '>=3.5'
+  hash:
+    md5: 53205b8b5762c06f85b6bb7abd4f496e
+    sha256: 59764a0ce78436f1a542b06667233fdcf71e6d7c2dc5b88762a619c8350430ec
+  manager: conda
+  name: opt_einsum
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/opt_einsum-3.3.0-pyhd3eb1b0_1.conda
+  version: 3.3.0
+- category: main
+  dependencies:
+    libgcc-ng: '>=7.5.0'
+    numpy: '>=1.18.5,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e82c8fa9f3829ecd054bf24a35932d1d
+    sha256: 11933bdd2cdfa44455beb1d9f4dfccf32cc07d1e1132cba8b2c5b533290b2ee9
+  manager: conda
+  name: pywavelets
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pywavelets-1.3.0-py38h7f8727e_0.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libgcc-ng: '>=7.3.0'
+    libgfortran-ng: '>=7,<8.0a0'
+    libstdcxx-ng: '>=7.3.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e6c2e8a210cbd7d6a361d3bf6c70093a
+    sha256: a404bff196d155a2b88fa51c0b86f34d5ef7ab82ca9d2095c9143adb5f5a3698
+  manager: conda
+  name: scipy
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/scipy-1.6.2-py38had2a1c9_1.conda
+  version: 1.6.2
+- category: main
+  dependencies:
+    imagecodecs: '>=2021.4.28'
+    numpy: '>=1.15.1'
+    python: '>=3.7'
+  hash:
+    md5: 5a265e3b9694c13bcfb8c40a3b8e3d8f
+    sha256: 4188780a5854950cecfdcb0ec86e428899a7ea6589ec9947f8f7fe0987d8340c
+  manager: conda
+  name: tifffile
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/tifffile-2021.7.2-pyhd3eb1b0_2.conda
+  version: 2021.7.2
+- category: main
+  dependencies:
+    bottleneck: '>=1.2.1'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    numexpr: '>=2.7.0'
+    numpy: '>=1.19.2,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.7.3'
+    pytz: '>=2017.3'
+  hash:
+    md5: f32e088c2b8e54889f2e0580a92fb73e
+    sha256: 509481b30423bceabd75f885b00274c40f7fa1c2cfb3f26b69a7280b51e2871c
+  manager: conda
+  name: pandas
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/pandas-1.3.3-py38h8c16a72_0.conda
+  version: 1.3.3
+- category: main
+  dependencies:
+    blosc: '>=1.17.0,<2.0a0'
+    bzip2: '>=1.0.8,<2.0a0'
+    hdf5: '>=1.10.5,<1.10.6.0a0'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    lzo: '>=2.10,<3.0a0'
+    mock: ''
+    numexpr: ''
+    numpy: '>=1.14.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 2b8a8a6f30674b3272b4755b21030c02
+    sha256: 7b8aa6b3c775488926d264e39842e2e5cf2dab957a95431916aa40fbaeeaed9c
+  manager: conda
+  name: pytables
+  optional: false
+  platform: linux-64
+  url: https://conda.anaconda.org/conda-forge/linux-64/pytables-3.6.1-py38h9f153d1_1.tar.bz2
+  version: 3.6.1
+- category: main
+  dependencies:
+    _openmp_mutex: ''
+    cloudpickle: '>=0.2.1'
+    cytoolz: '>=0.7.3'
+    dask-core: '>=1.0.0,!=2.17.0'
+    imageio: '>=2.4.1'
+    libgcc-ng: '>=7.5.0'
+    libstdcxx-ng: '>=7.5.0'
+    networkx: '>=2.2'
+    numpy: '>=1.16.6,<2.0a0'
+    packaging: '>=20.0'
+    pillow: '>=6.1.0,!=7.1.0,!=7.1.1,!=8.3.0'
+    python: '>=3.8,<3.9.0a0'
+    pywavelets: '>=1.1.1'
+    scipy: '>=1.4.1'
+    tifffile: '>=2019.7.26'
+    toolz: '>=0.7.3'
+  hash:
+    md5: cbe42091efd07c1dbdd5f14031a03ea7
+    sha256: 9fb4b05b8dcd107422a8a7748e4906bff168a2ab13a28e548e3c7dafb1a5ec6e
+  manager: conda
+  name: scikit-image
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/scikit-image-0.19.2-py38h51133e4_0.conda
+  version: 0.19.2
+- category: main
+  dependencies:
+    joblib: '>=0.11'
+    libgcc-ng: '>=7.3.0'
+    libstdcxx-ng: '>=7.3.0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    scipy: '>=0.19.1'
+    threadpoolctl: '>=2.0.0'
+  hash:
+    md5: 0afaea75ccd4254c7683c0813579c9f4
+    sha256: 12c17cbbbd9287db2e83788378c231aa6eb75f290bad026be1e9a5a3df6ed4ea
+  manager: conda
+  name: scikit-learn
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/linux-64/scikit-learn-0.24.2-py38ha9443f7_0.conda
+  version: 0.24.2
+- category: main
+  dependencies:
+    asciitree: ''
+    fasteners: ''
+    numcodecs: '>=0.6.4'
+    numpy: '>=1.7'
+    python: '>=3.6,<4'
+  hash:
+    md5: 7df763b90dcefae1c6039911fc72b694
+    sha256: 9cfcc6ce466645e80891c07e81b4c5db3082fd55fcbb9ff7a2980b4d9c8d1ff7
+  manager: conda
+  name: zarr
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/zarr-2.8.1-pyhd3eb1b0_0.conda
+  version: 2.8.1
+- category: main
+  dependencies:
+    matplotlib: '>=2.2'
+    numpy: '>=1.15'
+    pandas: '>=0.23'
+    python: '>=3.6'
+    scipy: '>=1.0'
+  hash:
+    md5: 36b64fb4e3b76ded59d6388c9582de69
+    sha256: 3a78df98681129f429a9a4e0b98c2cdb43966022bab5886fd6804319af0cc65c
+  manager: conda
+  name: seaborn
+  optional: false
+  platform: linux-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/seaborn-0.11.2-pyhd3eb1b0_0.conda
+  version: 0.11.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
+  manager: pip
+  name: click
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/d2/3d/fa76db83bf75c4f8d338c2fd15c8d33fdd7ad23a9b5e57eb6c5de26b430e/click-7.1.2-py2.py3-none-any.whl
+  version: 7.1.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 3dd15cb27e8119a24c1a7b5c93f9f3b455855e0f73993b1c25921b2f646f1dcd
+  manager: pip
+  name: colorlog
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/51/62/61449c6bb74c2a3953c415b2cdb488e4f0518ac67b35e2b03a6d543035ca/colorlog-4.8.0-py2.py3-none-any.whl
+  version: 4.8.0
+- category: main
+  dependencies: {}
+  hash:
+    sha256: ddb0b1d8243e6e3abb822bd14e447a89f4ab7439342912d590444831fa00b6a0
+  manager: pip
+  name: fastjsonschema
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e6/0b/24795939622d60f4b453aa7040f23c6a6f8b44c7c026c3b42d9842e6cc31/fastjsonschema-2.15.3-py3-none-any.whl
+  version: 2.15.3
+- category: main
+  dependencies: {}
+  hash:
+    sha256: b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d
+  manager: pip
+  name: future
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz
+  version: 0.18.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 91ba172fc5b03978764d1df5144b4ba4ab13290d7bab7a50f12d8117f8630c38
+  manager: pip
+  name: lazy-object-proxy
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/45/9f/405023669e74d96d3c221832fdea58fdd4a6faaef569146c34bf4072813e/lazy_object_proxy-1.7.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+  version: 1.7.1
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 5e2f9da88ed8236a76fffbee3ceefd259589cf42dfbc2cec2877102189fae58a
+  manager: pip
+  name: progress
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e9/ff/7871f3736dc6707435b2a2f217c46b5a5bc6ea7e0a9a443cd69146a1afd1/progress-1.4.tar.gz
+  version: '1.4'
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94
+  manager: pip
+  name: smmap
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/6d/01/7caa71608bc29952ae09b0be63a539e50d2484bc37747797a66a60679856/smmap-5.0.0-py3-none-any.whl
+  version: 5.0.0
+- category: main
+  dependencies: {}
+  hash:
+    sha256: d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4
+  manager: pip
+  name: tabulate
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/ca/80/7c0cad11bd99985cfe7c09427ee0b4f9bd6b048bd13d4ffb32c6db237dfb/tabulate-0.8.9-py3-none-any.whl
+  version: 0.8.9
+- category: main
+  dependencies: {}
+  hash:
+    sha256: afa04efcdd818a93237574791be9b2817d7077c25a068b00f8cff7baa4e59257
+  manager: pip
+  name: unidecode
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/f9/5b/7603add7f192252916b85927263b598c74585f82389e6e42318a6278159b/Unidecode-1.3.4-py3-none-any.whl
+  version: 1.3.4
+- category: main
+  dependencies: {}
+  hash:
+    sha256: ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af
+  manager: pip
+  name: wrapt
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/36/ee/944dc7e5462662270e8a379755bcc543fc8f09029866288060dc163ed5b4/wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl
+  version: 1.14.1
+- category: main
+  dependencies:
+    lazy-object-proxy: '>=1.4.0'
+    typing-extensions: '>=3.10'
+    wrapt: '>=1.11,<2'
+  hash:
+    sha256: 14ffbb4f6aa2cf474a0834014005487f7ecd8924996083ab411e7fa0b508ce0b
+  manager: pip
+  name: astroid
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/94/58/6f1bbfd88b6ba5271b4a9be99cb15cb2fe369794ba410390f0d672c6ad39/astroid-2.11.5-py3-none-any.whl
+  version: 2.11.5
+- category: main
+  dependencies:
+    click: '*'
+    pyyaml: '*'
+  hash:
+    sha256: 0945e83b1a3d9e216bdf06000b767cc96dc2a0faf356ac0cc255c45f671c84b9
+  manager: pip
+  name: docstr-coverage
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/ef/97/80f5de5ab716ece99fec79ce1ae51821ef4fcd6ccd64902b4481991fbba4/docstr_coverage-2.1.1-py3-none-any.whl
+  version: 2.1.1
+- category: main
+  dependencies:
+    flake8: '*'
+  hash:
+    sha256: 12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9
+  manager: pip
+  name: flake8-polyfill
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/86/b5/a43fed6fd0193585d17d6faa7b85317d4461f694aaed546098c69f856579/flake8_polyfill-1.0.2-py2.py3-none-any.whl
+  version: 1.0.2
+- category: main
+  dependencies:
+    click: '*'
+    pillow: '*'
+    requests: '*'
+  hash:
+    sha256: a47e6996a8a4223132ab08e3ad9bade2cb997e68a03162ff5d122469e9d6a5c6
+  manager: pip
+  name: genbadge
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/20/b8/61d32e888fdcced280813ec871c50c6d0ef17fc266fe56d600fd77201566/genbadge-1.0.6-py2.py3-none-any.whl
+  version: 1.0.6
+- category: main
+  dependencies:
+    smmap: '>=3.0.1,<6'
+  hash:
+    sha256: 8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd
+  manager: pip
+  name: gitdb
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/a3/7c/5d747655049bfbf75b5fcec57c8115896cb78d6fafa84f6d3ef4c0f13a98/gitdb-4.0.9-py3-none-any.whl
+  version: 4.0.9
+- category: main
+  dependencies:
+    six: '*'
+  hash:
+    sha256: 4ce09faec7e5192ffc3c57830e26acba0fd6cd11e1ee81af0d4df0657463bd1c
+  manager: pip
+  name: mando
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e6/cc/f6e25247c1493a654785e68cd975e479c311e99dafedd49ed17f8d300e0c/mando-0.6.4-py2.py3-none-any.whl
+  version: 0.6.4
+- category: main
+  dependencies:
+    six: '>=1.7.0'
+  hash:
+    sha256: 08c039560a6da2fe4f2c426d0766e284d3b736e355f8dd24b37367b0bb41973b
+  manager: pip
+  name: retrying
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz
+  version: 1.3.3
+- category: main
+  dependencies:
+    gitdb: '>=4.0.1,<5'
+  hash:
+    sha256: 5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d
+  manager: pip
+  name: gitpython
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/83/32/ce68915670da6fd6b1e3fb4b3554b4462512f6441dddd194fc0f4f6ec653/GitPython-3.1.27-py3-none-any.whl
+  version: 3.1.27
+- category: main
+  dependencies:
+    retrying: '>=1.3.3'
+    six: '*'
+  hash:
+    sha256: d68fc15fcb49f88db27ab3e0c87110943e65fee02a47f33a8590f541b3042461
+  manager: pip
+  name: plotly
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/1f/f6/bd3c17c8003b6641df1228e80e1acac97ed8402635e46c2571f8e1ef63af/plotly-4.14.3-py2.py3-none-any.whl
+  version: 4.14.3
+- category: main
+  dependencies:
+    colorama: '>=0.4,<0.5'
+    flake8-polyfill: '*'
+    future: '*'
+    mando: '>=0.6,<0.7'
+  hash:
+    sha256: 32ac2f86bfacbddade5c79f0e927e97f90a5cda5b86f880511dd849c4a0096e3
+  manager: pip
+  name: radon
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/cf/fe/c400dbbbbde6649ad0164ef2ffef3672baefc62ecb676f58d0f25d8f83b0/radon-4.0.0-py2.py3-none-any.whl
+  version: 4.0.0
+- category: main
+  dependencies:
+    astroid: '>=2.7'
+    jinja2: '*'
+    pyyaml: '*'
+    sphinx: '>=3.0'
+    unidecode: '*'
+  hash:
+    sha256: 007bf9e24cd2aa0ac0561f67e8bcd6a6e2e8911ef4b4fd54aaba799d8832c8d0
+  manager: pip
+  name: sphinx-autoapi
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/5e/67/249380ade22a7efaa8a335f45a9b87f2fdda499c9fdc53913096dec5d1fe/sphinx_autoapi-1.8.4-py2.py3-none-any.whl
+  version: 1.8.4
+- category: main
+  dependencies:
+    click: '>=7.0,<8.0'
+    colorlog: '>=4.0.0,<5.0.0'
+    gitpython: '>=3.0.0,<4.0.0'
+    nbformat: '>=5.1.3,<6.0.0'
+    plotly: '>=4.0.0,<5.0.0'
+    progress: '1.4'
+    radon: '>=4.0.0,<4.1.0'
+    tabulate: '>=0.8.2,<1.0.0'
+  hash:
+    sha256: a229db85982f87f31f714fbe49ec4701eb84e4843dc1a1fcbe3305e3c05c4ea7
+  manager: pip
+  name: wily
+  optional: false
+  platform: linux-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e7/2c/53638ade80511eee70c29bcc52e90ca017836feecba1762c935112249aea/wily-1.20.0-py3-none-any.whl
+  version: 1.20.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: cb2c87e85ac8e0ceae776d26d4214c8a
+    sha256: 88255dc768d4125de9043d6e035a4999dd27233d55274ae24c7e17f81c0d7bcd
+  manager: conda
+  name: blas
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/blas-1.0-mkl.conda
+  version: '1.0'
+- category: main
+  dependencies: {}
+  hash:
+    md5: 19fcb113b170fe2a0be96b47801fed7d
+    sha256: ddd0949cda5e6db367d713797b297568c5f156d5a63cef9b001edaa95cf0282f
+  manager: conda
+  name: bzip2
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/bzip2-1.0.8-h1de35cc_0.conda
+  version: 1.0.8
+- category: main
+  dependencies: {}
+  hash:
+    md5: fc798dffc9f7843e2dc563a10290ef59
+    sha256: 63dd6ec719304b69eb87129a300742d9be7d9e6090e34c66bfcf17e30e8f728a
+  manager: conda
+  name: c-ares
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/c-ares-1.18.1-hca72f7f_0.conda
+  version: 1.18.1
+- category: main
+  dependencies: {}
+  hash:
+    md5: dd4c1cfc3606b56486f7af0a99e80fa3
+    sha256: 11323c155780177c86be6046392a0b03297040b590acd2a4781d2f8c360e1356
+  manager: conda
+  name: ca-certificates
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/ca-certificates-2022.4.26-hecd8cb5_0.conda
+  version: 2022.4.26
+- category: main
+  dependencies: {}
+  hash:
+    md5: 0c36d6800a1a0f0ae244699a09d3f982
+    sha256: 6d6b24f5f00606a31171bf83274e60827a87012c2ef0d663d3514840fb96da3d
+  manager: conda
+  name: giflib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/giflib-5.2.1-haf1e3a3_0.conda
+  version: 5.2.1
+- category: main
+  dependencies: {}
+  hash:
+    md5: 65e79d0ffef79cbb8ebd3c71e74eb50a
+    sha256: 02f4d56da842411d2b524b1e455f68d8c300927450bfd5e3e0a49ce07413c279
+  manager: conda
+  name: intel-openmp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/intel-openmp-2021.4.0-hecd8cb5_3538.conda
+  version: 2021.4.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: 99b7d820514a0c07818d58c320ab21fc
+    sha256: 4a10950fa789a9386c2cbeb347875ec608c7264ecfe6f2b85096fa22b4556c64
+  manager: conda
+  name: jpeg
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jpeg-9e-hca72f7f_0.conda
+  version: 9e
+- category: main
+  dependencies: {}
+  hash:
+    md5: 446b753e081e9384c17401e5925d2f1f
+    sha256: 81b9ed66810210a4f12cd0388680d93d833515f0e122779d02b6c874dd6c157b
+  manager: conda
+  name: jxrlib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jxrlib-1.1-haf1e3a3_2.conda
+  version: '1.1'
+- category: main
+  dependencies: {}
+  hash:
+    md5: fa697ecaca74bdf72bd0a10e42a2287a
+    sha256: be3ad99669577b0ebb536b1681728643c66230c8f221459882b98faf17d83b08
+  manager: conda
+  name: libcxx
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libcxx-12.0.0-h2f01273_0.conda
+  version: 12.0.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: 584dec4a4ba735d8d7841de1948b23b1
+    sha256: 24aca54c92ca2a5cc3eaecd2415714d974e175226e00776ce8ed6e967d4ee10b
+  manager: conda
+  name: libdeflate
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libdeflate-1.8-h9ed2024_5.conda
+  version: '1.8'
+- category: main
+  dependencies: {}
+  hash:
+    md5: ffb0ee08779a6ccb4706b72523712cb7
+    sha256: cb6c66c3e947f455d6e704c82bfc53813acd7b7ed36f698ea5c29571b6e482d6
+  manager: conda
+  name: libev
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libev-4.33-h9ed2024_1.conda
+  version: '4.33'
+- category: main
+  dependencies: {}
+  hash:
+    md5: 2f6d6d3c7a46ff214a5a1a8991af9bef
+    sha256: 025e223374a075ac59f01f94f69e139b123b2783e952be966f1e9ee12a3c2a36
+  manager: conda
+  name: libgfortran
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libgfortran-3.0.1-h93005f0_2.conda
+  version: 3.0.1
+- category: main
+  dependencies: {}
+  hash:
+    md5: dc65d21181274fa72d5eea2ba9cc1c35
+    sha256: 6c3adf188a1d522fd2b7c0dd69afb2ef93acf887b7c253715af6ee3c6c17cc8d
+  manager: conda
+  name: libsodium
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libsodium-1.0.18-h1de35cc_0.conda
+  version: 1.0.18
+- category: main
+  dependencies: {}
+  hash:
+    md5: 029b8fce196d53c93af17512f6f606d8
+    sha256: 85d00c299762d296266bf733b6a4f92c85f55639abc7019622f269a0f772219d
+  manager: conda
+  name: libwebp-base
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libwebp-base-1.2.2-hca72f7f_0.conda
+  version: 1.2.2
+- category: main
+  dependencies: {}
+  hash:
+    md5: 06b2da900ba1448f28cd5a0cd43bff90
+    sha256: 5ea7fdd97d9bfd398d8fc67ca256c249197e5d5da843c683d24711a204862f8f
+  manager: conda
+  name: llvm-openmp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/llvm-openmp-12.0.0-h0dcd299_1.conda
+  version: 12.0.0
+- category: main
+  dependencies: {}
+  hash:
+    md5: ce0721847dfc63612523ecdcf37ce53e
+    sha256: d8de050f7d7ed0c672d3f6fc3b24fcbe5ef053bb26e2aa3c52c2b08f68721a75
+  manager: conda
+  name: mpi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mpi-1.0-openmpi.conda
+  version: '1.0'
+- category: main
+  dependencies: {}
+  hash:
+    md5: 32896581ee96215797b753884720e581
+    sha256: c0769e31126edd29c8f9bf51f37c31f672ef1c426c89c175a6d0a8b61535a2bb
+  manager: conda
+  name: ncurses
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/ncurses-6.3-hca72f7f_2.conda
+  version: '6.3'
+- category: main
+  dependencies: {}
+  hash:
+    md5: bb093b4af8f53670468795e5f12676e5
+    sha256: 2fa5301fa47838b1a3f88b02e88efbf523e9b5ddab4708b8d06ed793894de41a
+  manager: conda
+  name: xz
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/xz-5.2.5-hca72f7f_1.conda
+  version: 5.2.5
+- category: main
+  dependencies: {}
+  hash:
+    md5: 73628ed86f99adf6a0cb81dd20e426cd
+    sha256: 6bffb10328d883c274639032af4f9963bbfa48bb29adc86a30cf85da1efdb4ec
+  manager: conda
+  name: yaml
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/yaml-0.2.5-haf1e3a3_0.conda
+  version: 0.2.5
+- category: main
+  dependencies: {}
+  hash:
+    md5: 4264c14bdd0bd302b0232cb65f3ee275
+    sha256: 9d38a35aa37ef0b9f56fd74279d0a7201c62ec57de3fca17931bfffeb1c71e2e
+  manager: conda
+  name: zlib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/zlib-1.2.12-h4dc903c_2.conda
+  version: 1.2.12
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: 47c6f0f0789dc3b0c350e2f6caac3ebc
+    sha256: 7b8ecf457dd4b8b45cc66d61d4885b2f1dd009142666dea334441e6798ec6113
+  manager: conda
+  name: brotli
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/brotli-1.0.9-hb1e8313_2.conda
+  version: 1.0.9
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: b3c97875262c5c9026e36c77007ec260
+    sha256: f90eb9d24d40bfb978e61e1a326e98a24dc594a46e1fe531842826bdc69f89ef
+  manager: conda
+  name: charls
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/charls-2.2.0-h23ab428_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+  hash:
+    md5: aec2c3dbef836849c9260f05be04f3db
+    sha256: a0d739308d470dca99f4e680487418480c9750c6f88227a00f5dbce0d743dbbf
+  manager: conda
+  name: lerc
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/lerc-3.0-he9d5cce_0.conda
+  version: '3.0'
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: b825d0cba53756d8de6f0842f7c9b657
+    sha256: 25719a75ddf06292ba24612bb0cb350d4dba0585cb753bb26cf5086392f6ae37
+  manager: conda
+  name: libaec
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libaec-1.0.4-hb1e8313_1.conda
+  version: 1.0.4
+- category: main
+  dependencies:
+    ncurses: '>=6.3,<7.0a0'
+  hash:
+    md5: 84f04c29858f8953fdb36fe27f2148e1
+    sha256: 6b209fd3202f65dc92add0e5a942d2f6ed7e476606e81a211935aa683604dbd8
+  manager: conda
+  name: libedit
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libedit-3.1.20210910-hca72f7f_0.conda
+  version: 3.1.20210910
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: 0c959d444ac65555cb836cdbd3e9a2d9
+    sha256: f88a2ff020e096f477f0e8297088c33479de2a3362a9aaf05aa910ac1d175175
+  manager: conda
+  name: libffi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libffi-3.3-hb1e8313_2.conda
+  version: '3.3'
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: a3a3f10a81668d361eccf7b62186982f
+    sha256: 9c766bc18bad51ad28de6b157cd35c71002744c7cf91d69605631ebb48152bfc
+  manager: conda
+  name: libllvm10
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libllvm10-10.0.1-h76017ad_5.conda
+  version: 10.0.1
+- category: main
+  dependencies:
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: d69245a20ec59d8dc534c65308607129
+    sha256: 26a86fc7dbd8a1e89ab2da2d80e459e740d971235419063862bcbb313d1c1e8a
+  manager: conda
+  name: libpng
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libpng-1.6.37-ha441bb4_0.conda
+  version: 1.6.37
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: 59a6ea05350711fe0566aba5c51653d2
+    sha256: 455de701c10d6dcf6362ffd61a3737ae714718744eacb56821037564d11dd9f5
+  manager: conda
+  name: libzopfli
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libzopfli-1.0.3-hb1e8313_0.conda
+  version: 1.0.3
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: dc70fec3978d3189741886cc05fcb145
+    sha256: 760138ad419a31b9f3f76abaf25eb9046af93e3516a27915ebbd6d55ec53a62a
+  manager: conda
+  name: lz4-c
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/lz4-c-1.9.3-h23ab428_1.conda
+  version: 1.9.3
+- category: main
+  dependencies:
+    intel-openmp: 2021.*
+  hash:
+    md5: fb01af85e56bf4a79290f84e73d1de5d
+    sha256: 721db9ad17770032c59ab43b717da8d10106583162c27f46c1116e48133091d2
+  manager: conda
+  name: mkl
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mkl-2021.2.0-hecd8cb5_269.conda
+  version: 2021.2.0
+- category: main
+  dependencies:
+    libcxx: '>=4.0.1'
+    libgfortran: '>=3.0.1,<4.0.0.a0'
+    mpi: 1.0 openmpi
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 55374a530c88cf12ae5836bb77dfacd3
+    sha256: 0c785daec8bbd06d0e1ed666048dc2d8fb50e328e570358b65235e0ef293a56f
+  manager: conda
+  name: openmpi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/openmpi-4.0.2-hfa1e0ec_1.conda
+  version: 4.0.2
+- category: main
+  dependencies:
+    ca-certificates: ''
+  hash:
+    md5: 2b44a8f7a0051ffbc4814e9f99114222
+    sha256: df143ea943717c25b5a2cef35f64699bde3d3d3f05cb1eb2156ddc84efb0d03f
+  manager: conda
+  name: openssl
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/openssl-1.1.1o-hca72f7f_0.conda
+  version: 1.1.1o
+- category: main
+  dependencies:
+    ncurses: '>=6.3,<7.0a0'
+  hash:
+    md5: c54a6153e7ef82f55e7a0ae2f6749592
+    sha256: 4d28e2f21b68935185f98bb44935c02a6d6e137add54efb5d2781ecab5f550ff
+  manager: conda
+  name: readline
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/readline-8.1.2-hca72f7f_1.conda
+  version: 8.1.2
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+  hash:
+    md5: 441e45d96c8bc4078e615f993ec164ab
+    sha256: a53067bec66066df458eafd6d18c159e7739dd2a365383991ae23f000095d6ba
+  manager: conda
+  name: snappy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/snappy-1.1.9-he9d5cce_0.conda
+  version: 1.1.9
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+  hash:
+    md5: 735a2e20d007760d789b821f1c706fd3
+    sha256: 43ce426a1991e667fb03f0e22b8b2d99f2ddeab5f18777f65a29f980cabe1c8c
+  manager: conda
+  name: tbb
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/tbb-2020.3-h879752b_0.conda
+  version: '2020.3'
+- category: main
+  dependencies:
+    zlib: '>=1.2.12,<1.3.0a0'
+  hash:
+    md5: 30fd8466573613aadae5fe013306b51b
+    sha256: d575e334007d2f4355f2c832a928697400093e1b36643021c39bd4072486e9dc
+  manager: conda
+  name: tk
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/tk-8.6.11-h3fd3227_1.conda
+  version: 8.6.11
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    libsodium: '>=1.0.18,<1.0.19.0a0'
+  hash:
+    md5: 24ca0b9986211e74e86f8afbba5c092d
+    sha256: ca8d4dd16485580da828bf4bec8c1bd22a7157b211cdcfb570b298c1a512a850
+  manager: conda
+  name: zeromq
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/zeromq-4.3.4-h23ab428_0.conda
+  version: 4.3.4
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+    llvm-openmp: '>=12.0.0'
+  hash:
+    md5: 701d4bf0c61dc57368d798fa5f0c81d7
+    sha256: 827a18241a1dfdebe59d127f1456b766daa9caddf27ac34e70b0600534c9d0ad
+  manager: conda
+  name: zfp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/zfp-0.5.5-he9d5cce_6.conda
+  version: 0.5.5
+- category: main
+  dependencies:
+    brotli: '>=1.0.9,<2.0a0'
+    libcxx: '>=10.0.0'
+  hash:
+    md5: 38c8911fbbfd02287a822df899a2b927
+    sha256: 05bc36686b61a280dcdfe68498b228575f966a7ce002b53a24235003fef82e8b
+  manager: conda
+  name: brunsli
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/brunsli-0.1-h23ab428_0.conda
+  version: '0.1'
+- category: main
+  dependencies:
+    libpng: '>=1.6.37,<1.7.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: a06dcb72dc6961d37f280b4b97d74f43
+    sha256: 1089a7d110acb3d8ca5198684ecdbd23ea2c2b68252e70fe597d38307e7e3313
+  manager: conda
+  name: freetype
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/freetype-2.11.0-hd8bbffd_0.conda
+  version: 2.11.0
+- category: main
+  dependencies:
+    libcxx: '>=4.0.1'
+    libgfortran: '>=3.0.1,<4.0.0.a0'
+    openmpi: '>=4.0,<5.0.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 07e6fe79b209a81dd9218adb16a0082a
+    sha256: b98e2fca0db9d53cca5069ec0d3fef278f2da208d8eab02ae06fdb61abf9b8b2
+  manager: conda
+  name: hdf5
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/hdf5-1.10.5-mpi_openmpi_h70ef20d_1003.tar.bz2
+  version: 1.10.5
+- category: main
+  dependencies:
+    libedit: '>=3.1.20210216,<4.0a0'
+    openssl: '>=1.1.1k,<1.1.2a'
+  hash:
+    md5: 9477ac0fa61f323ca0864573ac240c8e
+    sha256: 0631feaab4d6ce7902eb48fba1d7868adb3d548b0b6434236b8f4ba805635ca9
+  manager: conda
+  name: krb5
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/krb5-1.19.2-hcd88c3b_0.conda
+  version: 1.19.2
+- category: main
+  dependencies:
+    c-ares: '>=1.7.5'
+    libcxx: '>=12.0.0'
+    libev: '>=4.33,<4.34.0a0'
+    openssl: '>=1.1.1l,<1.1.2a'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 7b5767e0cb9676eb442ef9cd2c76730b
+    sha256: f78b51a88a2833b3fd1f4626ff0bff489e07fdc9b22ae123f6d5a6fd313ba4eb
+  manager: conda
+  name: libnghttp2
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libnghttp2-1.46.0-ha29bfda_0.conda
+  version: 1.46.0
+- category: main
+  dependencies:
+    openssl: '>=1.1.1n,<1.1.2a'
+  hash:
+    md5: 8c6b3265fbbcf552ffc20a08c117cba3
+    sha256: 87d79a36ff21a21a4e198f89dd71e936682c2f5b45db4df16809c4becd1b8699
+  manager: conda
+  name: libssh2
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libssh2-1.10.0-h0a4fc7d_0.conda
+  version: 1.10.0
+- category: main
+  dependencies:
+    readline: '>=8.0,<9.0a0'
+    zlib: '>=1.2.12,<1.3.0a0'
+  hash:
+    md5: 5d3e2867383881b9227ee3aba91cd52d
+    sha256: af3c079c3789fef4e9041b231d75d651d41de0949bdee64294ff175009fc4abb
+  manager: conda
+  name: sqlite
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/sqlite-3.38.3-h707629a_0.conda
+  version: 3.38.3
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: bc8c39208f4e8205c729683dcfa7e95e
+    sha256: baaedcb01e951b39bf09e9a7938a7ac0941b56dd4ee0ea8f80e3add525b4d893
+  manager: conda
+  name: zstd
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/zstd-1.4.9-h322a384_0.conda
+  version: 1.4.9
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.5,<1.5.0a0'
+  hash:
+    md5: 75e06f4f9058f3f1a001a0ad10d7f180
+    sha256: 839126c357e6032bfdfe4504765b3649852ebfd9e8a6925d17db04df785fe709
+  manager: conda
+  name: blosc
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/blosc-1.21.0-h2842e9f_0.conda
+  version: 1.21.0
+- category: main
+  dependencies:
+    krb5: '>=1.19.2,<1.20.0a0'
+    libnghttp2: '>=1.46.0,<2.0a0'
+    libssh2: '>=1.9.0,<2.0a0'
+    openssl: '>=1.1.1n,<1.1.2a'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: b64ff06f53a518e786fda8f67480ac0a
+    sha256: bb6997e3e9cca50ee23e74d95f193cf432180c08b095e11f7a63cd4b04725d1a
+  manager: conda
+  name: libcurl
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libcurl-7.82.0-h6dfd666_0.conda
+  version: 7.82.0
+- category: main
+  dependencies:
+    jpeg: '>=9b,<10a'
+    libcxx: '>=10.0.0'
+    libwebp-base: ''
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.5,<1.5.0a0'
+  hash:
+    md5: 32cded0d1900a09a8fefdeda35e0de1c
+    sha256: 8939cfb2e775fb3dbb6316d725040c389514e8c8578bc253d127ecb87fb3dcb8
+  manager: conda
+  name: libtiff
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libtiff-4.2.0-h87d7836_0.conda
+  version: 4.2.0
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+    libffi: '>=3.3,<3.4.0a0'
+    ncurses: '>=6.3,<7.0a0'
+    openssl: '>=1.1.1n,<1.1.2a'
+    readline: '>=8.0,<9.0a0'
+    sqlite: '>=3.37.2,<4.0a0'
+    tk: '>=8.6.11,<8.7.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: f2f8398141b52f0b9ccc04b7a41c6ba0
+    sha256: c4b59d115327a84a527ef661760e34dde5146b2debbf748933fee8e5d0b3fc4a
+  manager: conda
+  name: python
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/python-3.8.13-hdfd78df_0.conda
+  version: 3.8.13
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 21ad3b69a5ce6c22e724e9dbb4cffa65
+    sha256: 014761192dcba8728848de354b9bbde31dbecdc966fbe3622c495ac50dfa834e
+  manager: conda
+  name: alabaster
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/alabaster-0.7.12-pyhd3eb1b0_0.tar.bz2
+  version: 0.7.12
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 5673d98d06171cb6eed03a6736845c4d
+    sha256: 4d849f6c8a4b60166ec21c7716de9589c083c74416a64876dd9ac6c613520a08
+  manager: conda
+  name: appdirs
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/appdirs-1.4.4-pyhd3eb1b0_0.conda
+  version: 1.4.4
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 2b544e4b465ff69e65ea1ffa4396cde0
+    sha256: f86415a36e437aba6cf00e06c3be4c34b77ff8a548a8d158b8cc9c8f0224d19d
+  manager: conda
+  name: appnope
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/appnope-0.1.2-py38hecd8cb5_1001.conda
+  version: 0.1.2
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 88e5fad50e595d527acfc96b782261cb
+    sha256: 1d4334a37f237428b8ab4319dbc8d24fa2bf949da3b86715989be7384e243490
+  manager: conda
+  name: asciitree
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/asciitree-0.3.3-py_2.conda
+  version: 0.3.3
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 3bc977a57587a7964921e3e1e2e31f9e
+    sha256: daf213916e7797c3493db7bbe43b3235d0606a18dd152af129a14bec8e5f56a2
+  manager: conda
+  name: attrs
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/attrs-21.4.0-pyhd3eb1b0_0.conda
+  version: 21.4.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: b2aa5503875aba2f1d88cae9df9a96d5
+    sha256: 09f2cf5b30825a39257320204b08146367f4346655197515b2cabcd363736488
+  manager: conda
+  name: backcall
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2
+  version: 0.2.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f81376d06da67f25c50290d26535452f
+    sha256: a1a295b6cfbdf7ba4921949cd8419b19f5596183b699fb1ad8f4670c519e7913
+  manager: conda
+  name: certifi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/certifi-2022.5.18.1-py38hecd8cb5_0.conda
+  version: 2022.5.18.1
+- category: main
+  dependencies:
+    python: '>=3.6.1'
+  hash:
+    md5: ebb5f5f7dc4f1a3780ef7ea7738db08c
+    sha256: fbc03537a27ef756162c49b1d0608bf7ab12fa5e38ceb8563d6f4859e835ac5c
+  manager: conda
+  name: cfgv
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/noarch/cfgv-3.3.1-pyhd8ed1ab_0.tar.bz2
+  version: 3.3.1
+- category: main
+  dependencies:
+    bzip2: '>=1.0.8,<2.0a0'
+    libcurl: '>=7.71.1,<8.0a0'
+    libgfortran: '>=3.0.1,<4.0.0.a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 5fac36e0ef1d6018e55f3e062c2951fe
+    sha256: e8cf0f3ad728127ebcdf5bccc3d833d0ea8be5f70e2bd050a70983946769c336
+  manager: conda
+  name: cfitsio
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/cfitsio-3.470-hee0f690_6.conda
+  version: '3.470'
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: e7a441d94234b2b5fafee06e25dbf076
+    sha256: b39aea12bf02654cdd0094c79bfa6edbc8d054787f6e2d0b96d403cd4ba4cc0d
+  manager: conda
+  name: charset-normalizer
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/charset-normalizer-2.0.4-pyhd3eb1b0_0.conda
+  version: 2.0.4
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 8e38585c33e6c659e0e5b0b18e6bf3e2
+    sha256: 1fef66d73901bd403bd5f1631e686d98e15d0b6ec7d59dc59f819bffc755b832
+  manager: conda
+  name: cloudpickle
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-2.0.0-pyhd3eb1b0_0.conda
+  version: 2.0.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: f550604d18b83878f647a491b2b343d6
+    sha256: 2224b228e6d511f11b6d76ed95107b4ea79f3d58939b8638b78485d205d38140
+  manager: conda
+  name: colorama
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda
+  version: 0.4.4
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f4758d624f8cb061e82b5a7adf193300
+    sha256: e9b18bfd181c1e8536af5a8aa8ca62ca5bddcd4acb92ec18d349c3d0eb498716
+  manager: conda
+  name: coverage
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/coverage-5.5-py38h9ed2024_2.conda
+  version: '5.5'
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: f5e365d2cdb66d547eb8c3ab93843aab
+    sha256: dda35af3a1f92960aa01fee1e3c3587da67a23f186a09a2c313fc325392216c3
+  manager: conda
+  name: cycler
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/cycler-0.11.0-pyhd3eb1b0_0.conda
+  version: 0.11.0
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 2ddf77b041a27682881dfc342abbd687
+    sha256: f30cf2d4f8c81dc4ada92dda5b8d9ffcb2a4f661ae560672c4d9747844e2147f
+  manager: conda
+  name: debugpy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/debugpy-1.5.1-py38he9d5cce_0.conda
+  version: 1.5.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 4d969aac32a0faf84af90c797bfc7fec
+    sha256: 094d553ca784fb55afdb969a028ec1de177b152f14359e998a6ba94394a09d7f
+  manager: conda
+  name: decorator
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/decorator-5.1.1-pyhd3eb1b0_0.conda
+  version: 5.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: d912068b0729930972adcaac338882c0
+    sha256: d5ccad2e614ba3f953c202a42270fe0cfdaf6c5071311a3accf28446c49a6c5b
+  manager: conda
+  name: defusedxml
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda
+  version: 0.7.1
+- category: main
+  dependencies:
+    python: 2.7|>=3.6
+  hash:
+    md5: 86c256c16d9b416ffee75a4cfccf6c9a
+    sha256: 5cc5c5d866dc7dce069e27a430bd9ae7668d56b8a5814552fd0b52930679cfa3
+  manager: conda
+  name: distlib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/distlib-0.3.2-pyhd3eb1b0_0.conda
+  version: 0.3.2
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: dda26c59f2e4f82e09dfe7158537f067
+    sha256: 37a5bcb5b87e3c24fb6f385332de301cc9568f6651d34e196c04dd2a0f01adf1
+  manager: conda
+  name: docutils
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/docutils-0.17.1-py38hecd8cb5_1.conda
+  version: 0.17.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f6003ec78f7c64fb47686adccc2675d1
+    sha256: d1948d6da066a37ba6e988f4b784a2ad0b6eb3346347a121eb04badd3683417d
+  manager: conda
+  name: entrypoints
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/entrypoints-0.4-py38hecd8cb5_0.conda
+  version: '0.4'
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: 7be61d1c3c555fb37682b28d7a53d622
+    sha256: 19861fd553ee36da352401ece4564271be8a958726a527b9731f229fa6131c5d
+  manager: conda
+  name: executing
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/executing-0.8.3-pyhd3eb1b0_0.conda
+  version: 0.8.3
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: 527be2ebbc60c0de6533ce33132ce303
+    sha256: c73632a2f1b916a7f21a0aac4e8bed971bff9baa665f46e0674f3f5d8c304dc1
+  manager: conda
+  name: filelock
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/filelock-3.6.0-pyhd3eb1b0_0.conda
+  version: 3.6.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 6114cd136aa78d5db350e1fbd33fe49b
+    sha256: de43cc03fc3a256fbeae409b786221414014988f07a563b8564b5288731b3101
+  manager: conda
+  name: fsspec
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/fsspec-2022.3.0-py38hecd8cb5_0.conda
+  version: 2022.3.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 8f43a528cf83b43af38a4d142fa38b8a
+    sha256: 2214ad8a3c5f0afc3c13ced28dd1961b98ff780b4e8562357a85c243e7fe678e
+  manager: conda
+  name: idna
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/idna-3.3-pyhd3eb1b0_0.conda
+  version: '3.3'
+- category: main
+  dependencies:
+    python: '>=3.4'
+  hash:
+    md5: 306855b2038e489d01dff5b343a8adb9
+    sha256: 53067081ee63245250c34ce8224c43acafffd4b4d661302594203d54a9ba02b2
+  manager: conda
+  name: imagesize
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/imagesize-1.3.0-pyhd3eb1b0_0.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: e40edff2c5708f342cef43c7f280c507
+    sha256: d288f67c03b20a885464056a215baef626d29f95df56e1974379a17f8e55a24e
+  manager: conda
+  name: iniconfig
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/iniconfig-1.1.1-pyhd3eb1b0_0.tar.bz2
+  version: 1.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 553832c0b872a28088a0001fa2ba3822
+    sha256: 57b26ed1992a45f5c3c1261001956cd31c05ffe7c57c452af07bc3417c57d143
+  manager: conda
+  name: ipython_genutils
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda
+  version: 0.2.0
+- category: main
+  dependencies:
+    python: '>=3.6.1,<4.0'
+  hash:
+    md5: 75f2497fe01a9ac6208d72e26066b76a
+    sha256: c20bf1d70180ea452b16b89b9a62aab2192a8b8ad71d63c46beabc1fbb9b2451
+  manager: conda
+  name: isort
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/isort-5.9.3-pyhd3eb1b0_0.conda
+  version: 5.9.3
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: cae25b839f3b24686e683addde01b742
+    sha256: fba9ae67546481614bc575c5be838184f3856110a0d6a948aff34872da54ef67
+  manager: conda
+  name: joblib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/joblib-1.1.0-pyhd3eb1b0_0.conda
+  version: 1.1.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 4e721ee2dbfa20069719d2ee19185031
+    sha256: 595b96c7e20703125ff4865ee55ba282357b57438a609dd7660610318127ca2e
+  manager: conda
+  name: json5
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/json5-0.9.6-pyhd3eb1b0_0.conda
+  version: 0.9.6
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 69e898666e90cfa4c7ecc827b1990f09
+    sha256: 93a2b15492205b93d54309e114c16c986161cbd56a811539d343514bbebed354
+  manager: conda
+  name: kiwisolver
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/kiwisolver-1.4.2-py38he9d5cce_0.conda
+  version: 1.4.2
+- category: main
+  dependencies:
+    jpeg: '>=9b,<10a'
+    libtiff: '>=4.1.0,<5.0a0'
+  hash:
+    md5: 697aba7a3308226df7a93ccfeae16ffa
+    sha256: 1da8175b3b624a3a0b84c745c5d8eb66c2833001163398dfc25d385ea9c54fd0
+  manager: conda
+  name: lcms2
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/lcms2-2.12-hf1fd2bf_0.conda
+  version: '2.12'
+- category: main
+  dependencies:
+    giflib: '>=5.2.1,<5.3.0a0'
+    jpeg: '>=9d,<10a'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp-base: ''
+  hash:
+    md5: 027d2450b64e251b8169798f6121b47a
+    sha256: 85e9b259e51d342cce7ddedb4214864a117e288061e60128748c0d058395f0b9
+  manager: conda
+  name: libwebp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/libwebp-1.2.2-h56c3ce4_0.conda
+  version: 1.2.2
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    libllvm10: '>=10.0.1,<10.1.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 3f9309846c28eec6bdafe4aa39ffa9f6
+    sha256: a3bb1ecca2ac1956555001a0477202cb8fb4c5cff67023cc4dbda9cdc5667e2f
+  manager: conda
+  name: llvmlite
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/llvmlite-0.36.0-py38he4411ff_4.conda
+  version: 0.36.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: c20b81da1e7a7eb21660d3d069709fa1
+    sha256: 3a279a21a3d7470bf5d041492183330a2f9c1997d0b8a6839056ac459644254b
+  manager: conda
+  name: locket
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/locket-1.0.0-py38hecd8cb5_0.conda
+  version: 1.0.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: ce67815e11d846384abf63db2db75a8a
+    sha256: 92c350c671df07fae0af3ef89c312b302db3b6c04e6086502b2219edc46809d1
+  manager: conda
+  name: markupsafe
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/markupsafe-2.0.1-py38h9ed2024_0.conda
+  version: 2.0.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 86e2057ac3b9c7a26ab9a52bb0f53d6e
+    sha256: 9d6ab0043208dfe3d7f8998e44c1d08e6cb8a883174682c415590eb2c4501b77
+  manager: conda
+  name: mccabe
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mccabe-0.6.1-py38_1.conda
+  version: 0.6.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 14552fbbea9538d0aa2686a3c7cba628
+    sha256: d9bb61e0e16ba54f11b149d71d28f88208f02bf5cacdd8e73be81b87a79a0505
+  manager: conda
+  name: mistune
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mistune-0.8.4-py38h1de35cc_1001.conda
+  version: 0.8.4
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: e30b674f018b25357c076ae407d769b9
+    sha256: 0924c0efe3d2b48a34664533b14b2df85e979c56e2d578db9cadd58bb490b815
+  manager: conda
+  name: mock
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/mock-4.0.3-pyhd3eb1b0_0.conda
+  version: 4.0.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: ac1210cc005fb8bd631ea8beb8343332
+    sha256: 96aefa911a4022f832148e09df4cecb3a0e62ac353e68f7b27018b5c594c9491
+  manager: conda
+  name: more-itertools
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/more-itertools-8.12.0-pyhd3eb1b0_0.conda
+  version: 8.12.0
+- category: main
+  dependencies:
+    openmpi: '>=4.0,<4.1.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 2e4c9f6cde00b616c2fb0c025eb225d9
+    sha256: 15e41dfff597636808cd42ec818798359a72a03cd907f77b946f4f303f047c97
+  manager: conda
+  name: mpi4py
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mpi4py-3.0.3-py38h27a7d74_1.conda
+  version: 3.0.3
+- category: main
+  dependencies:
+    libcxx: '>=12.0.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 06c7070693428ccf4815c78bfeaa841b
+    sha256: ed01bcaf8171d56ab6c16b2a4ecda4278b5f8c44ac67febc68126037fd3fc9d1
+  manager: conda
+  name: msgpack-python
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/msgpack-python-1.0.3-py38haf03e11_0.conda
+  version: 1.0.3
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 148362ba07f92abab76999a680c80084
+    sha256: f3f473a60a6766a7f64447c0c7e49d0640db7f995255951e3477b5d21d144dc7
+  manager: conda
+  name: munkres
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/munkres-1.1.4-py_0.conda
+  version: 1.1.4
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f0d2034d99e98561f9859f0a1a13360f
+    sha256: 3cbcda8a700ba97263c09da342a9a0cfc4b8529c137c7629afae2ad7e9cef905
+  manager: conda
+  name: nest-asyncio
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/nest-asyncio-1.5.5-py38hecd8cb5_0.conda
+  version: 1.5.5
+- category: main
+  dependencies:
+    python: '>=3.8'
+  hash:
+    md5: 6c97a8687676de8dac42bd8373892397
+    sha256: def7767107e59907dc1a7ccfbf47657770b1fa0ddfb735a6b4e6e10833d0cc0a
+  manager: conda
+  name: networkx
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/networkx-2.7.1-pyhd3eb1b0_0.conda
+  version: 2.7.1
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libtiff: '>=4.1.0,<5.0a0'
+  hash:
+    md5: 882833bd7befc5e60e6fba9c518c1b79
+    sha256: 63ce54a6960e6fd0f4ec1ab511fb16dc6219bc6aac52a98d881872f70d3ffca5
+  manager: conda
+  name: openjpeg
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/openjpeg-2.4.0-h66ea3da_0.conda
+  version: 2.4.0
+- category: main
+  dependencies:
+    python: '!=3.0,!=3.1,!=3.2,!=3.3'
+  hash:
+    md5: 5547ced9e3bb4c513405998957b52c7b
+    sha256: 39f9d46b6334bd0492ca89c095aea10de7d0976dc96d20013a4ef028e21e6b2a
+  manager: conda
+  name: pandocfilters
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pandocfilters-1.5.0-pyhd3eb1b0_0.conda
+  version: 1.5.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: c6f0f6219bf5ce2b510ef4b75cbc3e01
+    sha256: e6be6d3a4fec00fc3699a716bbbf48779ef4ab9149fa92df71d9a03d69a66a84
+  manager: conda
+  name: parso
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.3-pyhd3eb1b0_0.conda
+  version: 0.8.3
+- category: main
+  dependencies:
+    python: '>=3'
+  hash:
+    md5: 4a6363fd8dda664b95f99f7c5aa95abc
+    sha256: c4c974ac5bab1628bc472eb271903206ebc349f30cf590740560b6284118852a
+  manager: conda
+  name: pickleshare
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda
+  version: 0.7.5
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 142fadba8c1ed28a493f1a327d0a3a1b
+    sha256: c9faccf38ee325fb8808e7e373f9aa76b029868d3ff3ce33d54757d1137cd74c
+  manager: conda
+  name: pluggy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pluggy-0.13.1-py38hecd8cb5_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    python: '>=2.7,!=3.0,!=3.1,!=3.2,!=3.3,!=3.4,<4'
+  hash:
+    md5: 101a437c0ab238eaa1736dd665b33fa2
+    sha256: 691d29b4dd9784b9cb753e312cb336ef7a47c9f2c2eb5682cbcbc94eff976f8f
+  manager: conda
+  name: prefixed
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/noarch/prefixed-0.3.2-pyhd8ed1ab_0.tar.bz2
+  version: 0.3.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 05275f89084c4ce7f9b0bc1e258b3e9e
+    sha256: 765445941cb6bebab31b3a10f7f16e17f7c04f71c39c4e908da4855856f487f8
+  manager: conda
+  name: prometheus_client
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.13.1-pyhd3eb1b0_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 7441d2827d4bfbcc1fa308875a146246
+    sha256: 664254ab6de7f14d4077bdaeceda2bf0144fd841e257d07bb70427fadf08c588
+  manager: conda
+  name: ptyprocess
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/ptyprocess-0.7.0-pyhd3eb1b0_2.conda
+  version: 0.7.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: a87d6d9827e5dff68d34d69971f8a9b1
+    sha256: e579ae0a1205e5706dbf00deacbebc87b889f11c48e2f12323dfec7d2d15b27c
+  manager: conda
+  name: pure_eval
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pure_eval-0.2.2-pyhd3eb1b0_0.conda
+  version: 0.2.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 7205a898ed2abbf6e9b903dff6abe08e
+    sha256: 4dd7cbf9d6a95aa9fd1ae74db2fabc7cf904e9d61a479f169c8e118a800fcdb3
+  manager: conda
+  name: py
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/py-1.11.0-pyhd3eb1b0_0.conda
+  version: 1.11.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 30e8cdd78a0754c2d789d53fa465cd30
+    sha256: 250377acee595ce4de41ee3a81bd76a6cdf7f5a75c7dcf243921ff780c7afd00
+  manager: conda
+  name: pycodestyle
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pycodestyle-2.7.0-pyhd3eb1b0_0.conda
+  version: 2.7.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 135a72ff2a31150a3a3ff0b1edd41ca9
+    sha256: 4405b5aeff26863972c82e8b54d09f88cd084f70e01e4343107b2676ffbeab57
+  manager: conda
+  name: pycparser
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.21-pyhd3eb1b0_0.conda
+  version: '2.21'
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: eaecb0dee9d296e2ba1dadf6902149f3
+    sha256: 03e904ba20d625cc9177b97b4f8d5461427c9d0bd4098a0b179d471522edba0d
+  manager: conda
+  name: pyflakes
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyflakes-2.3.1-pyhd3eb1b0_0.conda
+  version: 2.3.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: eff55c770961f459a734cf86768aac98
+    sha256: c764e23bddaa42add41931581cb97f6c5857b4075ec4f3470c59dd8d84954a68
+  manager: conda
+  name: pygments
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pygments-2.11.2-pyhd3eb1b0_0.conda
+  version: 2.11.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 6bca2ae9c9aae9ccdebcb8cf2aa87cb3
+    sha256: 283f6336e6c02b1fb0310b10a609628631b97f280d30320655f31219d7baf568
+  manager: conda
+  name: pyparsing
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyparsing-3.0.4-pyhd3eb1b0_0.conda
+  version: 3.0.4
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 113311ade8193fee41c406cec2678852
+    sha256: 4b9580d49522c208f695addb335db641f431618177f212e550ef88c717af188e
+  manager: conda
+  name: pyrsistent
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pyrsistent-0.18.0-py38hca72f7f_0.conda
+  version: 0.18.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 54d4dfc99989f03cefe7d922a741f3cf
+    sha256: a1e49c1397e7e65952bfc3182d1c6e82b04b6eaa8c41f66b61dc558c801e3396
+  manager: conda
+  name: pysocks
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pysocks-1.7.1-py38_1.conda
+  version: 1.7.1
+- category: main
+  dependencies:
+    python: '>=3.3'
+  hash:
+    md5: ad1b2f7b33a45d0d68979ca2ad84b6a9
+    sha256: b723a1b02d5c8ba54b10003008930bf815f7ca4c9a01c283a05fe24b7fe0eb9b
+  manager: conda
+  name: python-fastjsonschema
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/python-fastjsonschema-2.15.1-pyhd3eb1b0_0.conda
+  version: 2.15.1
+- category: main
+  dependencies:
+    python: 3.8.*
+  hash:
+    md5: 156803acb0247c263c9586f190b72f1c
+    sha256: dab6aad0a321499739e562edfb710c106f1a62863c11e205beec76493ec4bf59
+  manager: conda
+  name: python_abi
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.8-2_cp38.tar.bz2
+  version: '3.8'
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 76415b791ffd2007687ac5f0665aa7af
+    sha256: a82bbd32ba9470ef8ccf9ba17751cafc4dff6f1b3f55136d3e16a11766ccaed5
+  manager: conda
+  name: pytz
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pytz-2021.3-pyhd3eb1b0_0.conda
+  version: '2021.3'
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    yaml: '>=0.2.5,<0.3.0a0'
+  hash:
+    md5: 243ac745f35beceb65ec17bff3698757
+    sha256: 817def553ac782efced3784fcce1b4cb57f758750ee721809444b91389f62f19
+  manager: conda
+  name: pyyaml
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pyyaml-6.0-py38hca72f7f_1.conda
+  version: '6.0'
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    zeromq: '>=4.3.4,<4.4.0a0'
+  hash:
+    md5: fe003dc72d676b6ce6f6abd3fb575964
+    sha256: 689a0982e1b0f485483514a94517b4cde8696577695720d64763cd89ec8a59cd
+  manager: conda
+  name: pyzmq
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pyzmq-22.3.0-py38he9d5cce_2.conda
+  version: 22.3.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: bfa3c5c61a5a91e528a1d2d1e3cae6c9
+    sha256: 81f8bc2fa5d8ffe03fa591777e06c424959d2599252374d08d4e1bf77136d8d9
+  manager: conda
+  name: send2trash
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.8.0-pyhd3eb1b0_1.conda
+  version: 1.8.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 34586824d411d36af2fa40e799c172d0
+    sha256: 71c97b4ddc3d19ed41bfa1a2d40f620f96b4d46f097dc48ab115b36640f7df0a
+  manager: conda
+  name: six
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/six-1.16.0-pyhd3eb1b0_1.conda
+  version: 1.16.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 51d4ef66b0571f459e6a9613444bdab6
+    sha256: 637f14bc49771488e46988513ce65ef4810f211df6d27f7cc6d5a43eb6842692
+  manager: conda
+  name: sniffio
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/sniffio-1.2.0-py38hecd8cb5_1.conda
+  version: 1.2.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: c8c10f2cd854c0a27630760958bba60c
+    sha256: 4bf87350cb5e65746802eade943b5fa9decfa922f76ca8ca8ae3aa91ab967852
+  manager: conda
+  name: snowballstemmer
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/snowballstemmer-2.2.0-pyhd3eb1b0_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: 0c60976249f116d5aa21fd50f0f94990
+    sha256: d351c9c0dab19d3aa5ae4530a98f5333c6e7f35d752b4f45e4b16d96c568ed17
+  manager: conda
+  name: sphinx_rtd_theme
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinx_rtd_theme-0.4.3-pyhd3eb1b0_0.conda
+  version: 0.4.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: ac923499f97b9a9ab7c672b27cb2a1a8
+    sha256: d4b89871d0b01c20d9aa253b2f46fb1c78cd75c91c20d63ff8cf2afd68ae3fca
+  manager: conda
+  name: sphinxcontrib-applehelp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-applehelp-1.0.2-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: bc39c2b70430734b5879d6b504e3311f
+    sha256: 8edb87f6abbb11d93df4b51037f6073415fe6f0ee6693944cd25f05e9cd6a052
+  manager: conda
+  name: sphinxcontrib-devhelp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-devhelp-1.0.2-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 2af558ca8b56151110c7a3639a1ea348
+    sha256: fed2fc45ac491aa059a129e5b3b63090ab2d23327fd2a68b441a7e5c92a809cd
+  manager: conda
+  name: sphinxcontrib-htmlhelp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-htmlhelp-2.0.0-pyhd3eb1b0_0.conda
+  version: 2.0.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: e43f8de7d6a717935ab220a0c957771d
+    sha256: 20c858fa8f7f9cda1eb9bb026d045f4eec29e5ba5f360affcb373931a62e31c8
+  manager: conda
+  name: sphinxcontrib-jsmath
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-jsmath-1.0.1-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.1
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 08d67f73f640b4d1e5e8890a324b60e3
+    sha256: a2594974c21898df67fceeeac7f12e5dd3c07fb7aae826eba2203b8812b801c8
+  manager: conda
+  name: sphinxcontrib-qthelp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-qthelp-1.0.3-pyhd3eb1b0_0.tar.bz2
+  version: 1.0.3
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 0440b84dfd478f340cf14c2d7c24f6c7
+    sha256: b0dcd6f5a1707742e189a377821e75c1cc73d7859d887b8924832eeb13bd02d4
+  manager: conda
+  name: sphinxcontrib-serializinghtml
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-serializinghtml-1.1.5-pyhd3eb1b0_0.conda
+  version: 1.1.5
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: bd2a5c664c982e8637ae17b1662bd9a4
+    sha256: 2b0bd8d29b8142a9fad8b70059576d22434144cff59f46c25e8e3b60fe9b8fac
+  manager: conda
+  name: testpath
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/testpath-0.5.0-pyhd3eb1b0_0.conda
+  version: 0.5.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: bbfdbae4934150b902f97daaf287efe2
+    sha256: 440bc067a57f888fb602339e2d1a1661ffee79244cd9c5179fc3bbc560efc56b
+  manager: conda
+  name: threadpoolctl
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/threadpoolctl-2.2.0-pyh0d69192_0.conda
+  version: 2.2.0
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: cda05f5f6d8509529d1a2743288d197a
+    sha256: c50b132439b0260e4b43649ce30171a4792e186c6f3d657e5936578d4ee3fc56
+  manager: conda
+  name: toml
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/toml-0.10.2-pyhd3eb1b0_0.conda
+  version: 0.10.2
+- category: main
+  dependencies:
+    python: '>=3.5'
+  hash:
+    md5: 9fedc09c1ff4c9bc22695093c1ecd335
+    sha256: 706678943282f24560c5aae936f78f061ffb289cec5cfcf545fcc64f0d7ee5b0
+  manager: conda
+  name: toolz
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.2-pyhd3eb1b0_0.conda
+  version: 0.11.2
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 430b78790d6d2f428bcf6cfd4a64127c
+    sha256: cdfd98174706c9cb70f3c4a8bf6bd94a1c30ea8f38e23365210bc55e866a6b68
+  manager: conda
+  name: tornado
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/tornado-6.1-py38h9ed2024_0.conda
+  version: '6.1'
+- category: main
+  dependencies:
+    python: '>=2.7'
+  hash:
+    md5: 9e0c24d3f7c51fbd42a2ebeb50b5c0fa
+    sha256: 09f8311136bc5bab5f7bd070c98f0ef3ad866ee122781a13fe80210510843671
+  manager: conda
+  name: tqdm
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.62.2-pyhd3eb1b0_1.conda
+  version: 4.62.2
+- category: main
+  dependencies:
+    python: '>=3.7'
+  hash:
+    md5: 675f60e84f695e63749b09f9ed464eda
+    sha256: e34f5af510316895712446c879fc9c43e278f52c2c61b97fa54836abcb9dd5da
+  manager: conda
+  name: traitlets
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/traitlets-5.1.1-pyhd3eb1b0_0.conda
+  version: 5.1.1
+- category: main
+  dependencies:
+    python: '>=3.6'
+  hash:
+    md5: 8d4303f11560fe9621c962e87cf64d27
+    sha256: 1620b404b9b4bb937c03276adcebfe3cffe5df65911b2680169edc9a7c5101e8
+  manager: conda
+  name: typing_extensions
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-4.1.1-pyh06a4308_0.conda
+  version: 4.1.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: ffa649340272c3f6466ba01da254c3b0
+    sha256: 19f0d42ae7e5ab32e558812a0025c00de31e2a3636bb051ae3527db196264532
+  manager: conda
+  name: wcwidth
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-pyhd3eb1b0_0.conda
+  version: 0.2.5
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 06bf5fe6b42bc9d00c96f4be7beaaf26
+    sha256: 8dbe8f3a0fdcf4e0bc62c2755cfd9559d48e4efebd8b5a50e6d4232f621f3fbb
+  manager: conda
+  name: webencodings
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/webencodings-0.5.1-py38_1.conda
+  version: 0.5.1
+- category: main
+  dependencies:
+    python: ''
+  hash:
+    md5: ab85e96e26da8d5797c2458232338b86
+    sha256: d3f762f14aff275613ef8d0df2b1e608e8174960da05a1815f36e70cd62aaae9
+  manager: conda
+  name: wheel
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/wheel-0.37.1-pyhd3eb1b0_0.conda
+  version: 0.37.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 581323dbb487b96fb811106546a73af4
+    sha256: 75e4afdebae400744061f72675c89da094482f76414b88076dfa833c6d0d148c
+  manager: conda
+  name: zipp
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/zipp-3.8.0-py38hecd8cb5_0.conda
+  version: 3.8.0
+- category: main
+  dependencies:
+    idna: '>=2.8'
+    python: '>=3.8,<3.9.0a0'
+    sniffio: '>=1.1'
+  hash:
+    md5: 626b497546f7f6e4286a86f2b8b84d51
+    sha256: 9453c3d5042e08175d75f7779f7e21e7102cb1b919690828cfd9bf5052d26943
+  manager: conda
+  name: anyio
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/anyio-3.5.0-py38hecd8cb5_0.conda
+  version: 3.5.0
+- category: main
+  dependencies:
+    python: '>=3.5'
+    six: ''
+  hash:
+    md5: 140486e2ce4f3931b44aa5f7ff8d88da
+    sha256: 6d50672764e75e322db2da755378ba520d4b3c60dc60c5afcb5a539634cacda8
+  manager: conda
+  name: asttokens
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/asttokens-2.0.5-pyhd3eb1b0_0.conda
+  version: 2.0.5
+- category: main
+  dependencies:
+    python: '>=3.6'
+    pytz: '>=2015.7'
+  hash:
+    md5: 61575e8b70e18ebc54e65da5e441b861
+    sha256: c467e0ec03fb3f974cb903778b5806f4bced21534233c2e6b80bf879dc664460
+  manager: conda
+  name: babel
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/babel-2.9.1-pyhd3eb1b0_0.conda
+  version: 2.9.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+    six: '>=1.9.0'
+    wcwidth: '>=0.1.4'
+  hash:
+    md5: 3b04eb0edd6b9e0a1fcbba9b0802acc4
+    sha256: 684f045027929395319a4d4c4128bb0e471a8aee8ce5563c9f461999c0111938
+  manager: conda
+  name: blessed
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/blessed-1.19.1-py38h50d1736_1.tar.bz2
+  version: 1.19.1
+- category: main
+  dependencies:
+    libffi: '>=3.3'
+    pycparser: ''
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 5f7caa2c82d00004ee9bde44840945dc
+    sha256: 3ba87d9ae28bc72e88fa47f4e8ad1b7a2df9d0e6421b47f26cfc02bf3191e5df
+  manager: conda
+  name: cffi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/cffi-1.15.0-py38hc55c11b_1.conda
+  version: 1.15.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    toolz: '>=0.10.0'
+  hash:
+    md5: f58527d5ddae8d53ee7570e698b2ee49
+    sha256: 2cacd85940ebd0106de3e53debbdccb01625675c78f5abcf82a9b28008128d4d
+  manager: conda
+  name: cytoolz
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/cytoolz-0.11.0-py38haf1e3a3_0.conda
+  version: 0.11.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+    six: ''
+  hash:
+    md5: 335fdb99580fb176808d42ccd3c332e1
+    sha256: 4768f27621090559d1b62bcf7fc28bd41f1a0124995fc57a4c9113c961c839ba
+  manager: conda
+  name: fasteners
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/fasteners-0.16.3-pyhd3eb1b0_0.conda
+  version: 0.16.3
+- category: main
+  dependencies:
+    brotli: '>=1.0.1'
+    munkres: ''
+    python: '>=3.6'
+  hash:
+    md5: bb9c5b5a6d892fca5efe4bf0203b6a48
+    sha256: 6c1c48972893046fd199e2c12a694f199c74246f00b6db761bf8c375433d4164
+  manager: conda
+  name: fonttools
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/fonttools-4.25.0-pyhd3eb1b0_0.conda
+  version: 4.25.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    zipp: '>=0.5'
+  hash:
+    md5: 97d838ed6fa3cadd79922114accfc0fb
+    sha256: f7deff8bdd2fcf5f98a7953a9058f6cbffb7ec08393038cd81e500b5ede22038
+  manager: conda
+  name: importlib-metadata
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/importlib-metadata-4.11.3-py38hecd8cb5_0.conda
+  version: 4.11.3
+- category: main
+  dependencies:
+    python: '>=3.6'
+    zipp: '>=3.1.0'
+  hash:
+    md5: 3e7caf9dbd3b4771e9b951ffc7cdad80
+    sha256: bdb5316b10c07956d3dc067df2e3b1c64faecdc859cc24cdab164669a80ce57b
+  manager: conda
+  name: importlib_resources
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/importlib_resources-5.2.0-pyhd3eb1b0_1.conda
+  version: 5.2.0
+- category: main
+  dependencies:
+    parso: '>=0.8.0,<0.9.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f0aa32d375b15db6f9aa00c5ceae339c
+    sha256: 11f7a3dd0e7209bfa7b3a682646f85cc50667d3004da3f0df63da0257153ae50
+  manager: conda
+  name: jedi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jedi-0.18.1-py38hecd8cb5_1.conda
+  version: 0.18.1
+- category: main
+  dependencies:
+    markupsafe: '>=2.0'
+    python: '>=3.6'
+  hash:
+    md5: a5b0429ead9704cd1ad0b044c97e728f
+    sha256: e15a4ea4eb6b33873f30d9644cb9a0d60cc2403bbfd3e3918ee40b24dce54cc1
+  manager: conda
+  name: jinja2
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jinja2-3.0.3-pyhd3eb1b0_0.conda
+  version: 3.0.3
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    traitlets: ''
+  hash:
+    md5: 8c94540b1c15d9fd8b5a1dc8c4c92f64
+    sha256: bf83ae5bbd60ba66932557f84aae20c3e238546bb39848ca488f5dff8972043a
+  manager: conda
+  name: jupyter_core
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jupyter_core-4.10.0-py38hecd8cb5_0.conda
+  version: 4.10.0
+- category: main
+  dependencies:
+    pygments: '>=2.4.1,<3'
+    python: ''
+  hash:
+    md5: af46aff4922ca45df6ba19b313df6070
+    sha256: 3fb6380242934cad8db44dd1e77597529eb6c73c88144ee5e7f86f18e6eb70e9
+  manager: conda
+  name: jupyterlab_pygments
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda
+  version: 0.1.2
+- category: main
+  dependencies:
+    python: '>=3.6'
+    traitlets: ''
+  hash:
+    md5: 47e865f8b884de7c5d516349e83457a7
+    sha256: 6c33d164f44eb4be2a1a303d426a6df297739458a557fa45906741552569df02
+  manager: conda
+  name: matplotlib-inline
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/matplotlib-inline-0.1.2-pyhd3eb1b0_2.conda
+  version: 0.1.2
+- category: main
+  dependencies:
+    mkl: '>=2021.2.0,<2022.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: aa83a42208ae871c8f1d3e15720024c4
+    sha256: be6c56e88cdb9df5b6dfb2f20fee412fbfb382deb061252e851420224112a0dd
+  manager: conda
+  name: mkl-service
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mkl-service-2.4.0-py38h9ed2024_0.conda
+  version: 2.4.0
+- category: main
+  dependencies:
+    pyparsing: '>=2.0.2,!=3.0.5'
+    python: '>=3.6'
+  hash:
+    md5: 07bbfbb961db7fa329cc42716943ea62
+    sha256: d3b400cd9613b5570dd5a4e788b0a2947d612b2e8ab47a82a83603d94c26959a
+  manager: conda
+  name: packaging
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/packaging-21.3-pyhd3eb1b0_0.conda
+  version: '21.3'
+- category: main
+  dependencies:
+    locket: ''
+    python: '>=3.5'
+    toolz: ''
+  hash:
+    md5: d02d8b6ea30c680d3fafe4ac50cc4b18
+    sha256: 7cb5bebcd6698effe5162a353fd7d6e0275a4ca07168f730b1917d587a134d13
+  manager: conda
+  name: partd
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/partd-1.2.0-pyhd3eb1b0_1.conda
+  version: 1.2.0
+- category: main
+  dependencies:
+    ptyprocess: '>=0.5'
+    python: ''
+  hash:
+    md5: 765b2562d6cdd14bb6d44fc170a04331
+    sha256: fb4b14fdb5e57becda5b8b88b453626ce12edb49fc8cec88ddaea40b52277494
+  manager: conda
+  name: pexpect
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pexpect-4.8.0-pyhd3eb1b0_3.conda
+  version: 4.8.0
+- category: main
+  dependencies:
+    freetype: '>=2.10.4,<3.0a0'
+    jpeg: ''
+    lcms2: '>=2.12,<3.0a0'
+    libcxx: '>=12.0.0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp: '>=1.2.0,<1.3.0a0'
+    python: '>=3.8,<3.9.0a0'
+    tk: ''
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: 9b05d3f1ef363938abfd1140d3104a81
+    sha256: ec2c8d9e46620dabe9aa10e704eb3d9054a9951e8d546b231fb78b136c0af1bc
+  manager: conda
+  name: pillow
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pillow-9.0.1-py38hde71d04_0.conda
+  version: 9.0.1
+- category: main
+  dependencies:
+    python: ''
+    wcwidth: ''
+  hash:
+    md5: 19fa1fa6a03645e39e7dce3bdbe9d72f
+    sha256: b7251f3c678beab7ef9d7b9a8491a5d61b2b2f3f85e4a18fa4d833c61e673945
+  manager: conda
+  name: prompt-toolkit
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.20-pyhd3eb1b0_0.conda
+  version: 3.0.20
+- category: main
+  dependencies:
+    python: ''
+    six: '>=1.5'
+  hash:
+    md5: 211ee00320b08a1ac9fea6677649f6c9
+    sha256: 01e82704b3d84c1b0b1f8823fa64259eb372a1278e6a40dddf2cefb4c96ab942
+  manager: conda
+  name: python-dateutil
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.2-pyhd3eb1b0_0.conda
+  version: 2.8.2
+- category: main
+  dependencies:
+    certifi: '>=2016.9.26'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: d7a83933228d51053ea27c48a2959f1a
+    sha256: a15504ab17d0b5505900c515037b12d71fc804a1c254860614d581fd12c587a0
+  manager: conda
+  name: setuptools
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/setuptools-61.2.0-py38hecd8cb5_0.conda
+  version: 61.2.0
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=2.0'
+  hash:
+    md5: 0fec234d151c6afae1a921bf6dbe77ed
+    sha256: f3cac26daf6ff78612ce9a2f41f1dd7a9d310934260581e9a2e67d15cff64c59
+  manager: conda
+  name: snakeviz
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/snakeviz-2.0.1-py38hecd8cb5_0.conda
+  version: 2.0.1
+- category: main
+  dependencies:
+    ptyprocess: ''
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=4'
+  hash:
+    md5: 70cd24be2f01996667d580408c973d93
+    sha256: 6a47af5b3595923a3854894c2580371ea306f35a15399cc02931a8ffcd195611
+  manager: conda
+  name: terminado
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/terminado-0.13.1-py38hecd8cb5_0.conda
+  version: 0.13.1
+- category: main
+  dependencies:
+    typing_extensions: 4.1.1 pyh06a4308_0
+  hash:
+    md5: 0b535dfd0618653dd772c78c9c2b56a8
+    sha256: ffd342f3df10d3690d3c8abe53f411828bcb9b55ad7eadede2ae476bc9be0a22
+  manager: conda
+  name: typing-extensions
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/typing-extensions-4.1.1-hd3eb1b0_0.conda
+  version: 4.1.1
+- category: main
+  dependencies:
+    appdirs: '>=1.4.3,<2'
+    distlib: '>=0.3.1,<1'
+    filelock: '>=3.0.0,<4'
+    python: '>=3.8,<3.9.0a0'
+    six: '>=1.9.0,<2'
+  hash:
+    md5: 080438332d4a3e1a551c72ac51fb8481
+    sha256: db2b0db5e7a873c21337b14f71ca41c53eea1b427cf51b132dab07aaa6464058
+  manager: conda
+  name: virtualenv
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/virtualenv-20.4.6-py38hecd8cb5_1.conda
+  version: 20.4.6
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: 28bd1df12ee80049de5caaae2ebce77e
+    sha256: 3d598110a0af2641d66d77834843bedbe12a6e3b5d61b175ef1428723fbf71da
+  manager: conda
+  name: websocket-client
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/websocket-client-0.58.0-py38hecd8cb5_4.conda
+  version: 0.58.0
+- category: main
+  dependencies:
+    cffi: '>=1.0.1'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 241491682516288378ab940ad09d7f74
+    sha256: 18082967846d37390fe78e9e16fc11d30fad57622841f5e8919c6fe10fb3f418
+  manager: conda
+  name: argon2-cffi-bindings
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/argon2-cffi-bindings-21.2.0-py38hca72f7f_0.conda
+  version: 21.2.0
+- category: main
+  dependencies:
+    packaging: ''
+    python: '>=3.6'
+    six: '>=1.9.0'
+    webencodings: ''
+  hash:
+    md5: 256eb7e384e35f993ef8ccd6c4f45e58
+    sha256: 294fea02f7cdc4728ea19102d021c1f9d2c55fcfb00de26b72f54e206cd45762
+  manager: conda
+  name: bleach
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/bleach-4.1.0-pyhd3eb1b0_0.conda
+  version: 4.1.0
+- category: main
+  dependencies:
+    cffi: '>=1.0.0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 41b0bc0721aecf75336a098f4d5314b8
+    sha256: 8cd905ec746456419b0ba8b58003e35860f4c1205fc2be810de06002ba257418
+  manager: conda
+  name: brotlipy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/brotlipy-0.7.0-py38h9ed2024_1003.conda
+  version: 0.7.0
+- category: main
+  dependencies:
+    cffi: '>=1.12'
+    openssl: ''
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 4d1cadeecc9246ce76605e9bf89fad9e
+    sha256: 449db256b743151ab688aa4800de211a12724e92fc27831ee8ef0f483d9d1924
+  manager: conda
+  name: cryptography
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/cryptography-37.0.1-py38hf6deb26_0.conda
+  version: 37.0.1
+- category: main
+  dependencies:
+    cloudpickle: '>=1.1.1'
+    fsspec: '>=0.6.0'
+    packaging: '>=20.0'
+    partd: '>=0.3.10'
+    python: '>=3.8,<3.9.0a0'
+    pyyaml: '>=5.3.1'
+    toolz: '>=0.8.2'
+  hash:
+    md5: ee065a8767e0f8fc4d0950c6577f0043
+    sha256: 12e7ff47192d0e770a450d6a26ac9975bb4e8908c943aa673a48370fdec12af5
+  manager: conda
+  name: dask-core
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/dask-core-2022.5.0-py38hecd8cb5_0.conda
+  version: 2022.5.0
+- category: main
+  dependencies:
+    blessed: '>=1.17.7'
+    prefixed: '>=0.3.2'
+    python: '>=2.7,!=3.0,!=3.1,!=3.2,!=3.3,<4'
+  hash:
+    md5: f5c404e6c73888f69932895043ea5938
+    sha256: 9bfc2fc19cf49deef5443a2f6fe76576f5dd6f486792c23bb53b8e17c3bcf424
+  manager: conda
+  name: enlighten
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/noarch/enlighten-1.10.1-pyhd8ed1ab_0.tar.bz2
+  version: 1.10.1
+- category: main
+  dependencies:
+    importlib-metadata: ''
+    mccabe: '>=0.6.0,<0.7.0'
+    pycodestyle: '>=2.7.0,<2.8.0'
+    pyflakes: '>=2.3.0,<2.4.0'
+    python: '>=3.6'
+    setuptools: '>=30.0.0'
+  hash:
+    md5: 04cb15847ce1ae281bac8eb5d67da440
+    sha256: e4eb96ba8e25646b256c87185002e832bc41a00b7bd6212b569f459a6d0bebfd
+  manager: conda
+  name: flake8
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/flake8-3.9.2-pyhd3eb1b0_0.conda
+  version: 3.9.2
+- category: main
+  dependencies:
+    attrs: '>=17.4.0'
+    importlib_resources: '>=1.4.0'
+    pyrsistent: '>=0.14.0,!=0.17.0,!=0.17.1,!=0.17.2'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 38dd39c9c45ed24ea311dc0b4d155833
+    sha256: 6ef383f15fdc82c27bca4cfc414cab6cf87f75d5186071f9c1569ee29b14818a
+  manager: conda
+  name: jsonschema
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jsonschema-4.4.0-py38hecd8cb5_0.conda
+  version: 4.4.0
+- category: main
+  dependencies:
+    entrypoints: ''
+    jupyter_core: '>=4.9.2'
+    nest-asyncio: '>=1.5.4'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.8.2'
+    pyzmq: '>=22.3'
+    tornado: '>=6.0'
+    traitlets: ''
+  hash:
+    md5: 51773d706c11abaf05fea1800625ff13
+    sha256: ed07e10392c2b95b2e25c6ad74285e7e4d7d5564a4223ea14c0c451a90b9d33c
+  manager: conda
+  name: jupyter_client
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jupyter_client-7.2.2-py38hecd8cb5_0.conda
+  version: 7.2.2
+- category: main
+  dependencies:
+    python: 2.7|>=3.6
+    setuptools: ''
+  hash:
+    md5: 0941325bf48969e2b3b19d0951740950
+    sha256: 1a419fefc3d02169844d27a4f2b27ed72a49930e91d60ed25c461fc06b5b8da5
+  manager: conda
+  name: nodeenv
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.6.0-pyhd8ed1ab_0.tar.bz2
+  version: 1.6.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 3746d2d0beb512e308f0b39c246b33fa
+    sha256: e9c5fef390d70970bd8f137bdb871f38740a3b79b0e223e870c1945b8e3a8fe1
+  manager: conda
+  name: numpy-base
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/numpy-base-1.20.1-py38h585ceec_0.conda
+  version: 1.20.1
+- category: main
+  dependencies:
+    python: '>=3.8,<3.9.0a0'
+    setuptools: ''
+    wheel: ''
+  hash:
+    md5: 7e29b793da87a1c4715c1fa119d99ef0
+    sha256: 1ffed76070f704f459fcf3c67320cccaa1a4ef06bc7c66e7a518085e38639c82
+  manager: conda
+  name: pip
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pip-21.2.4-py38hecd8cb5_0.conda
+  version: 21.2.4
+- category: main
+  dependencies:
+    attrs: '>=19.2.0'
+    iniconfig: ''
+    more-itertools: '>=4.0.0'
+    packaging: ''
+    pluggy: '>=0.12,<1.0.0a1'
+    py: '>=1.8.2'
+    python: '>=3.8,<3.9.0a0'
+    toml: ''
+  hash:
+    md5: 584982a60ae019a3948792e726501183
+    sha256: 0c72c9c2a6dcf3645fecfc7b0cbf15dbef9e1de9b905eef408d3e7b9f1320099
+  manager: conda
+  name: pytest
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pytest-6.2.4-py38hecd8cb5_2.conda
+  version: 6.2.4
+- category: main
+  dependencies:
+    asttokens: ''
+    executing: ''
+    pure_eval: ''
+    python: '>=3.5'
+  hash:
+    md5: 6212968e73726f6da42e5ffcd2bea92d
+    sha256: 4e1527a4faf81f7d24c529f373c0dc432f2521480dcb528e55333ec8a0520f5a
+  manager: conda
+  name: stack_data
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/stack_data-0.2.0-pyhd3eb1b0_0.conda
+  version: 0.2.0
+- category: main
+  dependencies:
+    cffi: ''
+    libcxx: '>=11.1.0'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+  hash:
+    md5: b77764caaf71b45937ac28f6bab7764d
+    sha256: f7adaaa5f15dbbc49965243619ea5a5986fa17c00d1eac6c19ec4872702a01d3
+  manager: conda
+  name: ukkonen
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/ukkonen-1.0.1-py38h12bbefe_1.tar.bz2
+  version: 1.0.1
+- category: main
+  dependencies:
+    argon2-cffi-bindings: ''
+    python: '>=3.6'
+    typing-extensions: ''
+  hash:
+    md5: f00b851bc61b4c313903d31c7daecb09
+    sha256: 2c9a465ef472d2b858fed1d2e20c15f99d13b56ff21bfd53ae6bb2fffd57c1d7
+  manager: conda
+  name: argon2-cffi
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/argon2-cffi-21.3.0-pyhd3eb1b0_0.conda
+  version: 21.3.0
+- category: main
+  dependencies:
+    python: '>=3.6'
+    ukkonen: ''
+  hash:
+    md5: 6f41e3056fcd3061fbc2b49b3309fe0c
+    sha256: 856f5cf7a55116b7976a5c9b1ca4c1536d821a460b3ac047a9895221d7ed607c
+  manager: conda
+  name: identify
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/noarch/identify-2.5.1-pyhd8ed1ab_0.tar.bz2
+  version: 2.5.1
+- category: main
+  dependencies:
+    appnope: ''
+    backcall: ''
+    decorator: ''
+    jedi: '>=0.16'
+    matplotlib-inline: '>=0.1.2'
+    pexpect: '>4.3'
+    pickleshare: ''
+    prompt-toolkit: '>=2.0.0,<3.1.0,!=3.0.0,!=3.0.1'
+    pygments: '>=2.4.0'
+    python: '>=3.8,<3.9.0a0'
+    setuptools: '>=18.5'
+    stack_data: ''
+    traitlets: '>=5'
+  hash:
+    md5: 329fc0165c4bf2d945d0c68557d5173d
+    sha256: 9e56bafe0d2a08ea08e244bb78cc9cb2e63e03a0732e4f3fdee0f149ceeb6071
+  manager: conda
+  name: ipython
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/ipython-8.3.0-py38hecd8cb5_0.conda
+  version: 8.3.0
+- category: main
+  dependencies:
+    jsonschema: '>=2.6'
+    jupyter_core: ''
+    python: '>=3.8,<3.9.0a0'
+    python-fastjsonschema: ''
+    traitlets: '>=4.1'
+  hash:
+    md5: 5b6ac6fe9256a09f8e9b1bf500740ffb
+    sha256: 6f810b82386e4dcf9bcc6389014af758460d7af0e609ab47b88d42da71cc507f
+  manager: conda
+  name: nbformat
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/nbformat-5.3.0-py38hecd8cb5_0.conda
+  version: 5.3.0
+- category: main
+  dependencies:
+    cryptography: '>=35.0'
+    python: '>=3.6'
+  hash:
+    md5: 1dbbf9422269cd62c7094960d9b43f36
+    sha256: 0a0629a3816bd639f1b1a94dd2fd521ad407e55bfd6532154dce22cddb875783
+  manager: conda
+  name: pyopenssl
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-22.0.0-pyhd3eb1b0_0.conda
+  version: 22.0.0
+- category: main
+  dependencies:
+    appnope: ''
+    debugpy: '>=1.0.0,<2.0'
+    ipython: '>=7.23.1'
+    jupyter_client: <8.0
+    matplotlib-inline: '>=0.1.0,<0.2.0'
+    nest-asyncio: ''
+    python: '>=3.8,<3.9.0a0'
+    tornado: '>=4.2,<7.0'
+    traitlets: '>=5.1.0,<6.0'
+  hash:
+    md5: 1e871f3672d376078a113d66b352c1c9
+    sha256: e67032bfd31ec6ebba09a1d050276b99bb9e1c0b42c519868f24dbea43b8b1b7
+  manager: conda
+  name: ipykernel
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/ipykernel-6.9.1-py38hecd8cb5_0.conda
+  version: 6.9.1
+- category: main
+  dependencies:
+    jupyter_client: '>=6.1.5'
+    nbformat: '>=5.0'
+    nest-asyncio: ''
+    python: '>=3.8,<3.9.0a0'
+    traitlets: '>=5.0.0'
+  hash:
+    md5: cb3b035daf42ea75d5f48577f502d0f2
+    sha256: e037d17f864e7fe19841030adb64e3f8e46f13f404a30f9af5619d3be80d7bf1
+  manager: conda
+  name: nbclient
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/nbclient-0.5.13-py38hecd8cb5_0.conda
+  version: 0.5.13
+- category: main
+  dependencies:
+    cfgv: '>=2.0.0'
+    identify: '>=1.0.0'
+    nodeenv: '>=0.11.1'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+    pyyaml: '>=5.1'
+    toml: ''
+    virtualenv: '>=20.0.8'
+  hash:
+    md5: 0662d1baf23c64a47f7f4afc52daf6a8
+    sha256: f20c76271028d59f825054f7609c095b86c420ec7ff0f6ea7f52addbed4f54a3
+  manager: conda
+  name: pre-commit
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/pre-commit-2.15.0-py38h50d1736_1.tar.bz2
+  version: 2.15.0
+- category: main
+  dependencies:
+    brotlipy: '>=0.6.0'
+    certifi: ''
+    cryptography: '>=1.3.4'
+    idna: '>=2.0.0'
+    pyopenssl: '>=0.14'
+    pysocks: '>=1.5.6,<2.0,!=1.5.7'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 1349aad2f43f0990eb33d31a10ed745c
+    sha256: 30a7f4cfd6f319acbb345b91ad7bf2c0b92855a327ea83f7965a95e92fc9f0ab
+  manager: conda
+  name: urllib3
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/urllib3-1.26.9-py38hecd8cb5_0.conda
+  version: 1.26.9
+- category: main
+  dependencies:
+    bleach: ''
+    defusedxml: ''
+    entrypoints: '>=0.2.2'
+    jinja2: '>=2.4'
+    jupyter_core: ''
+    jupyterlab_pygments: ''
+    mistune: '>=0.8.1,<2'
+    nbclient: '>=0.5.0,<0.6.0'
+    nbformat: '>=4.4'
+    pandocfilters: '>=1.4.1'
+    pygments: '>=2.4.1'
+    python: '>=3.8,<3.9.0a0'
+    testpath: ''
+    traitlets: '>=5.0'
+  hash:
+    md5: 11ca6fd9c54d8f54cbd0f77e9260bc3a
+    sha256: 819667f454d861eccccbc0d7e39d4033bd121bb1a1513e1353af72c4b9e02655
+  manager: conda
+  name: nbconvert
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/nbconvert-6.1.0-py38hecd8cb5_0.conda
+  version: 6.1.0
+- category: main
+  dependencies:
+    certifi: '>=2017.4.17'
+    charset-normalizer: '>=2.0.0,<2.1.0'
+    idna: '>=2.5,<4'
+    python: '>=3.6'
+    urllib3: '>=1.21.1,<1.27'
+  hash:
+    md5: 9b593f86737e69140c47c2107ecf277c
+    sha256: 0a87a073acd53795a878bb6cf24d1e57ecf73dd161171ed0b4d64f0be0844719
+  manager: conda
+  name: requests
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/requests-2.27.1-pyhd3eb1b0_0.conda
+  version: 2.27.1
+- category: main
+  dependencies:
+    anyio: '>=3.1.0,<4'
+    argon2-cffi: ''
+    ipython_genutils: ''
+    jinja2: ''
+    jupyter_client: '>=6.1.1'
+    jupyter_core: '>=4.6.0'
+    nbconvert: ''
+    nbformat: ''
+    packaging: ''
+    prometheus_client: ''
+    python: '>=3.7'
+    pyzmq: '>=17'
+    send2trash: ''
+    terminado: '>=0.8.3'
+    tornado: '>=6.1.0'
+    traitlets: '>=5'
+    websocket-client: ''
+  hash:
+    md5: 303eb09f873fde3c13abaaed542d54e0
+    sha256: d8890018724afc9b25e2417b3530f873559a06a23a4a78f048b276a5cdc63bc2
+  manager: conda
+  name: jupyter_server
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyter_server-1.13.5-pyhd3eb1b0_0.conda
+  version: 1.13.5
+- category: main
+  dependencies:
+    argon2-cffi: ''
+    ipykernel: ''
+    ipython_genutils: ''
+    jinja2: ''
+    jupyter_client: '>=5.3.4'
+    jupyter_core: '>=4.6.1'
+    nbconvert: '>=5'
+    nbformat: ''
+    nest-asyncio: '>=1.5'
+    prometheus_client: ''
+    python: '>=3.8,<3.9.0a0'
+    pyzmq: '>=17'
+    send2trash: '>=1.8.0'
+    terminado: '>=0.8.3'
+    tornado: '>=6.1'
+    traitlets: '>=4.2.1'
+  hash:
+    md5: 1c1836860fb054c349d72f7d6b3a6efa
+    sha256: e5ded392f09e493f5e1a400e85eb58f4fc7426857bf0a0f11e8fc8f7117f5bfd
+  manager: conda
+  name: notebook
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/notebook-6.4.11-py38hecd8cb5_0.conda
+  version: 6.4.11
+- category: main
+  dependencies:
+    alabaster: '>=0.7,<0.8'
+    babel: '>=1.3'
+    colorama: '>=0.3.5'
+    docutils: '>=0.14,<0.18'
+    imagesize: ''
+    jinja2: '>=2.3'
+    packaging: ''
+    pygments: '>=2.0'
+    python: '>=3.6'
+    requests: '>=2.5.0'
+    setuptools: ''
+    snowballstemmer: '>=1.1'
+    sphinxcontrib-applehelp: ''
+    sphinxcontrib-devhelp: ''
+    sphinxcontrib-htmlhelp: ''
+    sphinxcontrib-jsmath: ''
+    sphinxcontrib-qthelp: ''
+    sphinxcontrib-serializinghtml: ''
+  hash:
+    md5: 8f65a307ecef80b3afd979777cc5b549
+    sha256: 0649bd05747f248d83d2d21b8047e42f49cce93e56116fa65aa2a234310ee4ea
+  manager: conda
+  name: sphinx
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/sphinx-4.2.0-pyhd3eb1b0_1.conda
+  version: 4.2.0
+- category: main
+  dependencies:
+    babel: ''
+    entrypoints: '>=0.2.2'
+    jinja2: '>=3.0.3'
+    json5: ''
+    jsonschema: '>=3.0.1'
+    jupyter_server: '>=1.8,<2'
+    packaging: ''
+    python: '>=3.8,<3.9.0a0'
+    requests: ''
+  hash:
+    md5: 1e745f1768c0aca6a6af0a88e8e4f8ad
+    sha256: 6f76df0dae1da2fb56c8fc44c5838cb82d564579b23b2b4093c15322f21794cb
+  manager: conda
+  name: jupyterlab_server
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/jupyterlab_server-2.12.0-py38hecd8cb5_0.conda
+  version: 2.12.0
+- category: main
+  dependencies:
+    jupyter_server: '>=1.8.0,<2.0.0'
+    notebook: <7
+    python: '>=3.6'
+  hash:
+    md5: 22683be353228acd015cae8a4676b462
+    sha256: bb4645b792089292736e7193112d74247884f9326b93410acfbd1b18c67ac2c4
+  manager: conda
+  name: nbclassic
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/nbclassic-0.3.5-pyhd3eb1b0_0.conda
+  version: 0.3.5
+- category: main
+  dependencies:
+    ipython: ''
+    jinja2: '>=2.10'
+    jupyter_core: ''
+    jupyter_server: '>=1.4,<2'
+    jupyterlab_server: '>=2.3,<3'
+    nbclassic: '>=0.2,<1'
+    packaging: ''
+    python: '>=3.6'
+    tornado: '>=6.1'
+  hash:
+    md5: 9292f2b7ad621d8a6d9a9a7f7338664d
+    sha256: d5d48497b7b79b91e13a73ee1def9e9ea27a40a6d6324b6ab616771331f80f22
+  manager: conda
+  name: jupyterlab
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/jupyterlab-3.1.7-pyhd3eb1b0_0.conda
+  version: 3.1.7
+- category: main
+  dependencies:
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 51ecbea9295cc1aa0eb6e6562826aca2
+    sha256: a829b85cddf95b6a4e1001b19551d59e2e3839d1995eec991926487e7cb3cbfb
+  manager: conda
+  name: bottleneck
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/bottleneck-1.3.4-py38h67323c0_0.conda
+  version: 1.3.4
+- category: main
+  dependencies:
+    hdf5: '>=1.10.5,<1.10.6.0a0 mpi_openmpi_*'
+    mpi4py: ''
+    numpy: '>=1.14.6,<2.0a0'
+    openmpi: '>=4.0.2,<5.0.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+  hash:
+    md5: 770c0cd6031f85bbc7ed745764f0babb
+    sha256: cee223f4c250ac011687b2633cf60bbbde1f0ddd408169d316274c94448c17ae
+  manager: conda
+  name: h5py
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/h5py-2.10.0-mpi_openmpi_py38h28212cf_2.tar.bz2
+  version: 2.10.0
+- category: main
+  dependencies:
+    h5py: ''
+    hdf5: '>=1.10.5,<1.10.6.0a0'
+    libcxx: '>=9.0.1'
+    python: '>=3.8,<3.9.0a0'
+    python_abi: 3.8.* *_cp38
+  hash:
+    md5: b81b5f2f24a99a4d0446bab539562acc
+    sha256: e1884bcc703d257ba5d95ff96cd61eb71284fc5e02be52fd8233cc6f36b9c843
+  manager: conda
+  name: hdf5plugin
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/hdf5plugin-2.3.0-py38ha601cb3_0.tar.bz2
+  version: 2.3.0
+- category: main
+  dependencies:
+    blosc: '>=1.21.0,<2.0a0'
+    brotli: '>=1.0.9,<2.0a0'
+    brunsli: '>=0.1,<1.0a0'
+    bzip2: '>=1.0.8,<2.0a0'
+    cfitsio: '>=3.470,<3.471.0a0'
+    charls: '>=2.2.0,<2.3.0a0'
+    giflib: '>=5.2.1,<5.3.0a0'
+    jpeg: '>=9d,<10a'
+    jxrlib: '>=1.1,<1.2.0a0'
+    lcms2: '>=2.12,<3.0a0'
+    lerc: '>=3.0,<4.0a0'
+    libaec: '>=1.0.4,<2.0a0'
+    libcxx: '>=12.0.0'
+    libdeflate: '>=1.8,<1.9.0a0'
+    libpng: '>=1.6.37,<1.7.0a0'
+    libtiff: '>=4.1.0,<5.0a0'
+    libwebp: '>=1.2.0,<1.3.0a0'
+    libzopfli: '>=1.0.3,<1.1.0a0'
+    lz4-c: '>=1.9.3,<1.10.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    openjpeg: '>=2.3.0,<3.0a0'
+    python: '>=3.8,<3.9.0a0'
+    snappy: '>=1.1.8,<2.0a0'
+    xz: '>=5.2.5,<6.0a0'
+    zfp: '>=0.5.5,<1.0a0'
+    zlib: '>=1.2.11,<1.3.0a0'
+    zstd: '>=1.4.9,<1.5.0a0'
+  hash:
+    md5: 707d3ae9a006eae3ef3ee59d36f2240b
+    sha256: 0ecc1b5886c5641904b38e14007009b3e27e52ffd54b707f24b1bb216b01bf09
+  manager: conda
+  name: imagecodecs
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/imagecodecs-2021.8.26-py38ha952a84_0.conda
+  version: 2021.8.26
+- category: main
+  dependencies:
+    numpy: ''
+    pillow: ''
+    python: '>=3'
+  hash:
+    md5: 4f1d37bdc3afdb2d237fd9b6b920ec3d
+    sha256: 28c1a7ad62b93af09bcee1820b86660789838a40ca7be783d34bdf238d5381f8
+  manager: conda
+  name: imageio
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/imageio-2.9.0-pyhd3eb1b0_0.conda
+  version: 2.9.0
+- category: main
+  dependencies:
+    matplotlib-base: '>=3.5.1,<3.5.2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    tornado: ''
+  hash:
+    md5: 2ca356c27d3d4163b65d4560ea6024c2
+    sha256: 200584856a6537806c81b2b1b2d67acfb4f4fde924c6e77ca4f7a5ac291aeacc
+  manager: conda
+  name: matplotlib
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-3.5.1-py38hecd8cb5_1.conda
+  version: 3.5.1
+- category: main
+  dependencies:
+    cycler: '>=0.10'
+    fonttools: '>=4.22.0'
+    freetype: '>=2.3'
+    kiwisolver: '>=1.0.1'
+    libcxx: '>=12.0.0'
+    numpy: '>=1.19.2,<2.0a0'
+    packaging: '>=20.0'
+    pillow: '>=6.2.0'
+    pyparsing: '>=2.2.1'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.7'
+  hash:
+    md5: 737f5b096415dccfd6a6a1b357279c1c
+    sha256: f7ba952345b70a9288c3d8ee6fbb18e13dee2ce0c80016e43c1659aac971a715
+  manager: conda
+  name: matplotlib-base
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-base-3.5.1-py38hfb0c5b7_1.conda
+  version: 3.5.1
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libcxx: '>=10.0.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 074ae65d92fbb401e882203908b45859
+    sha256: 719efcd2976f4cc0519d67f502674ab5d3104120a42b5381e3b24a35448fdadb
+  manager: conda
+  name: mkl_fft
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mkl_fft-1.3.0-py38h4a7008c_2.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    blas: '* mkl'
+    libcxx: '>=10.0.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: f000fabdb08ccfb051ac7134449e23b3
+    sha256: 247e9630e6d5e7d5868a09f14c6151617220359fc230a5575ef21f26749a830a
+  manager: conda
+  name: mkl_random
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/mkl_random-1.2.2-py38hb2f4e1b_0.conda
+  version: 1.2.2
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    mkl_fft: ''
+    mkl_random: ''
+    numpy-base: 1.20.1 py38h585ceec_0
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: cbf3fe59bbeabac553345a216b2883c2
+    sha256: 401923e1fdcafc4ace9762d5bb1819bceb3275ab8b029792c53ff59a099b8a49
+  manager: conda
+  name: numpy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/numpy-1.20.1-py38hd6e1bb9_0.conda
+  version: 1.20.1
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    llvm-openmp: '>=10.0.0'
+    llvmlite: '>=0.36.0,<0.37.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    setuptools: ''
+    tbb: '>=2020.3'
+  hash:
+    md5: a63656d5d809042bbde4c473e78e947f
+    sha256: 271200a9319f52de7033ee4e055cbf2a510fbc6bee778b789d969bc56a81d2fa
+  manager: conda
+  name: numba
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/numba-0.53.1-py38hb2f4e1b_0.conda
+  version: 0.53.1
+- category: main
+  dependencies:
+    libcxx: '>=10.0.0'
+    msgpack-python: ''
+    numpy: '>=1.7'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 9792b5443f40671efd3124a749012b01
+    sha256: 5ebc39326237dae8458c7c346dd6d9eb1b182ca716a8953dffbf6db5aa559a16
+  manager: conda
+  name: numcodecs
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/numcodecs-0.8.0-py38h23ab428_0.conda
+  version: 0.8.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libcxx: '>=10.0.0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 620c4922f3745f0580eb49fe2ca4a3f2
+    sha256: d49c2d0c592c16c1786b062e0efb05301dc5a47c346bb6f1f776876173d02476
+  manager: conda
+  name: numexpr
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/numexpr-2.7.3-py38h5873af2_1.conda
+  version: 2.7.3
+- category: main
+  dependencies:
+    numpy: ''
+    python: '>=3.5'
+  hash:
+    md5: 53205b8b5762c06f85b6bb7abd4f496e
+    sha256: 59764a0ce78436f1a542b06667233fdcf71e6d7c2dc5b88762a619c8350430ec
+  manager: conda
+  name: opt_einsum
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/opt_einsum-3.3.0-pyhd3eb1b0_1.conda
+  version: 3.3.0
+- category: main
+  dependencies:
+    numpy: '>=1.18.5,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: 3fed974bbd0df850bbe7176cd13044d8
+    sha256: 52fbf8b4a47643db08c5ca6a8790c655a93befee7fa5446b3d78f680c0a78cb9
+  manager: conda
+  name: pywavelets
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pywavelets-1.3.0-py38hca72f7f_0.conda
+  version: 1.3.0
+- category: main
+  dependencies:
+    blas: 1.0 mkl
+    libcxx: '>=10.0.0'
+    libgfortran: '>=3.0.1,<4.0.0.a0'
+    mkl: '>=2021.2.0,<2022.0a0'
+    mkl-service: '>=2.3.0,<3.0a0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+  hash:
+    md5: e35cb7581cc465135b22bf215d2ec35b
+    sha256: 575a23d49eb125b54bc12439597da21f0f11492dac42b98e6e0599befcd3b3d1
+  manager: conda
+  name: scipy
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/scipy-1.6.2-py38hd5f7400_1.conda
+  version: 1.6.2
+- category: main
+  dependencies:
+    imagecodecs: '>=2021.4.28'
+    numpy: '>=1.15.1'
+    python: '>=3.7'
+  hash:
+    md5: 5a265e3b9694c13bcfb8c40a3b8e3d8f
+    sha256: 4188780a5854950cecfdcb0ec86e428899a7ea6589ec9947f8f7fe0987d8340c
+  manager: conda
+  name: tifffile
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/tifffile-2021.7.2-pyhd3eb1b0_2.conda
+  version: 2021.7.2
+- category: main
+  dependencies:
+    bottleneck: '>=1.2.1'
+    libcxx: '>=10.0.0'
+    numexpr: '>=2.7.0'
+    numpy: '>=1.19.2,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    python-dateutil: '>=2.7.3'
+    pytz: '>=2017.3'
+  hash:
+    md5: b9b8ba75c0650d7f1cb2bb5af909cb16
+    sha256: 1e15143759d0ad30762715e2d503b40f94ac75d235a8fd1431a2dcbc85bee8ac
+  manager: conda
+  name: pandas
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/pandas-1.3.3-py38h5008ddb_0.conda
+  version: 1.3.3
+- category: main
+  dependencies:
+    blosc: '>=1.17.0,<2.0a0'
+    bzip2: '>=1.0.8,<2.0a0'
+    hdf5: '>=1.10.5,<1.10.6.0a0'
+    libcxx: '>=9.0.1'
+    mock: ''
+    numexpr: ''
+    numpy: '>=1.14.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    six: ''
+    zlib: '>=1.2.11,<1.3.0a0'
+  hash:
+    md5: a2d59c3be8c694e43ba4621b596f82f6
+    sha256: 04cfa826248b987961e0ba5e081c771ed5341887becc6791dcda7b38c336273f
+  manager: conda
+  name: pytables
+  optional: false
+  platform: osx-64
+  url: https://conda.anaconda.org/conda-forge/osx-64/pytables-3.6.1-py38h6f8395a_1.tar.bz2
+  version: 3.6.1
+- category: main
+  dependencies:
+    cloudpickle: '>=0.2.1'
+    cytoolz: '>=0.7.3'
+    dask-core: '>=1.0.0,!=2.17.0'
+    imageio: '>=2.4.1'
+    libcxx: '>=12.0.0'
+    llvm-openmp: '>=4.0.1'
+    networkx: '>=2.2'
+    numpy: '>=1.16.6,<2.0a0'
+    packaging: '>=20.0'
+    pillow: '>=6.1.0,!=7.1.0,!=7.1.1,!=8.3.0'
+    python: '>=3.8,<3.9.0a0'
+    pywavelets: '>=1.1.1'
+    scipy: '>=1.4.1'
+    tifffile: '>=2019.7.26'
+    toolz: '>=0.7.3'
+  hash:
+    md5: 00367d2a591fe71d6ffa33c2cdd87aaa
+    sha256: dfe3f1643aef90c6cc878410f5c443eafcb2bcc737b2c4fb2b78bb84c9e57d6c
+  manager: conda
+  name: scikit-image
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/scikit-image-0.19.2-py38hae1ba45_0.conda
+  version: 0.19.2
+- category: main
+  dependencies:
+    joblib: '>=0.11'
+    libcxx: '>=10.0.0'
+    llvm-openmp: '>=10.0.0'
+    numpy: '>=1.16.6,<2.0a0'
+    python: '>=3.8,<3.9.0a0'
+    scipy: '>=0.19.1'
+    threadpoolctl: '>=2.0.0'
+  hash:
+    md5: cab481bb97698d6dea0d7d70ade7ce5d
+    sha256: ec0de5a3e3228b0df782fd1a734a537d93ec93175c549fa3fa05363ebb446775
+  manager: conda
+  name: scikit-learn
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/osx-64/scikit-learn-0.24.2-py38hb2f4e1b_0.conda
+  version: 0.24.2
+- category: main
+  dependencies:
+    asciitree: ''
+    fasteners: ''
+    numcodecs: '>=0.6.4'
+    numpy: '>=1.7'
+    python: '>=3.6,<4'
+  hash:
+    md5: 7df763b90dcefae1c6039911fc72b694
+    sha256: 9cfcc6ce466645e80891c07e81b4c5db3082fd55fcbb9ff7a2980b4d9c8d1ff7
+  manager: conda
+  name: zarr
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/zarr-2.8.1-pyhd3eb1b0_0.conda
+  version: 2.8.1
+- category: main
+  dependencies:
+    matplotlib: '>=2.2'
+    numpy: '>=1.15'
+    pandas: '>=0.23'
+    python: '>=3.6'
+    scipy: '>=1.0'
+  hash:
+    md5: 36b64fb4e3b76ded59d6388c9582de69
+    sha256: 3a78df98681129f429a9a4e0b98c2cdb43966022bab5886fd6804319af0cc65c
+  manager: conda
+  name: seaborn
+  optional: false
+  platform: osx-64
+  url: https://repo.anaconda.com/pkgs/main/noarch/seaborn-0.11.2-pyhd3eb1b0_0.conda
+  version: 0.11.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
+  manager: pip
+  name: click
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/d2/3d/fa76db83bf75c4f8d338c2fd15c8d33fdd7ad23a9b5e57eb6c5de26b430e/click-7.1.2-py2.py3-none-any.whl
+  version: 7.1.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 3dd15cb27e8119a24c1a7b5c93f9f3b455855e0f73993b1c25921b2f646f1dcd
+  manager: pip
+  name: colorlog
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/51/62/61449c6bb74c2a3953c415b2cdb488e4f0518ac67b35e2b03a6d543035ca/colorlog-4.8.0-py2.py3-none-any.whl
+  version: 4.8.0
+- category: main
+  dependencies: {}
+  hash:
+    sha256: ddb0b1d8243e6e3abb822bd14e447a89f4ab7439342912d590444831fa00b6a0
+  manager: pip
+  name: fastjsonschema
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e6/0b/24795939622d60f4b453aa7040f23c6a6f8b44c7c026c3b42d9842e6cc31/fastjsonschema-2.15.3-py3-none-any.whl
+  version: 2.15.3
+- category: main
+  dependencies: {}
+  hash:
+    sha256: b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d
+  manager: pip
+  name: future
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz
+  version: 0.18.2
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 07fa44286cda977bd4803b656ffc1c9b7e3bc7dff7d34263446aec8f8c96f88a
+  manager: pip
+  name: lazy-object-proxy
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/5c/96/2c984706be60a1671177f57ba9f6b17a11b4cbf1b6704f3839ad6addc284/lazy_object_proxy-1.7.1-cp38-cp38-macosx_10_9_x86_64.whl
+  version: 1.7.1
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 5e2f9da88ed8236a76fffbee3ceefd259589cf42dfbc2cec2877102189fae58a
+  manager: pip
+  name: progress
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e9/ff/7871f3736dc6707435b2a2f217c46b5a5bc6ea7e0a9a443cd69146a1afd1/progress-1.4.tar.gz
+  version: '1.4'
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94
+  manager: pip
+  name: smmap
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/6d/01/7caa71608bc29952ae09b0be63a539e50d2484bc37747797a66a60679856/smmap-5.0.0-py3-none-any.whl
+  version: 5.0.0
+- category: main
+  dependencies: {}
+  hash:
+    sha256: d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4
+  manager: pip
+  name: tabulate
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/ca/80/7c0cad11bd99985cfe7c09427ee0b4f9bd6b048bd13d4ffb32c6db237dfb/tabulate-0.8.9-py3-none-any.whl
+  version: 0.8.9
+- category: main
+  dependencies: {}
+  hash:
+    sha256: afa04efcdd818a93237574791be9b2817d7077c25a068b00f8cff7baa4e59257
+  manager: pip
+  name: unidecode
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/f9/5b/7603add7f192252916b85927263b598c74585f82389e6e42318a6278159b/Unidecode-1.3.4-py3-none-any.whl
+  version: 1.3.4
+- category: main
+  dependencies: {}
+  hash:
+    sha256: 8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456
+  manager: pip
+  name: wrapt
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/33/cd/7335d8b82ff0a442581ab37a8d275ad76b4c1f33ace63c1a4d7c23791eee/wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl
+  version: 1.14.1
+- category: main
+  dependencies:
+    lazy-object-proxy: '>=1.4.0'
+    typing-extensions: '>=3.10'
+    wrapt: '>=1.11,<2'
+  hash:
+    sha256: 14ffbb4f6aa2cf474a0834014005487f7ecd8924996083ab411e7fa0b508ce0b
+  manager: pip
+  name: astroid
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/94/58/6f1bbfd88b6ba5271b4a9be99cb15cb2fe369794ba410390f0d672c6ad39/astroid-2.11.5-py3-none-any.whl
+  version: 2.11.5
+- category: main
+  dependencies:
+    click: '*'
+    pyyaml: '*'
+  hash:
+    sha256: 0945e83b1a3d9e216bdf06000b767cc96dc2a0faf356ac0cc255c45f671c84b9
+  manager: pip
+  name: docstr-coverage
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/ef/97/80f5de5ab716ece99fec79ce1ae51821ef4fcd6ccd64902b4481991fbba4/docstr_coverage-2.1.1-py3-none-any.whl
+  version: 2.1.1
+- category: main
+  dependencies:
+    flake8: '*'
+  hash:
+    sha256: 12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9
+  manager: pip
+  name: flake8-polyfill
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/86/b5/a43fed6fd0193585d17d6faa7b85317d4461f694aaed546098c69f856579/flake8_polyfill-1.0.2-py2.py3-none-any.whl
+  version: 1.0.2
+- category: main
+  dependencies:
+    click: '*'
+    pillow: '*'
+    requests: '*'
+  hash:
+    sha256: a47e6996a8a4223132ab08e3ad9bade2cb997e68a03162ff5d122469e9d6a5c6
+  manager: pip
+  name: genbadge
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/20/b8/61d32e888fdcced280813ec871c50c6d0ef17fc266fe56d600fd77201566/genbadge-1.0.6-py2.py3-none-any.whl
+  version: 1.0.6
+- category: main
+  dependencies:
+    smmap: '>=3.0.1,<6'
+  hash:
+    sha256: 8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd
+  manager: pip
+  name: gitdb
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/a3/7c/5d747655049bfbf75b5fcec57c8115896cb78d6fafa84f6d3ef4c0f13a98/gitdb-4.0.9-py3-none-any.whl
+  version: 4.0.9
+- category: main
+  dependencies:
+    six: '*'
+  hash:
+    sha256: 4ce09faec7e5192ffc3c57830e26acba0fd6cd11e1ee81af0d4df0657463bd1c
+  manager: pip
+  name: mando
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e6/cc/f6e25247c1493a654785e68cd975e479c311e99dafedd49ed17f8d300e0c/mando-0.6.4-py2.py3-none-any.whl
+  version: 0.6.4
+- category: main
+  dependencies:
+    six: '>=1.7.0'
+  hash:
+    sha256: 08c039560a6da2fe4f2c426d0766e284d3b736e355f8dd24b37367b0bb41973b
+  manager: pip
+  name: retrying
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz
+  version: 1.3.3
+- category: main
+  dependencies:
+    gitdb: '>=4.0.1,<5'
+  hash:
+    sha256: 5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d
+  manager: pip
+  name: gitpython
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/83/32/ce68915670da6fd6b1e3fb4b3554b4462512f6441dddd194fc0f4f6ec653/GitPython-3.1.27-py3-none-any.whl
+  version: 3.1.27
+- category: main
+  dependencies:
+    retrying: '>=1.3.3'
+    six: '*'
+  hash:
+    sha256: d68fc15fcb49f88db27ab3e0c87110943e65fee02a47f33a8590f541b3042461
+  manager: pip
+  name: plotly
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/1f/f6/bd3c17c8003b6641df1228e80e1acac97ed8402635e46c2571f8e1ef63af/plotly-4.14.3-py2.py3-none-any.whl
+  version: 4.14.3
+- category: main
+  dependencies:
+    colorama: '>=0.4,<0.5'
+    flake8-polyfill: '*'
+    future: '*'
+    mando: '>=0.6,<0.7'
+  hash:
+    sha256: 32ac2f86bfacbddade5c79f0e927e97f90a5cda5b86f880511dd849c4a0096e3
+  manager: pip
+  name: radon
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/cf/fe/c400dbbbbde6649ad0164ef2ffef3672baefc62ecb676f58d0f25d8f83b0/radon-4.0.0-py2.py3-none-any.whl
+  version: 4.0.0
+- category: main
+  dependencies:
+    astroid: '>=2.7'
+    jinja2: '*'
+    pyyaml: '*'
+    sphinx: '>=3.0'
+    unidecode: '*'
+  hash:
+    sha256: 007bf9e24cd2aa0ac0561f67e8bcd6a6e2e8911ef4b4fd54aaba799d8832c8d0
+  manager: pip
+  name: sphinx-autoapi
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/5e/67/249380ade22a7efaa8a335f45a9b87f2fdda499c9fdc53913096dec5d1fe/sphinx_autoapi-1.8.4-py2.py3-none-any.whl
+  version: 1.8.4
+- category: main
+  dependencies:
+    click: '>=7.0,<8.0'
+    colorlog: '>=4.0.0,<5.0.0'
+    gitpython: '>=3.0.0,<4.0.0'
+    nbformat: '>=5.1.3,<6.0.0'
+    plotly: '>=4.0.0,<5.0.0'
+    progress: '1.4'
+    radon: '>=4.0.0,<4.1.0'
+    tabulate: '>=0.8.2,<1.0.0'
+  hash:
+    sha256: a229db85982f87f31f714fbe49ec4701eb84e4843dc1a1fcbe3305e3c05c4ea7
+  manager: pip
+  name: wily
+  optional: false
+  platform: osx-64
+  source: null
+  url: https://files.pythonhosted.org/packages/e7/2c/53638ade80511eee70c29bcc52e90ca017836feecba1762c935112249aea/wily-1.20.0-py3-none-any.whl
+  version: 1.20.0
+version: 1
diff --git a/conda-osx-64.lock b/conda-osx-64.lock
new file mode 100644
index 0000000000000000000000000000000000000000..da90d0d1c701db23b20fc815dd981a205d3bd71d
--- /dev/null
+++ b/conda-osx-64.lock
@@ -0,0 +1,250 @@
+# Generated by conda-lock.
+# platform: osx-64
+# input_hash: b5d1f81dc1316eb7babb5d0e38665c10fdfd704a7ebe538ada1201badd2269fc
+@EXPLICIT
+https://repo.anaconda.com/pkgs/main/osx-64/blas-1.0-mkl.conda#cb2c87e85ac8e0ceae776d26d4214c8a
+https://repo.anaconda.com/pkgs/main/osx-64/bzip2-1.0.8-h1de35cc_0.conda#19fcb113b170fe2a0be96b47801fed7d
+https://repo.anaconda.com/pkgs/main/osx-64/c-ares-1.18.1-hca72f7f_0.conda#fc798dffc9f7843e2dc563a10290ef59
+https://repo.anaconda.com/pkgs/main/osx-64/ca-certificates-2022.4.26-hecd8cb5_0.conda#dd4c1cfc3606b56486f7af0a99e80fa3
+https://repo.anaconda.com/pkgs/main/osx-64/giflib-5.2.1-haf1e3a3_0.conda#0c36d6800a1a0f0ae244699a09d3f982
+https://repo.anaconda.com/pkgs/main/osx-64/intel-openmp-2021.4.0-hecd8cb5_3538.conda#65e79d0ffef79cbb8ebd3c71e74eb50a
+https://repo.anaconda.com/pkgs/main/osx-64/jpeg-9e-hca72f7f_0.conda#99b7d820514a0c07818d58c320ab21fc
+https://repo.anaconda.com/pkgs/main/osx-64/jxrlib-1.1-haf1e3a3_2.conda#446b753e081e9384c17401e5925d2f1f
+https://repo.anaconda.com/pkgs/main/osx-64/libcxx-12.0.0-h2f01273_0.conda#fa697ecaca74bdf72bd0a10e42a2287a
+https://repo.anaconda.com/pkgs/main/osx-64/libdeflate-1.8-h9ed2024_5.conda#584dec4a4ba735d8d7841de1948b23b1
+https://repo.anaconda.com/pkgs/main/osx-64/libev-4.33-h9ed2024_1.conda#ffb0ee08779a6ccb4706b72523712cb7
+https://repo.anaconda.com/pkgs/main/osx-64/libgfortran-3.0.1-h93005f0_2.conda#2f6d6d3c7a46ff214a5a1a8991af9bef
+https://repo.anaconda.com/pkgs/main/osx-64/libsodium-1.0.18-h1de35cc_0.conda#dc65d21181274fa72d5eea2ba9cc1c35
+https://repo.anaconda.com/pkgs/main/osx-64/libwebp-base-1.2.2-hca72f7f_0.conda#029b8fce196d53c93af17512f6f606d8
+https://repo.anaconda.com/pkgs/main/osx-64/llvm-openmp-12.0.0-h0dcd299_1.conda#06b2da900ba1448f28cd5a0cd43bff90
+https://repo.anaconda.com/pkgs/main/osx-64/mpi-1.0-openmpi.conda#ce0721847dfc63612523ecdcf37ce53e
+https://repo.anaconda.com/pkgs/main/osx-64/ncurses-6.3-hca72f7f_2.conda#32896581ee96215797b753884720e581
+https://repo.anaconda.com/pkgs/main/osx-64/xz-5.2.5-hca72f7f_1.conda#bb093b4af8f53670468795e5f12676e5
+https://repo.anaconda.com/pkgs/main/osx-64/yaml-0.2.5-haf1e3a3_0.conda#73628ed86f99adf6a0cb81dd20e426cd
+https://repo.anaconda.com/pkgs/main/osx-64/zlib-1.2.12-h4dc903c_2.conda#4264c14bdd0bd302b0232cb65f3ee275
+https://repo.anaconda.com/pkgs/main/osx-64/brotli-1.0.9-hb1e8313_2.conda#47c6f0f0789dc3b0c350e2f6caac3ebc
+https://repo.anaconda.com/pkgs/main/osx-64/charls-2.2.0-h23ab428_0.conda#b3c97875262c5c9026e36c77007ec260
+https://repo.anaconda.com/pkgs/main/osx-64/lerc-3.0-he9d5cce_0.conda#aec2c3dbef836849c9260f05be04f3db
+https://repo.anaconda.com/pkgs/main/osx-64/libaec-1.0.4-hb1e8313_1.conda#b825d0cba53756d8de6f0842f7c9b657
+https://repo.anaconda.com/pkgs/main/osx-64/libedit-3.1.20210910-hca72f7f_0.conda#84f04c29858f8953fdb36fe27f2148e1
+https://repo.anaconda.com/pkgs/main/osx-64/libffi-3.3-hb1e8313_2.conda#0c959d444ac65555cb836cdbd3e9a2d9
+https://repo.anaconda.com/pkgs/main/osx-64/libllvm10-10.0.1-h76017ad_5.conda#a3a3f10a81668d361eccf7b62186982f
+https://repo.anaconda.com/pkgs/main/osx-64/libpng-1.6.37-ha441bb4_0.conda#d69245a20ec59d8dc534c65308607129
+https://repo.anaconda.com/pkgs/main/osx-64/libzopfli-1.0.3-hb1e8313_0.conda#59a6ea05350711fe0566aba5c51653d2
+https://repo.anaconda.com/pkgs/main/osx-64/lz4-c-1.9.3-h23ab428_1.conda#dc70fec3978d3189741886cc05fcb145
+https://repo.anaconda.com/pkgs/main/osx-64/mkl-2021.2.0-hecd8cb5_269.conda#fb01af85e56bf4a79290f84e73d1de5d
+https://repo.anaconda.com/pkgs/main/osx-64/openmpi-4.0.2-hfa1e0ec_1.conda#55374a530c88cf12ae5836bb77dfacd3
+https://repo.anaconda.com/pkgs/main/osx-64/openssl-1.1.1o-hca72f7f_0.conda#2b44a8f7a0051ffbc4814e9f99114222
+https://repo.anaconda.com/pkgs/main/osx-64/readline-8.1.2-hca72f7f_1.conda#c54a6153e7ef82f55e7a0ae2f6749592
+https://repo.anaconda.com/pkgs/main/osx-64/snappy-1.1.9-he9d5cce_0.conda#441e45d96c8bc4078e615f993ec164ab
+https://repo.anaconda.com/pkgs/main/osx-64/tbb-2020.3-h879752b_0.conda#735a2e20d007760d789b821f1c706fd3
+https://repo.anaconda.com/pkgs/main/osx-64/tk-8.6.11-h3fd3227_1.conda#30fd8466573613aadae5fe013306b51b
+https://repo.anaconda.com/pkgs/main/osx-64/zeromq-4.3.4-h23ab428_0.conda#24ca0b9986211e74e86f8afbba5c092d
+https://repo.anaconda.com/pkgs/main/osx-64/zfp-0.5.5-he9d5cce_6.conda#701d4bf0c61dc57368d798fa5f0c81d7
+https://repo.anaconda.com/pkgs/main/osx-64/brunsli-0.1-h23ab428_0.conda#38c8911fbbfd02287a822df899a2b927
+https://repo.anaconda.com/pkgs/main/osx-64/freetype-2.11.0-hd8bbffd_0.conda#a06dcb72dc6961d37f280b4b97d74f43
+https://conda.anaconda.org/conda-forge/osx-64/hdf5-1.10.5-mpi_openmpi_h70ef20d_1003.tar.bz2#07e6fe79b209a81dd9218adb16a0082a
+https://repo.anaconda.com/pkgs/main/osx-64/krb5-1.19.2-hcd88c3b_0.conda#9477ac0fa61f323ca0864573ac240c8e
+https://repo.anaconda.com/pkgs/main/osx-64/libnghttp2-1.46.0-ha29bfda_0.conda#7b5767e0cb9676eb442ef9cd2c76730b
+https://repo.anaconda.com/pkgs/main/osx-64/libssh2-1.10.0-h0a4fc7d_0.conda#8c6b3265fbbcf552ffc20a08c117cba3
+https://repo.anaconda.com/pkgs/main/osx-64/sqlite-3.38.3-h707629a_0.conda#5d3e2867383881b9227ee3aba91cd52d
+https://repo.anaconda.com/pkgs/main/osx-64/zstd-1.4.9-h322a384_0.conda#bc8c39208f4e8205c729683dcfa7e95e
+https://repo.anaconda.com/pkgs/main/osx-64/blosc-1.21.0-h2842e9f_0.conda#75e06f4f9058f3f1a001a0ad10d7f180
+https://repo.anaconda.com/pkgs/main/osx-64/libcurl-7.82.0-h6dfd666_0.conda#b64ff06f53a518e786fda8f67480ac0a
+https://repo.anaconda.com/pkgs/main/osx-64/libtiff-4.2.0-h87d7836_0.conda#32cded0d1900a09a8fefdeda35e0de1c
+https://repo.anaconda.com/pkgs/main/osx-64/python-3.8.13-hdfd78df_0.conda#f2f8398141b52f0b9ccc04b7a41c6ba0
+https://repo.anaconda.com/pkgs/main/noarch/alabaster-0.7.12-pyhd3eb1b0_0.tar.bz2#21ad3b69a5ce6c22e724e9dbb4cffa65
+https://repo.anaconda.com/pkgs/main/noarch/appdirs-1.4.4-pyhd3eb1b0_0.conda#5673d98d06171cb6eed03a6736845c4d
+https://repo.anaconda.com/pkgs/main/osx-64/appnope-0.1.2-py38hecd8cb5_1001.conda#2b544e4b465ff69e65ea1ffa4396cde0
+https://repo.anaconda.com/pkgs/main/noarch/asciitree-0.3.3-py_2.conda#88e5fad50e595d527acfc96b782261cb
+https://repo.anaconda.com/pkgs/main/noarch/attrs-21.4.0-pyhd3eb1b0_0.conda#3bc977a57587a7964921e3e1e2e31f9e
+https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2#b2aa5503875aba2f1d88cae9df9a96d5
+https://repo.anaconda.com/pkgs/main/osx-64/certifi-2022.5.18.1-py38hecd8cb5_0.conda#f81376d06da67f25c50290d26535452f
+https://conda.anaconda.org/conda-forge/noarch/cfgv-3.3.1-pyhd8ed1ab_0.tar.bz2#ebb5f5f7dc4f1a3780ef7ea7738db08c
+https://repo.anaconda.com/pkgs/main/osx-64/cfitsio-3.470-hee0f690_6.conda#5fac36e0ef1d6018e55f3e062c2951fe
+https://repo.anaconda.com/pkgs/main/noarch/charset-normalizer-2.0.4-pyhd3eb1b0_0.conda#e7a441d94234b2b5fafee06e25dbf076
+https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-2.0.0-pyhd3eb1b0_0.conda#8e38585c33e6c659e0e5b0b18e6bf3e2
+https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda#f550604d18b83878f647a491b2b343d6
+https://repo.anaconda.com/pkgs/main/osx-64/coverage-5.5-py38h9ed2024_2.conda#f4758d624f8cb061e82b5a7adf193300
+https://repo.anaconda.com/pkgs/main/noarch/cycler-0.11.0-pyhd3eb1b0_0.conda#f5e365d2cdb66d547eb8c3ab93843aab
+https://repo.anaconda.com/pkgs/main/osx-64/debugpy-1.5.1-py38he9d5cce_0.conda#2ddf77b041a27682881dfc342abbd687
+https://repo.anaconda.com/pkgs/main/noarch/decorator-5.1.1-pyhd3eb1b0_0.conda#4d969aac32a0faf84af90c797bfc7fec
+https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda#d912068b0729930972adcaac338882c0
+https://repo.anaconda.com/pkgs/main/noarch/distlib-0.3.2-pyhd3eb1b0_0.conda#86c256c16d9b416ffee75a4cfccf6c9a
+https://repo.anaconda.com/pkgs/main/osx-64/docutils-0.17.1-py38hecd8cb5_1.conda#dda26c59f2e4f82e09dfe7158537f067
+https://repo.anaconda.com/pkgs/main/osx-64/entrypoints-0.4-py38hecd8cb5_0.conda#f6003ec78f7c64fb47686adccc2675d1
+https://repo.anaconda.com/pkgs/main/noarch/executing-0.8.3-pyhd3eb1b0_0.conda#7be61d1c3c555fb37682b28d7a53d622
+https://repo.anaconda.com/pkgs/main/noarch/filelock-3.6.0-pyhd3eb1b0_0.conda#527be2ebbc60c0de6533ce33132ce303
+https://repo.anaconda.com/pkgs/main/osx-64/fsspec-2022.3.0-py38hecd8cb5_0.conda#6114cd136aa78d5db350e1fbd33fe49b
+https://repo.anaconda.com/pkgs/main/noarch/idna-3.3-pyhd3eb1b0_0.conda#8f43a528cf83b43af38a4d142fa38b8a
+https://repo.anaconda.com/pkgs/main/noarch/imagesize-1.3.0-pyhd3eb1b0_0.conda#306855b2038e489d01dff5b343a8adb9
+https://repo.anaconda.com/pkgs/main/noarch/iniconfig-1.1.1-pyhd3eb1b0_0.tar.bz2#e40edff2c5708f342cef43c7f280c507
+https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda#553832c0b872a28088a0001fa2ba3822
+https://repo.anaconda.com/pkgs/main/noarch/isort-5.9.3-pyhd3eb1b0_0.conda#75f2497fe01a9ac6208d72e26066b76a
+https://repo.anaconda.com/pkgs/main/noarch/joblib-1.1.0-pyhd3eb1b0_0.conda#cae25b839f3b24686e683addde01b742
+https://repo.anaconda.com/pkgs/main/noarch/json5-0.9.6-pyhd3eb1b0_0.conda#4e721ee2dbfa20069719d2ee19185031
+https://repo.anaconda.com/pkgs/main/osx-64/kiwisolver-1.4.2-py38he9d5cce_0.conda#69e898666e90cfa4c7ecc827b1990f09
+https://repo.anaconda.com/pkgs/main/osx-64/lcms2-2.12-hf1fd2bf_0.conda#697aba7a3308226df7a93ccfeae16ffa
+https://repo.anaconda.com/pkgs/main/osx-64/libwebp-1.2.2-h56c3ce4_0.conda#027d2450b64e251b8169798f6121b47a
+https://repo.anaconda.com/pkgs/main/osx-64/llvmlite-0.36.0-py38he4411ff_4.conda#3f9309846c28eec6bdafe4aa39ffa9f6
+https://repo.anaconda.com/pkgs/main/osx-64/locket-1.0.0-py38hecd8cb5_0.conda#c20b81da1e7a7eb21660d3d069709fa1
+https://repo.anaconda.com/pkgs/main/osx-64/markupsafe-2.0.1-py38h9ed2024_0.conda#ce67815e11d846384abf63db2db75a8a
+https://repo.anaconda.com/pkgs/main/osx-64/mccabe-0.6.1-py38_1.conda#86e2057ac3b9c7a26ab9a52bb0f53d6e
+https://repo.anaconda.com/pkgs/main/osx-64/mistune-0.8.4-py38h1de35cc_1001.conda#14552fbbea9538d0aa2686a3c7cba628
+https://repo.anaconda.com/pkgs/main/noarch/mock-4.0.3-pyhd3eb1b0_0.conda#e30b674f018b25357c076ae407d769b9
+https://repo.anaconda.com/pkgs/main/noarch/more-itertools-8.12.0-pyhd3eb1b0_0.conda#ac1210cc005fb8bd631ea8beb8343332
+https://repo.anaconda.com/pkgs/main/osx-64/mpi4py-3.0.3-py38h27a7d74_1.conda#2e4c9f6cde00b616c2fb0c025eb225d9
+https://repo.anaconda.com/pkgs/main/osx-64/msgpack-python-1.0.3-py38haf03e11_0.conda#06c7070693428ccf4815c78bfeaa841b
+https://repo.anaconda.com/pkgs/main/noarch/munkres-1.1.4-py_0.conda#148362ba07f92abab76999a680c80084
+https://repo.anaconda.com/pkgs/main/osx-64/nest-asyncio-1.5.5-py38hecd8cb5_0.conda#f0d2034d99e98561f9859f0a1a13360f
+https://repo.anaconda.com/pkgs/main/noarch/networkx-2.7.1-pyhd3eb1b0_0.conda#6c97a8687676de8dac42bd8373892397
+https://repo.anaconda.com/pkgs/main/osx-64/openjpeg-2.4.0-h66ea3da_0.conda#882833bd7befc5e60e6fba9c518c1b79
+https://repo.anaconda.com/pkgs/main/noarch/pandocfilters-1.5.0-pyhd3eb1b0_0.conda#5547ced9e3bb4c513405998957b52c7b
+https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.3-pyhd3eb1b0_0.conda#c6f0f6219bf5ce2b510ef4b75cbc3e01
+https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda#4a6363fd8dda664b95f99f7c5aa95abc
+https://repo.anaconda.com/pkgs/main/osx-64/pluggy-0.13.1-py38hecd8cb5_0.conda#142fadba8c1ed28a493f1a327d0a3a1b
+https://conda.anaconda.org/conda-forge/noarch/prefixed-0.3.2-pyhd8ed1ab_0.tar.bz2#101a437c0ab238eaa1736dd665b33fa2
+https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.13.1-pyhd3eb1b0_0.conda#05275f89084c4ce7f9b0bc1e258b3e9e
+https://repo.anaconda.com/pkgs/main/noarch/ptyprocess-0.7.0-pyhd3eb1b0_2.conda#7441d2827d4bfbcc1fa308875a146246
+https://repo.anaconda.com/pkgs/main/noarch/pure_eval-0.2.2-pyhd3eb1b0_0.conda#a87d6d9827e5dff68d34d69971f8a9b1
+https://repo.anaconda.com/pkgs/main/noarch/py-1.11.0-pyhd3eb1b0_0.conda#7205a898ed2abbf6e9b903dff6abe08e
+https://repo.anaconda.com/pkgs/main/noarch/pycodestyle-2.7.0-pyhd3eb1b0_0.conda#30e8cdd78a0754c2d789d53fa465cd30
+https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.21-pyhd3eb1b0_0.conda#135a72ff2a31150a3a3ff0b1edd41ca9
+https://repo.anaconda.com/pkgs/main/noarch/pyflakes-2.3.1-pyhd3eb1b0_0.conda#eaecb0dee9d296e2ba1dadf6902149f3
+https://repo.anaconda.com/pkgs/main/noarch/pygments-2.11.2-pyhd3eb1b0_0.conda#eff55c770961f459a734cf86768aac98
+https://repo.anaconda.com/pkgs/main/noarch/pyparsing-3.0.4-pyhd3eb1b0_0.conda#6bca2ae9c9aae9ccdebcb8cf2aa87cb3
+https://repo.anaconda.com/pkgs/main/osx-64/pyrsistent-0.18.0-py38hca72f7f_0.conda#113311ade8193fee41c406cec2678852
+https://repo.anaconda.com/pkgs/main/osx-64/pysocks-1.7.1-py38_1.conda#54d4dfc99989f03cefe7d922a741f3cf
+https://repo.anaconda.com/pkgs/main/noarch/python-fastjsonschema-2.15.1-pyhd3eb1b0_0.conda#ad1b2f7b33a45d0d68979ca2ad84b6a9
+https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.8-2_cp38.tar.bz2#156803acb0247c263c9586f190b72f1c
+https://repo.anaconda.com/pkgs/main/noarch/pytz-2021.3-pyhd3eb1b0_0.conda#76415b791ffd2007687ac5f0665aa7af
+https://repo.anaconda.com/pkgs/main/osx-64/pyyaml-6.0-py38hca72f7f_1.conda#243ac745f35beceb65ec17bff3698757
+https://repo.anaconda.com/pkgs/main/osx-64/pyzmq-22.3.0-py38he9d5cce_2.conda#fe003dc72d676b6ce6f6abd3fb575964
+https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.8.0-pyhd3eb1b0_1.conda#bfa3c5c61a5a91e528a1d2d1e3cae6c9
+https://repo.anaconda.com/pkgs/main/noarch/six-1.16.0-pyhd3eb1b0_1.conda#34586824d411d36af2fa40e799c172d0
+https://repo.anaconda.com/pkgs/main/osx-64/sniffio-1.2.0-py38hecd8cb5_1.conda#51d4ef66b0571f459e6a9613444bdab6
+https://repo.anaconda.com/pkgs/main/noarch/snowballstemmer-2.2.0-pyhd3eb1b0_0.conda#c8c10f2cd854c0a27630760958bba60c
+https://repo.anaconda.com/pkgs/main/noarch/sphinx_rtd_theme-0.4.3-pyhd3eb1b0_0.conda#0c60976249f116d5aa21fd50f0f94990
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-applehelp-1.0.2-pyhd3eb1b0_0.tar.bz2#ac923499f97b9a9ab7c672b27cb2a1a8
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-devhelp-1.0.2-pyhd3eb1b0_0.tar.bz2#bc39c2b70430734b5879d6b504e3311f
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-htmlhelp-2.0.0-pyhd3eb1b0_0.conda#2af558ca8b56151110c7a3639a1ea348
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-jsmath-1.0.1-pyhd3eb1b0_0.tar.bz2#e43f8de7d6a717935ab220a0c957771d
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-qthelp-1.0.3-pyhd3eb1b0_0.tar.bz2#08d67f73f640b4d1e5e8890a324b60e3
+https://repo.anaconda.com/pkgs/main/noarch/sphinxcontrib-serializinghtml-1.1.5-pyhd3eb1b0_0.conda#0440b84dfd478f340cf14c2d7c24f6c7
+https://repo.anaconda.com/pkgs/main/noarch/testpath-0.5.0-pyhd3eb1b0_0.conda#bd2a5c664c982e8637ae17b1662bd9a4
+https://repo.anaconda.com/pkgs/main/noarch/threadpoolctl-2.2.0-pyh0d69192_0.conda#bbfdbae4934150b902f97daaf287efe2
+https://repo.anaconda.com/pkgs/main/noarch/toml-0.10.2-pyhd3eb1b0_0.conda#cda05f5f6d8509529d1a2743288d197a
+https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.2-pyhd3eb1b0_0.conda#9fedc09c1ff4c9bc22695093c1ecd335
+https://repo.anaconda.com/pkgs/main/osx-64/tornado-6.1-py38h9ed2024_0.conda#430b78790d6d2f428bcf6cfd4a64127c
+https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.62.2-pyhd3eb1b0_1.conda#9e0c24d3f7c51fbd42a2ebeb50b5c0fa
+https://repo.anaconda.com/pkgs/main/noarch/traitlets-5.1.1-pyhd3eb1b0_0.conda#675f60e84f695e63749b09f9ed464eda
+https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-4.1.1-pyh06a4308_0.conda#8d4303f11560fe9621c962e87cf64d27
+https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-pyhd3eb1b0_0.conda#ffa649340272c3f6466ba01da254c3b0
+https://repo.anaconda.com/pkgs/main/osx-64/webencodings-0.5.1-py38_1.conda#06bf5fe6b42bc9d00c96f4be7beaaf26
+https://repo.anaconda.com/pkgs/main/noarch/wheel-0.37.1-pyhd3eb1b0_0.conda#ab85e96e26da8d5797c2458232338b86
+https://repo.anaconda.com/pkgs/main/osx-64/zipp-3.8.0-py38hecd8cb5_0.conda#581323dbb487b96fb811106546a73af4
+https://repo.anaconda.com/pkgs/main/osx-64/anyio-3.5.0-py38hecd8cb5_0.conda#626b497546f7f6e4286a86f2b8b84d51
+https://repo.anaconda.com/pkgs/main/noarch/asttokens-2.0.5-pyhd3eb1b0_0.conda#140486e2ce4f3931b44aa5f7ff8d88da
+https://repo.anaconda.com/pkgs/main/noarch/babel-2.9.1-pyhd3eb1b0_0.conda#61575e8b70e18ebc54e65da5e441b861
+https://conda.anaconda.org/conda-forge/osx-64/blessed-1.19.1-py38h50d1736_1.tar.bz2#3b04eb0edd6b9e0a1fcbba9b0802acc4
+https://repo.anaconda.com/pkgs/main/osx-64/cffi-1.15.0-py38hc55c11b_1.conda#5f7caa2c82d00004ee9bde44840945dc
+https://repo.anaconda.com/pkgs/main/osx-64/cytoolz-0.11.0-py38haf1e3a3_0.conda#f58527d5ddae8d53ee7570e698b2ee49
+https://repo.anaconda.com/pkgs/main/noarch/fasteners-0.16.3-pyhd3eb1b0_0.conda#335fdb99580fb176808d42ccd3c332e1
+https://repo.anaconda.com/pkgs/main/noarch/fonttools-4.25.0-pyhd3eb1b0_0.conda#bb9c5b5a6d892fca5efe4bf0203b6a48
+https://repo.anaconda.com/pkgs/main/osx-64/importlib-metadata-4.11.3-py38hecd8cb5_0.conda#97d838ed6fa3cadd79922114accfc0fb
+https://repo.anaconda.com/pkgs/main/noarch/importlib_resources-5.2.0-pyhd3eb1b0_1.conda#3e7caf9dbd3b4771e9b951ffc7cdad80
+https://repo.anaconda.com/pkgs/main/osx-64/jedi-0.18.1-py38hecd8cb5_1.conda#f0aa32d375b15db6f9aa00c5ceae339c
+https://repo.anaconda.com/pkgs/main/noarch/jinja2-3.0.3-pyhd3eb1b0_0.conda#a5b0429ead9704cd1ad0b044c97e728f
+https://repo.anaconda.com/pkgs/main/osx-64/jupyter_core-4.10.0-py38hecd8cb5_0.conda#8c94540b1c15d9fd8b5a1dc8c4c92f64
+https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda#af46aff4922ca45df6ba19b313df6070
+https://repo.anaconda.com/pkgs/main/noarch/matplotlib-inline-0.1.2-pyhd3eb1b0_2.conda#47e865f8b884de7c5d516349e83457a7
+https://repo.anaconda.com/pkgs/main/osx-64/mkl-service-2.4.0-py38h9ed2024_0.conda#aa83a42208ae871c8f1d3e15720024c4
+https://repo.anaconda.com/pkgs/main/noarch/packaging-21.3-pyhd3eb1b0_0.conda#07bbfbb961db7fa329cc42716943ea62
+https://repo.anaconda.com/pkgs/main/noarch/partd-1.2.0-pyhd3eb1b0_1.conda#d02d8b6ea30c680d3fafe4ac50cc4b18
+https://repo.anaconda.com/pkgs/main/noarch/pexpect-4.8.0-pyhd3eb1b0_3.conda#765b2562d6cdd14bb6d44fc170a04331
+https://repo.anaconda.com/pkgs/main/osx-64/pillow-9.0.1-py38hde71d04_0.conda#9b05d3f1ef363938abfd1140d3104a81
+https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.20-pyhd3eb1b0_0.conda#19fa1fa6a03645e39e7dce3bdbe9d72f
+https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.2-pyhd3eb1b0_0.conda#211ee00320b08a1ac9fea6677649f6c9
+https://repo.anaconda.com/pkgs/main/osx-64/setuptools-61.2.0-py38hecd8cb5_0.conda#d7a83933228d51053ea27c48a2959f1a
+https://repo.anaconda.com/pkgs/main/osx-64/snakeviz-2.0.1-py38hecd8cb5_0.conda#0fec234d151c6afae1a921bf6dbe77ed
+https://repo.anaconda.com/pkgs/main/osx-64/terminado-0.13.1-py38hecd8cb5_0.conda#70cd24be2f01996667d580408c973d93
+https://repo.anaconda.com/pkgs/main/noarch/typing-extensions-4.1.1-hd3eb1b0_0.conda#0b535dfd0618653dd772c78c9c2b56a8
+https://repo.anaconda.com/pkgs/main/osx-64/virtualenv-20.4.6-py38hecd8cb5_1.conda#080438332d4a3e1a551c72ac51fb8481
+https://repo.anaconda.com/pkgs/main/osx-64/websocket-client-0.58.0-py38hecd8cb5_4.conda#28bd1df12ee80049de5caaae2ebce77e
+https://repo.anaconda.com/pkgs/main/osx-64/argon2-cffi-bindings-21.2.0-py38hca72f7f_0.conda#241491682516288378ab940ad09d7f74
+https://repo.anaconda.com/pkgs/main/noarch/bleach-4.1.0-pyhd3eb1b0_0.conda#256eb7e384e35f993ef8ccd6c4f45e58
+https://repo.anaconda.com/pkgs/main/osx-64/brotlipy-0.7.0-py38h9ed2024_1003.conda#41b0bc0721aecf75336a098f4d5314b8
+https://repo.anaconda.com/pkgs/main/osx-64/cryptography-37.0.1-py38hf6deb26_0.conda#4d1cadeecc9246ce76605e9bf89fad9e
+https://repo.anaconda.com/pkgs/main/osx-64/dask-core-2022.5.0-py38hecd8cb5_0.conda#ee065a8767e0f8fc4d0950c6577f0043
+https://conda.anaconda.org/conda-forge/noarch/enlighten-1.10.1-pyhd8ed1ab_0.tar.bz2#f5c404e6c73888f69932895043ea5938
+https://repo.anaconda.com/pkgs/main/noarch/flake8-3.9.2-pyhd3eb1b0_0.conda#04cb15847ce1ae281bac8eb5d67da440
+https://repo.anaconda.com/pkgs/main/osx-64/jsonschema-4.4.0-py38hecd8cb5_0.conda#38dd39c9c45ed24ea311dc0b4d155833
+https://repo.anaconda.com/pkgs/main/osx-64/jupyter_client-7.2.2-py38hecd8cb5_0.conda#51773d706c11abaf05fea1800625ff13
+https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.6.0-pyhd8ed1ab_0.tar.bz2#0941325bf48969e2b3b19d0951740950
+https://repo.anaconda.com/pkgs/main/osx-64/numpy-base-1.20.1-py38h585ceec_0.conda#3746d2d0beb512e308f0b39c246b33fa
+https://repo.anaconda.com/pkgs/main/osx-64/pip-21.2.4-py38hecd8cb5_0.conda#7e29b793da87a1c4715c1fa119d99ef0
+https://repo.anaconda.com/pkgs/main/osx-64/pytest-6.2.4-py38hecd8cb5_2.conda#584982a60ae019a3948792e726501183
+https://repo.anaconda.com/pkgs/main/noarch/stack_data-0.2.0-pyhd3eb1b0_0.conda#6212968e73726f6da42e5ffcd2bea92d
+https://conda.anaconda.org/conda-forge/osx-64/ukkonen-1.0.1-py38h12bbefe_1.tar.bz2#b77764caaf71b45937ac28f6bab7764d
+https://repo.anaconda.com/pkgs/main/noarch/argon2-cffi-21.3.0-pyhd3eb1b0_0.conda#f00b851bc61b4c313903d31c7daecb09
+https://conda.anaconda.org/conda-forge/noarch/identify-2.5.1-pyhd8ed1ab_0.tar.bz2#6f41e3056fcd3061fbc2b49b3309fe0c
+https://repo.anaconda.com/pkgs/main/osx-64/ipython-8.3.0-py38hecd8cb5_0.conda#329fc0165c4bf2d945d0c68557d5173d
+https://repo.anaconda.com/pkgs/main/osx-64/nbformat-5.3.0-py38hecd8cb5_0.conda#5b6ac6fe9256a09f8e9b1bf500740ffb
+https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-22.0.0-pyhd3eb1b0_0.conda#1dbbf9422269cd62c7094960d9b43f36
+https://repo.anaconda.com/pkgs/main/osx-64/ipykernel-6.9.1-py38hecd8cb5_0.conda#1e871f3672d376078a113d66b352c1c9
+https://repo.anaconda.com/pkgs/main/osx-64/nbclient-0.5.13-py38hecd8cb5_0.conda#cb3b035daf42ea75d5f48577f502d0f2
+https://conda.anaconda.org/conda-forge/osx-64/pre-commit-2.15.0-py38h50d1736_1.tar.bz2#0662d1baf23c64a47f7f4afc52daf6a8
+https://repo.anaconda.com/pkgs/main/osx-64/urllib3-1.26.9-py38hecd8cb5_0.conda#1349aad2f43f0990eb33d31a10ed745c
+https://repo.anaconda.com/pkgs/main/osx-64/nbconvert-6.1.0-py38hecd8cb5_0.conda#11ca6fd9c54d8f54cbd0f77e9260bc3a
+https://repo.anaconda.com/pkgs/main/noarch/requests-2.27.1-pyhd3eb1b0_0.conda#9b593f86737e69140c47c2107ecf277c
+https://repo.anaconda.com/pkgs/main/noarch/jupyter_server-1.13.5-pyhd3eb1b0_0.conda#303eb09f873fde3c13abaaed542d54e0
+https://repo.anaconda.com/pkgs/main/osx-64/notebook-6.4.11-py38hecd8cb5_0.conda#1c1836860fb054c349d72f7d6b3a6efa
+https://repo.anaconda.com/pkgs/main/noarch/sphinx-4.2.0-pyhd3eb1b0_1.conda#8f65a307ecef80b3afd979777cc5b549
+https://repo.anaconda.com/pkgs/main/osx-64/jupyterlab_server-2.12.0-py38hecd8cb5_0.conda#1e745f1768c0aca6a6af0a88e8e4f8ad
+https://repo.anaconda.com/pkgs/main/noarch/nbclassic-0.3.5-pyhd3eb1b0_0.conda#22683be353228acd015cae8a4676b462
+https://repo.anaconda.com/pkgs/main/noarch/jupyterlab-3.1.7-pyhd3eb1b0_0.conda#9292f2b7ad621d8a6d9a9a7f7338664d
+https://repo.anaconda.com/pkgs/main/osx-64/bottleneck-1.3.4-py38h67323c0_0.conda#51ecbea9295cc1aa0eb6e6562826aca2
+https://conda.anaconda.org/conda-forge/osx-64/h5py-2.10.0-mpi_openmpi_py38h28212cf_2.tar.bz2#770c0cd6031f85bbc7ed745764f0babb
+https://conda.anaconda.org/conda-forge/osx-64/hdf5plugin-2.3.0-py38ha601cb3_0.tar.bz2#b81b5f2f24a99a4d0446bab539562acc
+https://repo.anaconda.com/pkgs/main/osx-64/imagecodecs-2021.8.26-py38ha952a84_0.conda#707d3ae9a006eae3ef3ee59d36f2240b
+https://repo.anaconda.com/pkgs/main/noarch/imageio-2.9.0-pyhd3eb1b0_0.conda#4f1d37bdc3afdb2d237fd9b6b920ec3d
+https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-3.5.1-py38hecd8cb5_1.conda#2ca356c27d3d4163b65d4560ea6024c2
+https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-base-3.5.1-py38hfb0c5b7_1.conda#737f5b096415dccfd6a6a1b357279c1c
+https://repo.anaconda.com/pkgs/main/osx-64/mkl_fft-1.3.0-py38h4a7008c_2.conda#074ae65d92fbb401e882203908b45859
+https://repo.anaconda.com/pkgs/main/osx-64/mkl_random-1.2.2-py38hb2f4e1b_0.conda#f000fabdb08ccfb051ac7134449e23b3
+https://repo.anaconda.com/pkgs/main/osx-64/numpy-1.20.1-py38hd6e1bb9_0.conda#cbf3fe59bbeabac553345a216b2883c2
+https://repo.anaconda.com/pkgs/main/osx-64/numba-0.53.1-py38hb2f4e1b_0.conda#a63656d5d809042bbde4c473e78e947f
+https://repo.anaconda.com/pkgs/main/osx-64/numcodecs-0.8.0-py38h23ab428_0.conda#9792b5443f40671efd3124a749012b01
+https://repo.anaconda.com/pkgs/main/osx-64/numexpr-2.7.3-py38h5873af2_1.conda#620c4922f3745f0580eb49fe2ca4a3f2
+https://repo.anaconda.com/pkgs/main/noarch/opt_einsum-3.3.0-pyhd3eb1b0_1.conda#53205b8b5762c06f85b6bb7abd4f496e
+https://repo.anaconda.com/pkgs/main/osx-64/pywavelets-1.3.0-py38hca72f7f_0.conda#3fed974bbd0df850bbe7176cd13044d8
+https://repo.anaconda.com/pkgs/main/osx-64/scipy-1.6.2-py38hd5f7400_1.conda#e35cb7581cc465135b22bf215d2ec35b
+https://repo.anaconda.com/pkgs/main/noarch/tifffile-2021.7.2-pyhd3eb1b0_2.conda#5a265e3b9694c13bcfb8c40a3b8e3d8f
+https://repo.anaconda.com/pkgs/main/osx-64/pandas-1.3.3-py38h5008ddb_0.conda#b9b8ba75c0650d7f1cb2bb5af909cb16
+https://conda.anaconda.org/conda-forge/osx-64/pytables-3.6.1-py38h6f8395a_1.tar.bz2#a2d59c3be8c694e43ba4621b596f82f6
+https://repo.anaconda.com/pkgs/main/osx-64/scikit-image-0.19.2-py38hae1ba45_0.conda#00367d2a591fe71d6ffa33c2cdd87aaa
+https://repo.anaconda.com/pkgs/main/osx-64/scikit-learn-0.24.2-py38hb2f4e1b_0.conda#cab481bb97698d6dea0d7d70ade7ce5d
+https://repo.anaconda.com/pkgs/main/noarch/zarr-2.8.1-pyhd3eb1b0_0.conda#7df763b90dcefae1c6039911fc72b694
+https://repo.anaconda.com/pkgs/main/noarch/seaborn-0.11.2-pyhd3eb1b0_0.conda#36b64fb4e3b76ded59d6388c9582de69
+# pip click @ https://files.pythonhosted.org/packages/d2/3d/fa76db83bf75c4f8d338c2fd15c8d33fdd7ad23a9b5e57eb6c5de26b430e/click-7.1.2-py2.py3-none-any.whl#md5=None
+# pip colorlog @ https://files.pythonhosted.org/packages/51/62/61449c6bb74c2a3953c415b2cdb488e4f0518ac67b35e2b03a6d543035ca/colorlog-4.8.0-py2.py3-none-any.whl#md5=None
+# pip fastjsonschema @ https://files.pythonhosted.org/packages/e6/0b/24795939622d60f4b453aa7040f23c6a6f8b44c7c026c3b42d9842e6cc31/fastjsonschema-2.15.3-py3-none-any.whl#md5=None
+# pip future @ https://files.pythonhosted.org/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz#md5=None
+# pip lazy-object-proxy @ https://files.pythonhosted.org/packages/5c/96/2c984706be60a1671177f57ba9f6b17a11b4cbf1b6704f3839ad6addc284/lazy_object_proxy-1.7.1-cp38-cp38-macosx_10_9_x86_64.whl#md5=None
+# pip progress @ https://files.pythonhosted.org/packages/e9/ff/7871f3736dc6707435b2a2f217c46b5a5bc6ea7e0a9a443cd69146a1afd1/progress-1.4.tar.gz#md5=None
+# pip smmap @ https://files.pythonhosted.org/packages/6d/01/7caa71608bc29952ae09b0be63a539e50d2484bc37747797a66a60679856/smmap-5.0.0-py3-none-any.whl#md5=None
+# pip tabulate @ https://files.pythonhosted.org/packages/ca/80/7c0cad11bd99985cfe7c09427ee0b4f9bd6b048bd13d4ffb32c6db237dfb/tabulate-0.8.9-py3-none-any.whl#md5=None
+# pip unidecode @ https://files.pythonhosted.org/packages/f9/5b/7603add7f192252916b85927263b598c74585f82389e6e42318a6278159b/Unidecode-1.3.4-py3-none-any.whl#md5=None
+# pip wrapt @ https://files.pythonhosted.org/packages/33/cd/7335d8b82ff0a442581ab37a8d275ad76b4c1f33ace63c1a4d7c23791eee/wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl#md5=None
+# pip astroid @ https://files.pythonhosted.org/packages/94/58/6f1bbfd88b6ba5271b4a9be99cb15cb2fe369794ba410390f0d672c6ad39/astroid-2.11.5-py3-none-any.whl#md5=None
+# pip docstr-coverage @ https://files.pythonhosted.org/packages/ef/97/80f5de5ab716ece99fec79ce1ae51821ef4fcd6ccd64902b4481991fbba4/docstr_coverage-2.1.1-py3-none-any.whl#md5=None
+# pip flake8-polyfill @ https://files.pythonhosted.org/packages/86/b5/a43fed6fd0193585d17d6faa7b85317d4461f694aaed546098c69f856579/flake8_polyfill-1.0.2-py2.py3-none-any.whl#md5=None
+# pip genbadge @ https://files.pythonhosted.org/packages/20/b8/61d32e888fdcced280813ec871c50c6d0ef17fc266fe56d600fd77201566/genbadge-1.0.6-py2.py3-none-any.whl#md5=None
+# pip gitdb @ https://files.pythonhosted.org/packages/a3/7c/5d747655049bfbf75b5fcec57c8115896cb78d6fafa84f6d3ef4c0f13a98/gitdb-4.0.9-py3-none-any.whl#md5=None
+# pip mando @ https://files.pythonhosted.org/packages/e6/cc/f6e25247c1493a654785e68cd975e479c311e99dafedd49ed17f8d300e0c/mando-0.6.4-py2.py3-none-any.whl#md5=None
+# pip retrying @ https://files.pythonhosted.org/packages/44/ef/beae4b4ef80902f22e3af073397f079c96969c69b2c7d52a57ea9ae61c9d/retrying-1.3.3.tar.gz#md5=None
+# pip gitpython @ https://files.pythonhosted.org/packages/83/32/ce68915670da6fd6b1e3fb4b3554b4462512f6441dddd194fc0f4f6ec653/GitPython-3.1.27-py3-none-any.whl#md5=None
+# pip plotly @ https://files.pythonhosted.org/packages/1f/f6/bd3c17c8003b6641df1228e80e1acac97ed8402635e46c2571f8e1ef63af/plotly-4.14.3-py2.py3-none-any.whl#md5=None
+# pip radon @ https://files.pythonhosted.org/packages/cf/fe/c400dbbbbde6649ad0164ef2ffef3672baefc62ecb676f58d0f25d8f83b0/radon-4.0.0-py2.py3-none-any.whl#md5=None
+# pip sphinx-autoapi @ https://files.pythonhosted.org/packages/5e/67/249380ade22a7efaa8a335f45a9b87f2fdda499c9fdc53913096dec5d1fe/sphinx_autoapi-1.8.4-py2.py3-none-any.whl#md5=None
+# pip wily @ https://files.pythonhosted.org/packages/e7/2c/53638ade80511eee70c29bcc52e90ca017836feecba1762c935112249aea/wily-1.20.0-py3-none-any.whl#md5=None
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d0c3cbf1020d5c292abdedf27627c6abe25e2293
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/coverage.svg b/docs/coverage.svg
new file mode 100644
index 0000000000000000000000000000000000000000..b9bdc604c487030a75f876728374a4393dcd9713
--- /dev/null
+++ b/docs/coverage.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="114" height="20" role="img" aria-label="coverage: 67.15%"><title>coverage: 67.15%</title><linearGradient id="s" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="r"><rect width="114" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#r)"><rect width="61" height="20" fill="#555"/><rect x="61" width="53" height="20" fill="#fe7d37"/><rect width="114" height="20" fill="url(#s)"/></g><g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" text-rendering="geometricPrecision" font-size="110"><text aria-hidden="true" x="315" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="510">coverage</text><text x="315" y="140" transform="scale(.1)" fill="#fff" textLength="510">coverage</text><text aria-hidden="true" x="865" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">67.15%</text><text x="865" y="140" transform="scale(.1)" fill="#fff" textLength="430">67.15%</text></g></svg>
diff --git a/docs/docstr_coverage_badge.svg b/docs/docstr_coverage_badge.svg
new file mode 100644
index 0000000000000000000000000000000000000000..1e6a25876c7b46d8385389b59d9f87518dbf8f45
--- /dev/null
+++ b/docs/docstr_coverage_badge.svg
@@ -0,0 +1,20 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="142" height="20">
+    <linearGradient id="s" x2="0" y2="100%">
+        <stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
+        <stop offset="1" stop-opacity=".1"/>
+    </linearGradient>
+    <clipPath id="r">
+        <rect width="142" height="20" rx="3" fill="#fff"/>
+    </clipPath>
+    <g clip-path="url(#r)">
+        <rect width="99" height="20" fill="#555"/>
+        <rect x="99" width="43" height="20" fill="#a4a61d"/>
+        <rect width="142" height="20" fill="url(#s)"/>
+    </g>
+    <g fill="#fff" text-anchor="middle" font-family="Verdana,Geneva,DejaVu Sans,sans-serif" font-size="110">
+        <text x="505" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="890">docstr-coverage</text>
+        <text x="505" y="140" transform="scale(.1)" textLength="890">docstr-coverage</text>
+        <text x="1195" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)">78%</text>
+        <text x="1195" y="140" transform="scale(.1)">78%</text>
+    </g>
+</svg>
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000000000000000000000000000000000000..6fcf05b4b76f8b9774c317ac8ada402f8a7087de
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.https://www.sphinx-doc.org/
+	exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/_templates/custom-class-template.rst b/docs/source/_templates/custom-class-template.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b29757c52142bc09a6fb23e8fa8291d10556f44a
--- /dev/null
+++ b/docs/source/_templates/custom-class-template.rst
@@ -0,0 +1,32 @@
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+   :members:
+   :show-inheritance:
+   :inherited-members:
+
+   {% block methods %}
+   .. automethod:: __init__
+
+   {% if methods %}
+   .. rubric:: {{ _('Methods') }}
+
+   .. autosummary::
+   {% for item in methods %}
+      ~{{ name }}.{{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block attributes %}
+   {% if attributes %}
+   .. rubric:: {{ _('Attributes') }}
+
+   .. autosummary::
+   {% for item in attributes %}
+      ~{{ name }}.{{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
diff --git a/docs/source/_templates/custom-module-template.rst b/docs/source/_templates/custom-module-template.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b0bb3e1a71d994e91720e40f892680cc833b54d1
--- /dev/null
+++ b/docs/source/_templates/custom-module-template.rst
@@ -0,0 +1,64 @@
+{{ fullname | escape | underline}}
+
+.. automodule:: {{ fullname }}
+
+   {% block attributes %}
+   {% if attributes %}
+   .. rubric:: {{ _('Module Attributes') }}
+
+   .. autosummary::
+   {% for item in attributes %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block functions %}
+   {% if functions %}
+   .. rubric:: {{ _('Functions') }}
+
+   .. autosummary::
+   {% for item in functions %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block classes %}
+   {% if classes %}
+   .. rubric:: {{ _('Classes') }}
+
+   .. autosummary::
+      :toctree:
+      :template: custom-class-template.rst
+   {% for item in classes %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block exceptions %}
+   {% if exceptions %}
+   .. rubric:: {{ _('Exceptions') }}
+
+   .. autosummary::
+      :toctree:
+   {% for item in exceptions %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+{% block modules %}
+{% if modules %}
+.. rubric:: Modules
+
+.. autosummary::
+   :toctree:
+   :template: custom-module-template.rst
+   :recursive:
+{% for item in modules %}
+   {{ item }}
+{%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/docs/source/biblio.bib b/docs/source/biblio.bib
new file mode 100644
index 0000000000000000000000000000000000000000..3df551689add8745b44d1d89252936ceeab7cdd2
--- /dev/null
+++ b/docs/source/biblio.bib
@@ -0,0 +1,102 @@
+@article{Chambolle2004,
+  title = {An {{Algorithm}} for {{Total Variation Minimization}} and {{Applications}}},
+  author = {Chambolle, Antonin},
+  year = {2004},
+  month = jan,
+  journal = {Journal of Mathematical Imaging and Vision},
+  volume = {20},
+  number = {1},
+  pages = {89--97},
+  issn = {1573-7683},
+  doi = {10.1023/B:JMIV.0000011325.36760.1e},
+}
+
+@article{Condat2013,
+  author    = {Laurent Condat},
+  title     = {A Primal-Dual Splitting Method for Convex Optimization Involving {L}ipschitzian, Proximable and Linear Composite Terms},
+  journal   = {J. Optim. Theory Appl.},
+  year      = {2013},
+  volume    = {158},
+  pages     = {460--479},
+  owner     = {pthouven},
+  timestamp = {2016.10.03},
+}
+
+@article{Condat2014spl,
+  author    = {Laurent Condat},
+  title     = {A Generic Proximal Algorithm for Convex Optimization -- Application to Total Variation minimization},
+  journal   = {IEEE Signal Process. Lett.},
+  year      = {2014},
+  volume    = {21},
+  number    = {8},
+  pages     = {985-989},
+  month     = aug,
+  owner     = {pthouven},
+  timestamp = {2016.10.03}
+}
+
+@article{Figueiredo2010,
+  title = {Restoration of {{Poissonian Images Using Alternating Direction Optimization}}},
+  author = {Figueiredo, M{\'a}rio A. T. and {Bioucas-Dias}, Jos{\'e} M.},
+  year = {2010},
+  month = dec,
+  volume = {19},
+  pages = {3133--3145},
+  issn = {1057-7149, 1941-0042},
+  doi = {10.1109/TIP.2010.2053941},
+  journal = {IEEE Trans. Image Process.},
+  number = {12}
+}
+
+@article{Matakos2013,
+  title = {Accelerated {{Edge}}-{{Preserving Image Restoration Without Boundary Artifacts}}},
+  author = {Matakos, A. and Ramani, S. and Fessler, J. A.},
+  year = {2013},
+  month = may,
+  journal = {IEEE Trans. Image Process.},
+  volume = {22},
+  number = {5},
+  pages = {2019--2029},
+  issn = {1057-7149, 1941-0042},
+  doi = {10.1109/TIP.2013.2244218},
+}
+
+@PhdThesis{Prusa2012,
+  author = {Zden\v{e}k Pru\v{s}a},
+  title  = {Segmentwise discrete wavelet transform},
+  school = {Brno university of technology},
+  year   = {2012},
+}
+
+@inproceedings{Vono2019icassp,
+  author={M. {Vono} and N. {Dobigeon} and P. {Chainais}},
+  booktitle={ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
+  title={Bayesian Image Restoration under Poisson Noise and Log-concave Prior},
+  year={2019},
+  volume={},
+  number={},
+  pages={1712-1716},
+  address={Brighton, United Kingdom}
+}
+
+@article{Vono2019tsp,
+  title   = {Split-and-augmented {Gi}bbs sampler - Application to large-scale inference problems},
+  author  = {Vono, M. and Dobigeon, N. and Chainais, P.},
+  journal = {IEEE Transactions on Signal Processing},
+  year    = {2019},
+  pages   = {1648--1661},
+  volume  = {67},
+  number  = {6},
+  month   = mar,
+}
+
+@article{Vu2013,
+  author  = {Bang C{\^o}ng V{\~{u}}},
+  title   = {A splitting algorithm for dual monotone inclusions involving cocoercive operators},
+  journal = {Advances in Computational Mathematics},
+  year    = {2013},
+  volume  = {38},
+  number  = {3},
+  pages   = {667--681},
+  month   = apr,
+}
diff --git a/docs/source/biblio.rst b/docs/source/biblio.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9acd0543155c96fb983b32066f01c838ce469c32
--- /dev/null
+++ b/docs/source/biblio.rst
@@ -0,0 +1,8 @@
+References
+==========
+
+.. bibliography::
+    :style: alpha
+    :all:
+
+.. :cited:
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ed7df51357654b0ddc1021b7ebaa40ffd787732
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,170 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath("../../src"))
+import sphinx_rtd_theme
+
+# -- Project information -----------------------------------------------------
+
+project = "aaxda"
+copyright = "2022, pthouvenin"
+author = "pthouvenin"
+
+# The full version, including alpha/beta/rc tags
+release = "0.0"
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    # "sphinx.ext.autodoc",  # Core library for html generation from docstrings
+    # "sphinx.ext.autosummary",  # Create neat summary tables
+    "autoapi.extension",
+    # "sphinx.ext.autodoc",  # Include documentation from docstrings
+    "sphinx.ext.coverage",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.napoleon",
+    "sphinx_rtd_theme",
+    "sphinxcontrib.bibtex",
+    "sphinx.ext.mathjax",  # Render math via JavaScript
+    "sphinx.ext.inheritance_diagram",  # Include inheritance diagrams
+    # Official Sphinx extensions
+    # https://www.sphinx-doc.org/en/master/usage/extensions/index.html
+    # "sphinx.ext.autodoc",  # Include documentation from docstrings
+    # "sphinx.ext.autosectionlabel",  # Allow reference sections using its title
+    # "sphinx.ext.autosummary",  # Generate autodoc summaries
+    # "sphinx.ext.coverage",  # Collect doc coverage stats
+    # "sphinx.ext.doctest",  # Test snippets in the documentation
+    # "sphinx.ext.duration",  # Measure durations of Sphinx processing
+    # "sphinx.ext.extlinks",  # Markup to shorten external links
+    # "sphinx.ext.githubpages",  # Publish HTML docs in GitHub Pages
+    # "sphinx.ext.graphviz",  # Add Graphviz graphs
+    # "sphinx.ext.ifconfig",  # Include content based on configuration
+    # "sphinx.ext.imgconverter",  # A reference image converter using Imagemagick
+    # "sphinx.ext.inheritance_diagram",  # Include inheritance diagrams
+    # "sphinx.ext.intersphinx",  # Link to other projects’ documentation
+    # "sphinx.ext.linkcode",  # Add external links to source code
+    # "sphinx.ext.imgmath",  # Render math as images
+    # "sphinx.ext.mathjax",  # Render math via JavaScript
+    # "sphinx.ext.jsmath",  # Render math via JavaScript
+    # "sphinx.ext.napoleon",  # Support for NumPy and Google style docstrings
+    # "sphinx.ext.todo",  # Support for todo items # .. todo:: directive
+    # "sphinx.ext.viewcode",  # Add links to highlighted source code
+    # Non-official Sphinx extensions need to be installed
+    # https://github.com/sphinx-contrib/
+    # "sphinxcontrib.bibtex",  # Sphinx extension for BibTeX style citations
+    # "sphinxcontrib.proof",  # Sphinx extension to typeset theorems, proofs
+    # Non-official Sphinx extension for matplotlib plots
+    # https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html?highlight=plot_directive#module-matplotlib.sphinxext.plot_directive
+    # "matplotlib.sphinxext.plot_directive",  # .. plot:: directive for plt.plot
+]
+# autosummary_generate = True  # Turn on sphinx.ext.autosummary
+
+autoapi_dirs = ["../../src/aaxda"]
+autoapi_options = [
+    "members",
+    "undoc-members",
+    "private-members",
+    "show-inheritance",
+    "show-module-summary",
+    "imported-members",
+]
+autodoc_typehints = "description"
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = [
+    "_build",
+    "_templates",
+    "sandbox",
+    "tests",
+    "drafts",
+    "*archive",
+]
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = "sphinx_rtd_theme"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = []  # ["_static"]
+
+html_theme_options = {
+    "logo_only": False,
+    "display_version": True,
+    "prev_next_buttons_location": "bottom",
+    "style_external_links": False,
+    # "vcs_pageview_mode": "",
+    # 'style_nav_header_background': 'white',
+    # Toc options
+    "collapse_navigation": True,
+    "sticky_navigation": True,
+    "navigation_depth": 2,
+    "includehidden": True,
+    "titles_only": False,
+}
+
+# html_extra_path = ["../coverage_html_report"]
+
+# -- Extension configuration -------------------------------------------------
+
+# Bibliography
+# sphinxcontrib-bibtex https://sphinxcontrib-bibtex.readthedocs.io/en/latest/usage.html
+bibtex_bibfiles = ["strings_all_ref.bib", "biblio.bib"]
+bibtex_encoding = "utf-8-sig"
+bibtex_default_style = "alpha"
+# bibtex_reference_style =  # 'author_year'
+# bibtex_reference_style = "alpha"  # alpha, plain , unsrt, and unsrtalpha
+
+# matplotlib.sphinxext.plot_directive
+# https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html
+# plot_include_source = True
+# plot_html_show_source_link =
+# plot_pre_code =
+# plot_basedir =
+# plot_formats =
+# plot_html_show_formats =
+# plot_rcparams =
+# plot_apply_rcparams =
+# plot_working_directory =
+# plot_template =
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_init_with_doc = False
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
+napoleon_preprocess_types = False
+napoleon_type_aliases = None
+napoleon_attr_annotations = True
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e6ec05af1b593fc15ece936686c3cae8b79a496f
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,32 @@
+Welcome to aaxda documentation
+=========================================
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Installation
+
+   setup
+   biblio
+   Code coverage report<../coverage_html_report/index.html#http://>
+   Code quality<../wily_report.html#http://>
+
+..
+   .. .. autosummary::
+   ..    :toctree: _autosummary
+   ..    :template: custom-module-template.rst
+   ..    :recursive:
+
+   ..    aaxda
+
+.. note::
+
+   - This project is under active development.
+   - A complete code coverage report is available `here <../coverage_html_report/index.html#http://>`_.
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/setup.rst b/docs/source/setup.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eab3a1eb14065f9390fcd44b3bd089f94826bc18
--- /dev/null
+++ b/docs/source/setup.rst
@@ -0,0 +1,168 @@
+Setup
+=====
+
+Installation
+------------
+
+.. _environment-setup:
+
+Environment setup
+^^^^^^^^^^^^^^^^^
+
+Clone the current repository with all the submodules as follows
+
+.. code-block:: bash
+
+    # Cloning the repo. with all the submodules:
+    git clone --recurse-submodules https://gitlab.cristal.univ-lille.fr/pthouven/dspa.git demo-jcgs
+    cd demo-jcgs
+
+    # Create anaconda environment
+    conda create --name demo-jcgs --file conda-osx-64.lock # or linux-64, win-64
+    # # or, conda env create --name demo-jcgs --file environment.yml
+    # # or, using the requirement.txt file
+    # conda config --append channels conda-forge
+    # conda config --append channels anaconda
+    # conda create --name demo-jcgs --file requirement.txt --yes -c conda-forge -c anaconda
+    # # needed only if building the documentation
+    # pip install docstr-coverage
+    # pip install genbadge
+
+    # Removing environment
+    conda env remove --name demo-jcgs
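+
+The ``conda-osx-64.lock`` file consumed above is generated with ``conda-lock``. A minimal sketch to regenerate the per-platform lock files after a change to ``environment.yml`` (assuming ``conda-lock`` is installed; depending on the ``conda-lock`` version, ``--kind explicit`` may be needed to obtain this output format):
+
+.. code-block:: bash
+
+    # regenerate the explicit lock files for the supported platforms
+    conda-lock -f environment.yml -p osx-64 -p linux-64 -p win-64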
+
+To run the code from this repository in editable (development) mode, activate the ``demo-jcgs`` environment and issue the following command from the root directory of the repository (see `this reference <https://stackoverflow.com/questions/49474575/how-to-install-my-own-python-module-package-via-conda-and-watch-its-changes>`_).
+
+.. code-block:: bash
+
+    # install project in development mode
+    conda develop .
+    # equivalent in pip:
+    # pip install -e .
+
+    # uninstall project if needed
+    conda develop -u .
+    # equivalent in pip:
+    # pip uninstall .
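+
+Once installed, a quick sanity check can confirm the package is visible to the interpreter (a minimal sketch, assuming the package is importable as ``aaxda``):
+
+.. code-block:: bash
+
+    # print the location of the package to confirm the development install
+    python -c "import aaxda; print(aaxda.__file__)"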
+
+To avoid `file lock issue in h5py <https://github.com/h5py/h5py/issues/1101>`_, add the following to the ``~/.zshrc`` file (or ``~/.bashrc``)
+
+.. code-block:: bash
+
+    export HDF5_USE_FILE_LOCKING='FALSE'
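+
+The variable only affects shells started after the edit; to apply it to the current session, re-source the configuration file:
+
+.. code-block:: bash
+
+    # reload the shell configuration (use ~/.bashrc if relevant)
+    source ~/.zshrc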
+
+
+Updating submodules (optional)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+From an existing ``demo-jcgs`` repository, issue the following commands
+
+.. code-block:: bash
+
+    cd path/to/repo
+    git pull
+    # make sure the submodules point to the commits recorded in the main repository
+    git submodule update --init --recursive
+
+
+Experiments
+-----------
+
+To run a representative example of the experiments, configure and run the ``main.py`` script.
+
+.. code-block:: bash
+
+    conda activate demo-jcgs
+    cd path/to/repo
+    # to check the list of input parameters
+    python aaxda/main.py --help
+    # default run to generate the data from a specific ground truth image, using 2 cores
+    mpiexec -n 2 python aaxda/main.py --data --imfile=img/boat.png --datafilename=boat_convolution --dpath=data --logfile=data.log -v
+    # launch the sampler using the default sampler parameters, with 2 cores
+    mpiexec -n 2 python aaxda/main.py --datafilename=boat_convolution --dpath=data --logfile=axda.log -v
+    # deactivating the conda environment (when no longer needed)
+    conda deactivate
+
+Another option is to configure and run the ``.sh`` scripts provided (``*local*.sh`` to run locally through ``tmux``, ``*slurm*.sh`` to submit the job to a machine configured with the ``slurm`` scheduler). An example is provided below.
+
+.. code-block:: bash
+
+    # configure script to run the serial version of the sampler
+    vi submit_local_serial.sh
+    # run sampler in a tmux session
+    bash submit_local_serial.sh
+    # attach to the tmux session (replace <session-name> with the name chosen for the session)
+    tmux a -t <session-name>
+
+To quickly inspect the content of an ``.h5`` file from the terminal (`reference <https://docs.h5py.org/en/stable/mpi.html?highlight=h5dump#using-parallel-hdf5-from-h5py>`_):
+
+.. code-block:: bash
+
+    # replace <filename> by the name of your file
+    h5dump --header <filename>.h5 # displays the name and size of all variables contained in the file
+    # h5dump <filename>.h5 # displays the value of all the variables saved in the file
+
+
+Please check the ``h5py`` `documentation <https://docs.h5py.org/en/stable/quick.html>`_ for further details.
+
+
+Contributing
+------------
+
+- Issue Tracker: `https://github.com/pthouvenin/demo-jcgs/issues <https://github.com/pthouvenin/demo-jcgs/issues>`_
+- Source Code: `https://github.com/pthouvenin/demo-jcgs <https://github.com/pthouvenin/demo-jcgs>`_
+
+To contribute to the project, make sure the following elements are properly configured before submitting any pull request (PR).
+
+Recommended text editor
+^^^^^^^^^^^^^^^^^^^^^^^
+
+For development, the ``vscode`` text editor is highly recommended, with the following extensions.
+
+1. `python <https://marketplace.visualstudio.com/items?itemName=ms-python.python>`_
+2. `python docstring generator <https://marketplace.visualstudio.com/items?itemName=njpwerner.autodocstring>`_
+3. `better comments <https://marketplace.visualstudio.com/items?itemName=aaron-bond.better-comments>`_
+4. `jupyter <https://marketplace.visualstudio.com/items?itemName=ms-toolsai.jupyter>`_
+5. `gitlens <https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens>`_
+6. `markdown all in one <https://marketplace.visualstudio.com/items?itemName=yzhang.markdown-all-in-one>`_
+
+Pre-commit setup
+^^^^^^^^^^^^^^^^
+
+Pre-commit hooks automatically trigger ``black`` and ``isort`` whenever the code is committed to the repo. To configure it, follow the instructions given below.
+
+- Create the ``conda`` environment and activate it (already done in the :ref:`installation section <environment-setup>`)
+- Install the ``pre-commit`` hooks (see the `pre-commit documentation <https://pre-commit.com/>`_ and the `full list of available hooks <https://pre-commit.com/hooks.html>`_)
+
+.. code-block:: bash
+
+    pre-commit install # setup pre-commit
+    pre-commit run --all-files # optional, to check that the hooks run fine against all files
+
+
+Assessing code and docstring coverage
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To test the code/docstring coverage locally, run the following commands
+
+.. code-block:: bash
+
+    export NUMBA_DISABLE_JIT=1 # need to disable jit compilation to check test coverage
+    coverage run -m unittest # check all tests
+    coverage report # generate a coverage report in the terminal
+    coverage html # HTML-based report to visually see which lines of code were not tested
+    coverage xml -o reports/coverage/coverage.xml # produce xml file to generate the badge
+    genbadge coverage -o docs/coverage.svg
+    docstr-coverage . # check docstring coverage and generate the associated badge
+
+
+Building the documentation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Make sure any new functionality is properly documented using the ``numpy`` docstring style (a minimal example is given after the commands below).
+- As soon as ``sphinx`` is installed (``conda install -c anaconda sphinx``), issue the following commands.
+
+.. code-block:: bash
+
+    cd docs
+    make html # build the documentation (other targets: latex, linkcheck)
+    # sphinx-apidoc [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> [EXCLUDE_PATTERN …]
+    # cd docs/source && sphinx-apidoc --force -o . ../.. ../../*test* ../../*main*
diff --git a/docs/source/strings_all_ref.bib b/docs/source/strings_all_ref.bib
new file mode 100644
index 0000000000000000000000000000000000000000..42bf22517c3c641e47ee912a45960bda0167ca74
--- /dev/null
+++ b/docs/source/strings_all_ref.bib
@@ -0,0 +1,452 @@
+% Maths
+@string{AJM      = {Amer. J. of Math.}}
+@string{AISM     = {Ann. Inst. Statist. Math.}}
+@string{ANNMATH  = {Ann. Mathematics}}
+@string{ANNSTAT  = {Ann. Stat.}}
+@string{ANNPROB  = {Ann. Prob.}}
+@STRING{ANNAPPSTAT = {Ann. Appl. Stat.}}
+@string{ANNIP    = {Ann. Inst. Henri Poincar\'{e}}}
+@string{ACHA     = {Applied and Computational Harmonic Analysis}}
+@string{AML      = {Appl. Math. Lett.}}
+
+@string{AUT      = {Automatica}}
+@string{BIT      = {BIT}}
+@string{BSTJ     = {Bell Syst. Tech. J.}}
+@string{BAMS     = {Bull. Amer. Math. Soc.}}
+@string{CACM     = {Comm. ACM}}
+@string{CMP      = {Comm. Math. Phys.}}
+@string{CPAM     = {Comm. Pure Appl. Math.}}
+@string{CRAS     = {Comptes Rendus de l'Acad\'emie des sciences}}
+@string{COMPAM   = {Comm. Pure Appl. Math.}}
+@string{CAGD     = {Comput. Aided Geom. Des.}}
+@string{CONAP    = {Constr. Approx.}}
+@string{CR       = {C. R. Acad. Sci. Paris S\'er. I Math.}}
+@string{EL       = {Electronics Letters}}
+@string{ESAIMPS  = {ESAIM Probability and Statistics}}
+@string{NEURALCOMP = {Neural Comput.}}
+
+
+@string{JAStA    = {J. Amer. Stat. Assoc.}}
+@string{JAStS    = {J. Amer. Stat. Soc.}}
+@string{JASA     = {J. Acoust. Soc. Amer.}}
+@string{JAMS     = {J. Amer. Math. Soc.}}
+@string{JAT      = {J. Approx. Theory}}
+@string{JAES     = {J. Audio Eng. Soc.}}
+@string{JFI      = {J. Franklin Inst.}}
+@string{JCAM     = {J. Comput. Appl. Math.}}
+@string{JCP      = {J. Comput. Phys.}}
+@string{JFAA     = {J. Fourier Anal. Appl.}}
+@string{JFA      = {J. Funct. Anal.}}
+@string{JIEE     = {J. IEE}}
+@string{JMAA     = {J. Math. Anal. Appl.}}
+@string{JMIV     = {J. Math. Imag. Vision}}
+@string{JMP      = {J. Math. Phys.}}
+@string{JMPA     = {J. Math. Pure Appl.}}
+@string{JOS      = {J. Opt. Soc.}}
+@string{JOSTA    = {J. Optim. Theory Appl.}}
+@string{JOSAA    = {J. Opt. Soc. Am. A}}
+@string{JCGS     = {J. Comput. and Graph. Stat.}}
+@string{JROYS    = {J. Roy. Stat. Soc.}}
+@string{JROYSA   = {J. Roy. Stat. Soc. Ser. A}}
+@string{JROYSB   = {J. Roy. Stat. Soc. Ser. B}}
+@string{JROYSC   = {J. Roy. Stat. Soc. Ser. C}}
+@string{JROYSD   = {J. Roy. Stat. Soc. Ser. D}}
+@string{JSV      = {J. Sound Vib.}}
+@string{LNM      = {Lecture Notes in Math.}}
+@string{LINALG   = {Linear Algebra and its Applications}}
+@string{MC       = {Math. Comp.}}
+@string{MM       = {Monatsh. Math.}}
+@string{MN       = {Math. Nach.}}
+@string{MS       = {Math. Scand.}}
+@string{NM       = {Numer. Math.}}
+@string{NA       = {Numer. Algor.}}
+@string{OE       = {Opt. Eng.}}
+@string{PJR      = {Philips J. Research}}
+@string{PAMS     = {Proc. Amer. Math. Soc.}}
+@string{PR       = {Phys. Rev.}}
+@string{PRA      = {Phys. Rev. A}}
+@string{PRL      = {Phys. Rev. Lett.}}
+@string{PRP      = {Phys. Rep.}}
+@string{PTP      = {Prog. Theor. Phys.}}
+@string{RMI      = {Rev. Mat. Iberoamericana}}
+
+% Springer
+@string{JGO      = {J. Glob. Optim}}
+@string{JSC      = {J. Sci. Comput.}}
+
+% SIAM
+@string{SIAMAM   = {SIAM J. Appl. Math.}}
+@string{SIAMMA   = {SIAM J. Math. Anal.}}
+@string{SIMAT    = {SIAM J. Matrix Anal. Appl.}}
+@string{SIAMNA   = {SIAM J. Numer. Anal.}}
+@string{SIJSC    = {SIAM J. Sci. Comput.}}
+@STRING{SISC     = {SIAM J. Sci. Comput.}}
+@string{SIJSSC   = {SIAM J. Sci. Statist. Comp.}}
+@string{SIAMR    = {SIAM Rev.}}
+@string{SIAMIS   = {SIAM J. Imaging Sci.}}
+@string{SIAMJO   = {SIAM J. Optim.}}
+@string{SIAMJOTA = {SIAM J. Optim. Theory Appl.}}
+
+% Diverse
+@string{EUSP       = {Signal Process.}}
+@string{TAMS       = {Trans. Amer. Math. Soc.}}
+@string{CISS       = {Proc. Conf. Information Sciences and Systems}}
+@string{ICA        = {Proc. Workshop on Independent Component Analysis and Signal Separation (ICA)}}
+@STRING{Maxent     = {Proc. Int. Workshop on Bayesian Inference and Maximum Entropy Methods in Science and Engineering (MaxEnt)}}
+@STRING{IJCNNN     = {Proc. Int. Joint Conf. Neural Net. (IJCNN)}}
+@string{EUSIPCO    = {Proc. European Signal Process. Conf. (EUSIPCO)}}
+@string{TU         = {Vienna University of Technology}}
+@string{INTHFT     = {Institute of Communications and Radio-Frequency Engineering, Vienna University of Technology}}
+@STRING{ICML       = {Proc. Int. Conf. Machine Learning ({ICML})}}
+@STRING{AISTATS    = {Proc. Int. Conf. Artificial Intelligence and Statistics (AISTATS)}}
+@STRING{IJRS       = {Int. J. Remote Sens.}}
+@STRING{IP         = {Inv. Prob.}}
+@STRING{JGR        = {J. Geophys. Res.}}
+@STRING{JSPS       = {J. Signal Process. Syst.}}
+@STRING{PNAS       = {Proc. Nat. Acad. Sci. USA}}
+@STRING{RSE        = {Remote Sens. Environment}}
+@STRING{RSR        = {Remote Sensing Rev.}}
+@STRING{JMLR       = {J. Mach. Learning Research}}
+@STRING{BIOMET     = {Biometrika}}
+@STRING{StatComput = {Stat. Comput.}}
+@STRING{NIPS       = {Adv. in Neural Information Processing Systems}}
+@STRING{MNRAS       = {Monthly Notices of the Royal Astronomical Society}}
+@STRING{apj        = {The Astrophysical Journal}}
+
+% Date
+%@string{jan      = {Jan.}}
+%@string{feb      = {Feb.}}
+%@string{mar      = {Mar.}}
+%@string{apr      = {Apr.}}
+%@string{may      = {May}}
+%@string{jun      = {Jun.}}
+%@string{jul      = {Jul.}}
+%@string{aug      = {Aug.}}
+%@string{sep      = {Sep.}}
+%@string{oct      = {Oct.}}
+%@string{nov      = {Nov.}}
+%@string{dec      = {Dec.}}
+
+% Editors
+@string{academic = {Academic Press}}
+@string{addwes   = {Addison Wesley}}
+@string{artech   = {Artech House}}
+@string{dover    = {Dover Publications}}
+@string{elsevier = {Elsevier}}
+@string{kluwer   = {Kluwer}}
+@string{dekker   = {Marcel Dekker}}
+@string{mcgraw   = {McGraw-Hill}}
+@string{pergamon = {Pergamon Press}}
+@string{prent    = {Prentice Hall}}
+@string{spring   = {Springer}}
+@string{wiley    = {Wiley}}
+
+
+%IEEE conferences
+
+@string{IEEPROC           = {IEE Proc. Vision, image and signal processing}}
+@string{IEEEPROC          = {Proc. IEEE}}
+@string{ICASSP            = {Proc. IEEE Int. Conf. Acoust., Speech, and Signal Processing (ICASSP)}}
+@STRING{MLSP              = {Proc. IEEE Workshop Mach. Learning for Signal Process. (MLSP)}}
+@string{ICIP              = {Proc. IEEE Int. Conf. Image Processing (ICIP)}}
+@string{ISBI              = {Proc. IEEE Int. Symp. Biomed. Imaging (ISBI)}}
+@STRING{WHISPERS          = {Proc. IEEE GRSS Workshop Hyperspectral Image Signal Process.: Evolution in Remote Sens. (WHISPERS)}}
+@string{ISCAS             = {Proc. IEEE Int. Symp. Circuits and Systems (ISCAS)}}
+@string{TFTS              = {Proc. IEEE-SP Int. Sympos. Time-Frequency Time-Scale Analysis}}
+@string{SSAP              = {Proc. IEEE-SP Workshop Stat. Signal and Array Processing (SSAP)}}
+@string{SSP               = {Proc. IEEE-SP Workshop Stat. and Signal Processing (SSP)}}
+@string{ASILOMAR          = {Proc. IEEE Asilomar Conf. Signals, Systems, Computers}}
+@STRING{ACSSC             = {Proc. IEEE Asil. Conf. on Sig., Sys., and Comp. (ASSC)}}
+@STRING{IEEEMLSP          = {Proc. IEEE Workshop Mach. Learning for Signal Process. (MLSP)}}
+@STRING{IGARSS            = {Proc. IEEE Int. Conf. Geosci. Remote Sens. (IGARSS)}}
+@STRING{camsap            = {Proc. IEEE Int. Workshop Comput. Adv. Multi-Sensor Adaptive Process. (CAMSAP)}}
+@STRING{sam               = {Proc. IEEE Sensor Array and Multichannel Signal Process. Workshop (SAM)}}
+@STRING{IEEESIPS          = {Proc. IEEE Workshop Signal Process. Syst. (SIPS)}}
+@STRING{IEEEUS            = {Proc. IEEE Ultrason. Symp. (US)}}
+@string{IEEENSSMIC        = {Proc. IEEE Nucl. Sci. Symp. and Med. Imag. Conf. (NSS/MIC)}}
+
+%-----------------------------------------------------------------------%
+%-IEEE Abbreviations for Transactions, Journals, Letters, and Magazines-%
+%-----------------------------------------------------------------------%
+%IEEE Journals
+
+% aerospace and military
+@STRING{IEEE_J_AES        = {IEEE Trans. Aerosp. Electron. Syst.}}
+@STRING{IEEE_J_ANE        = {IEEE Trans. Aerosp. Navig. Electron.}}   %*
+@STRING{IEEE_J_ANNE       = {IEEE Trans. Aeronaut. Navig. Electron.}} %*
+@STRING{IEEE_J_AS         = {IEEE Trans. Aerosp.}}                    %*
+@STRING{IEEE_J_AIRE       = {IEEE Trans. Airborne Electron.}}         %*
+@STRING{IEEE_J_MIL        = {IEEE Trans. Mil. Electron.}}             %*
+
+% autos, transportation and vehicles (non-aerospace)
+@STRING{IEEE_J_ITS        = {IEEE Trans. Intell. Transp. Syst.}}
+@STRING{IEEE_J_VT         = {IEEE Trans. Veh. Technol.}}
+@STRING{IEEE_J_VC         = {IEEE Trans. Veh. Commun.}}
+
+% circuits, signals, systems, audio and controls
+@STRING{IEEE_J_SPL        = {IEEE Signal Process. Lett.}}
+@STRING{IEEE_J_ASSP       = {IEEE Trans. Acoust., Speech, Signal Process.}}
+@STRING{IEEE_J_AU         = {IEEE Trans. Audio}}
+@STRING{IEEE_J_AUEA       = {IEEE Trans. Audio Electroacoust.}}
+@string{IEEE_J_ASLP       = {IEEE/ACM Trans. Audio, Speech, Language Process.}}
+@STRING{IEEE_J_AC         = {IEEE Trans. Autom. Control}}
+@STRING{IEEE_J_CAS        = {IEEE Trans. Circuits Syst.}}
+@STRING{IEEE_J_CASVT      = {IEEE Trans. Circuits Syst. Video Technol.}}
+@STRING{IEEE_J_CASI       = {IEEE Trans. Circuits Syst. I}}
+@STRING{IEEE_J_CASII      = {IEEE Trans. Circuits Syst. II}}
+% in 2004 CASI and CASII renamed part title to CASI_RP and CASII_EB, respectively.
+@STRING{IEEE_J_CASI_RP    = {IEEE Trans. Circuits Syst. I}}
+@STRING{IEEE_J_CASII_EB   = {IEEE Trans. Circuits Syst. II}}
+@STRING{IEEE_J_CT         = {IEEE Trans. Circuit Theory}}
+@STRING{IEEE_J_CST        = {IEEE Trans. Control Syst. Technol.}}
+@STRING{IEEE_J_SP         = {IEEE Trans. Signal Process.}}
+@STRING{IEEE_J_SU         = {IEEE Trans. Sonics Ultrason.}}
+@STRING{IEEE_J_SAP        = {IEEE Trans. Speech Audio Process.}}
+@STRING{IEEE_J_STSP       = {IEEE J. Sel. Topics Signal Process.}}
+@STRING{IEEE_J_SYST       = {IEEE Syst. J.}}
+@STRING{IEEE_J_UE         = {IEEE Trans. Ultrason. Eng.}}
+@STRING{IEEE_J_UFFC       = {IEEE Trans. Ultrason., Ferroelectr., Freq. Control}}
+
+% communications
+@STRING{IEEE_J_COML       = {IEEE Commun. Lett.}}
+@STRING{IEEE_J_JSAC       = {IEEE J. Sel. Areas Commun.}}
+@STRING{IEEE_J_COM        = {IEEE Trans. Commun.}}
+@STRING{IEEE_J_COMT       = {IEEE Trans. Commun. Technol.}}
+@STRING{IEEE_J_WCOM       = {IEEE Trans. Wireless Commun.}}
+
+% components, packaging and manufacturing
+@STRING{IEEE_J_ADVP       = {IEEE Trans. Adv. Packag.}}
+@STRING{IEEE_J_CHMT       = {IEEE Trans. Compon., Hybrids, Manuf. Technol.}}
+@STRING{IEEE_J_CPMTA      = {IEEE Trans. Compon., Packag., Manuf. Technol. A}}
+@STRING{IEEE_J_CPMTB      = {IEEE Trans. Compon., Packag., Manuf. Technol. B}}
+@STRING{IEEE_J_CPMTC      = {IEEE Trans. Compon., Packag., Manuf. Technol. C}}
+@STRING{IEEE_J_CAPT       = {IEEE Trans. Compon. Packag. Technol.}}
+@STRING{IEEE_J_CAPTS      = {IEEE Trans. Compon. Packag. Technol.}}
+@STRING{IEEE_J_CPART      = {IEEE Trans. Compon. Parts}}
+@STRING{IEEE_J_EPM        = {IEEE Trans. Electron. Packag. Manuf.}}
+@STRING{IEEE_J_MFT        = {IEEE Trans. Manuf. Technol.}}
+@STRING{IEEE_J_PHP        = {IEEE Trans. Parts, Hybrids, Packag.}}
+@STRING{IEEE_J_PMP        = {IEEE Trans. Parts, Mater., Packag.}}
+
+% CAD
+@STRING{IEEE_J_TCAD       = {IEEE J. Technol. Comput. Aided Design}}
+@STRING{IEEE_J_CAD        = {IEEE Trans. Comput.-Aided Design Integr. Circuits Syst.}}
+
+% coding, data, information, knowledge
+@STRING{IEEE_J_IT         = {IEEE Trans. Inf. Theory}}
+@STRING{IEEE_J_KDE        = {IEEE Trans. Knowl. Data Eng.}}
+
+% computers, computation, networking and software
+@STRING{IEEE_J_C          = {IEEE Trans. Comput.}}
+@STRING{IEEE_J_CAL        = {IEEE Comput. Archit. Lett.}}
+@STRING{IEEE_J_DSC        = {IEEE Trans. Dependable Secure Comput.}}
+@STRING{IEEE_J_ECOMP      = {IEEE Trans. Electron. Comput.}}
+@STRING{IEEE_J_EVC        = {IEEE Trans. Evol. Comput.}}
+@STRING{IEEE_J_FUZZ       = {IEEE Trans. Fuzzy Syst.}}
+@STRING{IEEE_J_IFS        = {IEEE Trans. Inf. Forensics Security}}
+@STRING{IEEE_J_MC         = {IEEE Trans. Mobile Comput.}}
+@STRING{IEEE_J_NET        = {IEEE/ACM Trans. Netw.}}
+@STRING{IEEE_J_NN         = {IEEE Trans. Neural Netw.}}
+@STRING{IEEE_J_NNLS       = {IEEE Trans. Netw. Learn. Syst.}}
+@STRING{IEEE_J_PDS        = {IEEE Trans. Parallel Distrib. Syst.}}
+@STRING{IEEE_J_SIPN       = {IEEE Trans. Signal Inf. Process. Netw.}}
+@STRING{IEEE_J_SE         = {IEEE Trans. Softw. Eng.}}
+
+% computer graphics, imaging, and multimedia
+@STRING{IEEE_J_JDT        = {IEEE/OSA J. Display Technol.}}
+@STRING{IEEE_J_IP         = {IEEE Trans. Image Process.}}
+@STRING{IEEE_J_CI         = {IEEE Trans. Comput. Imag.}} % temporary : to be updated
+@STRING{IEEE_J_MM         = {IEEE Trans. Multimedia}}
+@STRING{IEEE_J_VCG        = {IEEE Trans. Vis. Comput. Graphics}}
+
+% cybernetics, ergonomics, robots, man-machine, and automation
+@STRING{IEEE_J_ASE        = {IEEE Trans. Autom. Sci. Eng.}}
+@STRING{IEEE_J_JRA        = {IEEE J. Robot. Autom.}}
+@STRING{IEEE_J_H          = {IEEE Trans. Haptics}}
+@STRING{IEEE_J_HFE        = {IEEE Trans. Hum. Factors Electron.}}
+@STRING{IEEE_J_MMS        = {IEEE Trans. Man-Mach. Syst.}}
+@STRING{IEEE_J_PAMI       = {IEEE Trans. Pattern Anal. Mach. Intell.}}
+%in 1989 JRA became RA
+%in August 2004, RA split into ASE and RO
+@STRING{IEEE_J_RA         = {IEEE Trans. Robot. Autom.}}
+@STRING{IEEE_J_RO         = {IEEE Trans. Robot.}}
+@STRING{IEEE_J_SMC        = {IEEE Trans. Syst., Man, Cybern.}}
+@STRING{IEEE_J_SMCA       = {IEEE Trans. Syst., Man, Cybern. A}}
+@STRING{IEEE_J_SMCB       = {IEEE Trans. Syst., Man, Cybern. B}}
+@STRING{IEEE_J_SMCC       = {IEEE Trans. Syst., Man, Cybern. C}}
+@STRING{IEEE_J_SSC        = {IEEE Trans. Syst. Sci. Cybern.}}
+
+% earth, wind, fire and water
+@STRING{IEEE_J_GE         = {IEEE Trans. Geosci. Electron.}}
+@STRING{IEEE_J_GRS        = {IEEE Trans. Geosci. Remote Sens.}}
+@STRING{IEEE_J_GRSL       = {IEEE Geosci. Remote Sens. Lett.}}
+@STRING{IEEE_J_OE         = {IEEE J. Ocean. Eng.}}
+@STRING{IEEE_J_STARS      = {IEEE J. Sel. Topics Appl. Earth Observ. in Remote Sens.}}
+
+% education, engineering, history, IEEE, professional
+@STRING{IEEE_J_CJECE      = {Canadian J. Elect. Comput. Eng.}}
+@STRING{IEEE_J_PROC       = {Proc. IEEE}}
+@STRING{IEEE_J_EDU        = {IEEE Trans. Educ.}}
+@STRING{IEEE_J_EM         = {IEEE Trans. Eng. Manag.}}
+@STRING{IEEE_J_EWS        = {IEEE Trans. Eng. Writing Speech}}
+@STRING{IEEE_J_PC         = {IEEE Trans. Prof. Commun.}}
+
+% electromagnetics, antennas, EMI, magnetics and microwave
+@STRING{IEEE_J_AWPL       = {IEEE Antennas Wireless Propag. Lett.}}
+@STRING{IEEE_J_MGWL       = {IEEE Microw. Guided Wave Lett.}}
+%IEEE seems to want Compon. here, not Comp.
+@STRING{IEEE_J_MWCL       = {IEEE Microw. Wireless Compon. Lett.}}
+@STRING{IEEE_J_AP         = {IEEE Trans. Antennas Propag.}}
+@STRING{IEEE_J_EMC        = {IEEE Trans. Electromagn. Compat.}}
+@STRING{IEEE_J_MAG        = {IEEE Trans. Magn.}}
+@STRING{IEEE_J_MTT        = {IEEE Trans. Microw. Theory Tech.}}
+@STRING{IEEE_J_RFI        = {IEEE Trans. Radio Freq. Interference}}
+@STRING{IEEE_J_TJMJ       = {IEEE Transl. J. Magn. Jpn.}}
+
+% energy and power
+@STRING{IEEE_J_EC         = {IEEE Trans. Energy Convers.}}
+@STRING{IEEE_J_PEL        = {IEEE Power Electron. Lett.}}
+@STRING{IEEE_J_PWRAS      = {IEEE Trans. Power App. Syst.}}
+@STRING{IEEE_J_PWRD       = {IEEE Trans. Power Del.}}
+@STRING{IEEE_J_PWRE       = {IEEE Trans. Power Electron.}}
+@STRING{IEEE_J_PWRS       = {IEEE Trans. Power Syst.}}
+
+% industrial, commercial and consumer
+@STRING{IEEE_J_APPIND     = {IEEE Trans. Appl. Ind.}}
+@STRING{IEEE_J_BC         = {IEEE Trans. Broadcast.}}
+@STRING{IEEE_J_BCTV       = {IEEE Trans. Broadcast Television Receivers}}
+@STRING{IEEE_J_CE         = {IEEE Trans. Consum. Electron.}}
+@STRING{IEEE_J_IE         = {IEEE Trans. Ind. Electron.}}
+@STRING{IEEE_J_IECI       = {IEEE Trans. Ind. Electron. Contr. Instrum.}}
+@STRING{IEEE_J_IA         = {IEEE Trans. Ind. Appl.}}
+@STRING{IEEE_J_IGA        = {IEEE Trans. Ind. Gen. Appl.}}
+@STRING{IEEE_J_IINF       = {IEEE Trans. Ind. Informat.}}
+@STRING{IEEE_J_PSE        = {IEEE J. Product Safety Eng.}}
+
+% instrumentation and measurement
+@STRING{IEEE_J_IM         = {IEEE Trans. Instrum. Meas.}}
+
+% insulation and materials
+@STRING{IEEE_J_JEM        = {IEEE/TMS J. Electron. Mater.}}
+@STRING{IEEE_J_DEI        = {IEEE Trans. Dielectr. Electr. Insul.}}
+@STRING{IEEE_J_EI         = {IEEE Trans. Electr. Insul.}}
+
+% mechanical
+@STRING{IEEE_J_MECH       = {IEEE/ASME Trans. Mechatronics}}
+@STRING{IEEE_J_MEMS       = {J. Microelectromech. Syst.}}
+
+% medical and biological
+@STRING{IEEE_J_BCAS       = {IEEE Trans. Biomed. Circuits Syst.}}
+@STRING{IEEE_J_BME        = {IEEE Trans. Biomed. Eng.}}
+% Note: The B-ME journal later dropped the hyphen and became the BME.
+@STRING{IEEE_J_B-ME       = {IEEE Trans. Bio-Med. Eng.}}
+@STRING{IEEE_J_BMELC      = {IEEE Trans. Bio-Med. Electron.}}
+@STRING{IEEE_J_CBB        = {IEEE/ACM Trans. Comput. Biol. Bioinformatics}}
+@STRING{IEEE_J_ITBM       = {IEEE Trans. Inf. Technol. Biomed.}}
+@STRING{IEEE_J_ME         = {IEEE Trans. Med. Electron.}}
+@STRING{IEEE_J_MI         = {IEEE Trans. Med. Imag.}}
+@STRING{IEEE_J_NB         = {IEEE Trans. Nanobiosci.}}
+@STRING{IEEE_J_NSRE       = {IEEE Trans. Neural Syst. Rehabil. Eng.}}
+@STRING{IEEE_J_RBME       = {IEEE Rev. Biomed. Eng.}}
+@STRING{IEEE_J_RE         = {IEEE Trans. Rehabil. Eng.}}
+
+% optics, lightwave and photonics
+@STRING{IEEE_J_PTL        = {IEEE Photon. Technol. Lett.}}
+@STRING{IEEE_J_JLT        = {J. Lightw. Technol.}}
+
+% physics, electrons, nanotechnology, nuclear and quantum electronics
+@STRING{IEEE_J_EDL        = {IEEE Electron Device Lett.}}
+@STRING{IEEE_J_JQE        = {IEEE J. Quantum Electron.}}
+@STRING{IEEE_J_JSTQE      = {IEEE J. Sel. Topics Quantum Electron.}}
+@STRING{IEEE_J_ED         = {IEEE Trans. Electron Devices}}
+@STRING{IEEE_J_NANO       = {IEEE Trans. Nanotechnol.}}
+@STRING{IEEE_J_NS         = {IEEE Trans. Nucl. Sci.}}
+@STRING{IEEE_J_PS         = {IEEE Trans. Plasma Sci.}}
+
+% reliability
+%IEEE seems to want Mat. here, not Mater.
+@STRING{IEEE_J_DMR        = {IEEE Trans. Device Mater. Rel.}}
+@STRING{IEEE_J_R          = {IEEE Trans. Rel.}}
+
+% semiconductors, superconductors, electrochemical and solid state
+@STRING{IEEE_J_ESSL       = {IEEE/ECS Electrochem. Solid-State Lett.}}
+@STRING{IEEE_J_JSSC       = {IEEE J. Solid-State Circuits}}
+@STRING{IEEE_J_ASC        = {IEEE Trans. Appl. Supercond.}}
+@STRING{IEEE_J_SM         = {IEEE Trans. Semicond. Manuf.}}
+
+% sensors
+@STRING{IEEE_J_SENSOR     = {IEEE Sensors J.}}
+
+% VLSI
+@STRING{IEEE_J_VLSI       = {IEEE Trans. {VLSI} Syst.}}
+
+
+% IEEE Magazines
+
+@STRING{IEEE_M_AES        = {IEEE Aerosp. Electron. Syst. Mag.}}
+@STRING{IEEE_M_HIST       = {IEEE Ann. Hist. Comput.}}
+@STRING{IEEE_M_AP         = {IEEE Antennas Propag. Mag.}}
+@STRING{IEEE_M_ASSP       = {IEEE {ASSP} Mag.}}
+@STRING{IEEE_M_CD         = {IEEE Circuits Devices Mag.}}
+@STRING{IEEE_M_CAS        = {IEEE Circuits Syst. Mag.}}
+@STRING{IEEE_M_COM        = {IEEE Commun. Mag.}}
+@STRING{IEEE_M_COMSOC     = {IEEE Commun. Soc. Mag.}}
+@STRING{IEEE_M_CIM        = {IEEE Comput. Intell. Mag.}}
+%CSEM changed to CSE in 1999
+@STRING{IEEE_M_CSE        = {IEEE Comput. Sci. Eng.}}
+@STRING{IEEE_M_CSEM       = {IEEE Comput. Sci. Eng. Mag.}} %!
+@STRING{IEEE_M_CAP        = {IEEE Comput. Appl. Power}}
+@STRING{IEEE_M_CGA        = {IEEE Comput. Graph. Appl. Mag.}}
+@STRING{IEEE_M_CONC       = {IEEE Concurrency}}
+@STRING{IEEE_M_CEM        = {IEEE Consum. Electron. Mag.}}
+@STRING{IEEE_M_CS         = {IEEE Control Syst. Mag.}}
+@STRING{IEEE_M_DT         = {IEEE Des. Test.}}
+@STRING{IEEE_M_DTC        = {IEEE Des. Test. Comput.}}
+@STRING{IEEE_M_EI         = {IEEE Electr. Insul. Mag.}}
+@STRING{IEEE_M_ECM        = {IEEE Electromagn. Compat.}}
+@STRING{IEEE_M_EM         = {IEEE Electrific. Mag.}}
+@STRING{IEEE_M_ETR        = {IEEE ElectroTechnol. Rev.}}
+@STRING{IEEE_M_EMB        = {IEEE Eng. Med. Biol. Mag.}}
+@STRING{IEEE_M_EMR        = {IEEE Eng. Manag. Rev.}}
+@STRING{IEEE_M_EXP        = {IEEE Expert}}
+@STRING{IEEE_M_GRSM       = {IEEE Geosci. Remote Sens. Mag.}}
+@STRING{IEEE_M_IA         = {IEEE Ind. Appl. Mag.}}
+@STRING{IEEE_M_IE         = {IEEE Ind. Electron. Mag.}}
+@STRING{IEEE_M_IM         = {IEEE Instrum. Meas. Mag.}}
+@STRING{IEEE_M_IS         = {IEEE Intell. Syst.}}
+@STRING{IEEE_M_ITS        = {IEEE Intell. Transp. Syst. Mag.}}
+@STRING{IEEE_M_IC         = {IEEE Internet Comput.}}
+@STRING{IEEE_M_ITP        = {IEEE {IT} Prof.}}
+@STRING{IEEE_M_MICRO      = {IEEE Micro}}
+@STRING{IEEE_M_MW         = {IEEE Microw. Mag.}}
+@STRING{IEEE_M_MM         = {IEEE Multimedia Mag.}}
+@STRING{IEEE_M_NANO       = {IEEE Nanotechnol. Mag.}}
+@STRING{IEEE_M_NET        = {IEEE Netw.}}
+%IEEE's editorial manual lists Pers. Commun., but Personal Commun. Mag. seems to be what is used in the journals
+%@STRING{IEEE_M_PCOM       = {IEEE Personal Commun. Mag.}}
+@STRING{IEEE_M_PCOM       = {IEEE Pers. Commun.}}
+@STRING{IEEE_M_POT        = {IEEE Potentials}}
+% CAP and PER merged to form PE in 2003
+@STRING{IEEE_M_PEl        = {IEEE Power Electron. Mag.}}
+@STRING{IEEE_M_PE         = {IEEE Power Energy Mag.}}
+@STRING{IEEE_M_PER        = {IEEE Power Eng. Rev.}}
+@STRING{IEEE_M_Pu         = {IEEE Pulse}}
+@STRING{IEEE_M_RA         = {IEEE Robot. Autom. Mag.}}
+@STRING{IEEE_M_SP         = {IEEE Signal Process. Mag.}}
+@STRING{IEEE_M_SSCM       = {IEEE Solid State Circuits Mag.}}
+@STRING{IEEE_M_SAP        = {IEEE Security Privacy}}
+@STRING{IEEE_M_S          = {IEEE Softw.}}
+@STRING{IEEE_M_SPECT      = {IEEE Spectr.}}
+@STRING{IEEE_M_TS         = {IEEE Technol. Soc. Mag.}}
+@STRING{IEEE_M_VT         = {IEEE Veh. Technol. Mag.}}
+@STRING{IEEE_M_C          = {Computer}}
+@STRING{IEEE_M_PVC        = {Pervasive Comput.}}
+@STRING{IEEE_M_TODAY      = {Today's Engineer}}
+@STRING{IEEE_M_WC         = {Wireless Commun.}}
+
+
+% IEEE Online Publications
+
+@STRING{IEEE_O_CSTO        = {IEEE Commun. Surveys Tuts.}}
+@STRING{IEEE_O_DSO         = {IEEE Distrib. Syst. Online}}
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7d408e75c5cb6610bfd10484831ad73ec1ff9e2e
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,51 @@
+name: jcgs_demo
+channels:
+  - defaults
+  - conda-forge
+  - anaconda
+dependencies:
+  - alabaster=0.7.12
+  - coverage=5.5
+  - enlighten=1.10.1
+  - flake8=3.9.2
+  - h5py[version='>=2.9',build=mpi*]
+  - hdf5=1.10.5
+  - hdf5plugin=2.3.0
+  - imageio=2.9.0
+  - intel-openmp=2021.4.0
+  - ipykernel=6.9.1
+  - ipython=8.3.0
+  - isort=5.9.3
+  - jupyterlab=3.1.7
+  - mkl=2021.2.0
+  - mpi4py=3.0.3
+  - nbconvert=6.1.0
+  - numba=0.53.1
+  - numcodecs=0.8.0
+  - numpy=1.20.1
+  - opt_einsum=3.3.0
+  - pandas=1.3.3
+  - pip=21.2.4
+  - pre-commit=2.15.0
+  - pytables=3.6.1
+  - pytest=6.2.4
+  - python=3.8.13
+  - pywavelets=1.3.0
+  - scikit-image=0.19.2
+  - scikit-learn=0.24.2
+  - scipy=1.6.2
+  - seaborn=0.11.2
+  - snakeviz=2.0.1
+  - sphinx=4.2.0
+  - sphinx_rtd_theme=0.4.3
+  - tqdm=4.62.2
+  - zarr=2.8.1
+  - pip:
+    - click==7.1.2
+    - docstr-coverage==2.1.1
+    - flake8-polyfill==1.0.2
+    - genbadge==1.0.6
+    - radon==4.0.0
+    - sphinx-autoapi==1.8.4
+    - wily==1.20.0
+prefix: /Users/pthouvenin/anaconda3/envs/jcgs_demo
diff --git a/examples/deconvolution/job.slurm b/examples/deconvolution/job.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..0c83375966170385b625a5ab214108f309f4acf9
--- /dev/null
+++ b/examples/deconvolution/job.slurm
@@ -0,0 +1,54 @@
+#!/bin/bash
+#SBATCH --nodes=1
+## SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=1
+## SBATCH --time=24:00:00
+## SBATCH --job-name=sampling
+## SBATCH --mem=10G
+
+## SBATCH --ntasks= ...  # total number of tasks (if not compatible with ntasks-per-node, assigns cores as evenly as possible between workers)
+
+#SBATCH --mail-type=ALL  # email notifications about the job: start, end, ...
+#SBATCH --mail-user=usermail
+
+## SBATCH -o /home/username/python/dspa/logs/sampling.out.%A.%N.log
+## SBATCH -e /home/username/python/dspa/logs/sampling.err.%A.%N.log
+
+# module load python/3.5.1
+# source astro_env/bin/activate
+# deactivate
+
+module purge  # remove all previously loaded modules
+module load anaconda3/2020.02
+# conda create --name demo-jcgs --file requirement.txt
+
+# * The following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+eval "$(conda shell.bash hook)"
+conda activate demo-jcgs
+
+# mkdir results
+# mkdir img
+
+mpiexec -n ${numprocs} python -m mpi4py ../../src/aaxda/main_s.py \
+--Nmc=${Nmc} \
+--rho=${rho} \
+--alpha=${alpha} \
+--beta=${beta} \
+--M=${M} \
+--kernel_size=${ksize} \
+--checkpointfile=${checkpointfile} \
+--checkpoint_frequency=${checkpointfrequency} \
+--save=${save} \
+--checkpointname=${checkpointname} \
+--datafilename=${datafilename} \
+--dpath=${dpath} \
+--imfile=${imfile} \
+--logfile=${logfile} \
+--rpath=${rpath} \
+--verbose \
+--downsampling=${downsampling} \
+--restart=${restart} \
+${dataflag} ${loadflag}
+
+conda deactivate
diff --git a/examples/deconvolution/job_serial.slurm b/examples/deconvolution/job_serial.slurm
new file mode 100644
index 0000000000000000000000000000000000000000..4a54852960a363f0bef6ee42d57279d88491e5f2
--- /dev/null
+++ b/examples/deconvolution/job_serial.slurm
@@ -0,0 +1,52 @@
+#!/bin/bash
+#SBATCH --nodes=1
+## SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=1
+## SBATCH --time=02:00:00
+## SBATCH --job-name=sampling
+## SBATCH --mem=10G
+
+## SBATCH --ntasks= ...  # total number of tasks (if not compatible with ntasks-per-node, assigns cores as evenly as possible between workers)
+
+#SBATCH --mail-type=ALL  # email notifications about the job: start, end, ...
+#SBATCH --mail-user=usermail
+
+## SBATCH -o /home/username/python/dspa/logs/sampling.out.%A.%N.log
+## SBATCH -e /home/username/python/dspa/logs/sampling.err.%A.%N.log
+
+# module load python/3.5.1
+# source astro_env/bin/activate
+# deactivate
+
+module purge  # remove all previously loaded modules
+module load anaconda3/2020.02
+# conda create --name demo-jcgs --file requirement.txt
+
+# * The following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+eval "$(conda shell.bash hook)"
+conda activate demo-jcgs
+
+python ../../src/aaxda/main_serial_s.py \
+--Nmc=${Nmc} \
+--rho=${rho} \
+--alpha=${alpha} \
+--beta=${beta} \
+--M=${M} \
+--kernel_size=${ksize} \
+--checkpointfile=${checkpointfile} \
+--checkpoint_frequency=${checkpointfrequency} \
+--checkpointname=${checkpointname} \
+--extension=${fileextension} \
+--datafilename=${datafilename} \
+--dpath=${dpath} \
+--imfile=${imfile} \
+--logfile=${logfile} \
+--rpath=${rpath} \
+--sampler=${sampler} \
+--verbose \
+--downsampling=${downsampling} \
+--restart=${restart} \
+${dataflag} ${loadflag}
+
+conda deactivate
diff --git a/examples/deconvolution/submit_local.sh b/examples/deconvolution/submit_local.sh
new file mode 100644
index 0000000000000000000000000000000000000000..89fd2365dbe9648ff59b6df51a9b96e48befbd30
--- /dev/null
+++ b/examples/deconvolution/submit_local.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+# Run the distributed version of the proposed sampler
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='peppers'              # root of the reference image name (see img/)
+imfile=../../img/${imroot}.h5 # full name of the file, with file extension
+numprocs=16             # number of MPI processes used (not active here)
+dataflag=''             # '--data' if generating synthetic data, '' otherwise
+loadflag=''             # '--load' if loading data from disk, '' otherwise
+restart=-1             # iteration from which restarting the sampler (-1 if not)
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=7                 # vertical size of the square convolution kernel
+Nmc=5000                # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+rho=1                   # splitting parameter
+alpha=1                 # augmentation parameter
+beta=1                  # regularization parameter (TV prior)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed to initialize the random number generator
+save="process"          # checkpointing mode: one per process
+session_name=${imroot}_M${M}_ks${ksize}_n${numprocs}  # name of the tmux session
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointfile=checkpoint_n=${numprocs}_
+checkpointname=checkpoint_n=${numprocs}_
+dpath=data
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+logfile=std_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_r${restart}.log
+rpath=results/results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_n=${numprocs}_${fileextension}
+logpath=$rpath/logs
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * TMUX session
+# ! DO NOT MODIFY
+
+tmux new -d -s ${session_name} # create detached sessions
+
+# the following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+# tmux send-keys -t ${session_name}.0 "eval \"$(conda shell.bash hook)\"" ENTER
+tmux send-keys -t ${session_name}.0 "conda activate demo-jcgs" ENTER
+
+# pass mpi command
+tmux send-keys -t ${session_name}.0 \
+"mpiexec -n ${numprocs} python -m mpi4py ../../src/aaxda/main_s.py \
+--M=${M} --Nmc=${Nmc} --kernel_size=${ksize} --rho=${rho} --alpha=${alpha} --beta=${beta} \
+--checkpointfile=${checkpointfile} \
+--checkpoint_frequency=${checkpointfrequency} \
+--save=${save} \
+--checkpointname=${checkpointname} \
+--datafilename=${datafilename} \
+--seed=${seed} \
+--dpath=${dpath} --imfile=${imfile} --logfile=${logfile} --rpath=${rpath} \
+--verbose --downsampling=${downsampling} --restart=${restart} ${dataflag} ${loadflag}" \
+ENTER # execute command in detached session
+# tmux attach-session -t ${session_name}
+
+# * clean up and kill tmux session once the work is done (description of the run available in the .txt log file)
+# https://unix.stackexchange.com/questions/635875/kill-tmux-session-after-script-is-done
+# tmux send-keys -t ${session_name}.0 "exec rm $mfile" ENTER
diff --git a/examples/deconvolution/submit_local_serial.sh b/examples/deconvolution/submit_local_serial.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2ef0c19dc3511633e1696eaab2a1b55e5968dc90
--- /dev/null
+++ b/examples/deconvolution/submit_local_serial.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Run the serial samplers in the background using tmux
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='peppers'              # root of the reference image name (see img/)
+imfile=../../img/${imroot}.h5 # full name of the file, with file extension
+sampler=psgla           # sampler selected ('pmyula' or 'psgla')
+dataflag=''             # '--data' if generating synthetic data, '' otherwise
+loadflag=''             # '--load' if loading checkpoint from disk, '' otherwise
+restart=-1              # iteration index identifying the warmstart checkpoint
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=7                 # vertical size of the square convolution kernel
+Nmc=5000                # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+rho=1                   # splitting parameter
+alpha=1                 # augmentation parameter
+beta=1                  # regularization parameter (TV prior)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed to initialize the random number generator
+session_name=${sampler}_${imroot}_M${M}_ks${ksize}
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointfile=checkpoint_t=${restart}
+checkpointname=checkpoint_t=
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+dpath=data
+logfile=std_serial_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_r${restart}.log
+rpath=results/results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${fileextension}/${sampler}
+logpath=$rpath/logs
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * TMUX session
+# ! DO NOT MODIFY
+
+tmux new -d -s ${session_name} # create detached sessions
+
+# the following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+# tmux send-keys -t ${session_name}.0 "eval \"$(conda shell.bash hook)\"" ENTER
+tmux send-keys -t ${session_name}.0 "conda activate demo-jcgs" ENTER
+
+# pass the sampling command
+tmux send-keys -t ${session_name}.0 \
+"python ../../src/aaxda/main_serial.py \
+--M=${M} --Nmc=${Nmc} --kernel_size=${ksize} --rho=${rho} --alpha=${alpha} --beta=${beta} \
+--checkpointfile=${checkpointfile} \
+--checkpoint_frequency=${checkpointfrequency} \
+--checkpointname=${checkpointname} \
+--extension=${fileextension} \
+--datafilename=${datafilename} \
+--dpath=${dpath} --imfile=${imfile} --logfile=${logfile} --rpath=${rpath} \
+--sampler=${sampler} \
+--seed=${seed} \
+--verbose --downsampling=${downsampling} --restart=${restart} ${dataflag} ${loadflag}" \
+ENTER # execute command in detached session
+# tmux attach-session -t ${session_name}
diff --git a/examples/deconvolution/submit_metric.sh b/examples/deconvolution/submit_metric.sh
new file mode 100644
index 0000000000000000000000000000000000000000..edafed104758d6688f503ded9ec12d3a62e76f6c
--- /dev/null
+++ b/examples/deconvolution/submit_metric.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Extract metrics for the serial sampler using the different checkpoint
+# files produced
+
+# ! Note that some fields need to be aligned with those used for the
+# ! corresponding run in submit_local_serial.sh
+
+# ! Results are displayed in the log file produced by the run, and saved in a
+# ! .h5 file
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='peppers'        # root of the reference image name (see img/)
+sampler=psgla           # sampler used ('pmyula' or 'psgla')
+numprocs=1              # number of MPI processes used in the run
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=7                 # vertical size of the square convolution kernel
+Nmc=5000                # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+Nbi=2000                # number of burnin samples (needs to be a multiple of checkpointfrequency)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed used to initialize the random number generator in the run
+session_name='metric'   # name of the tmux session
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointname=checkpoint_t=
+dpath=data
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+logfile=metrics_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_nbi=${Nbi}.log
+rpath=results/results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${fileextension}/${sampler}
+
+rfile=final_results_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_nbi=${Nbi}
+logpath=$rpath/logs
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * TMUX session
+# ! DO NOT MODIFY
+
+tmux new -d -s ${session_name} # create detached sessions
+
+# the following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+# tmux send-keys -t ${session_name}.0 "eval \"$(conda shell.bash hook)\"" ENTER
+tmux send-keys -t ${session_name}.0 "conda activate demo-jcgs" ENTER
+
+# pass mpi command
+tmux send-keys -t ${session_name}.0 \
+"mpiexec -n ${numprocs} python -m mpi4py ../../src/aaxda/main_metrics.py \
+--Nmc=${Nmc} --Nbi=${Nbi} \
+--checkpoint_frequency=${checkpointfrequency} \
+--checkpointname=${checkpointname} \
+--datafilename=${datafilename} \
+--dpath=${dpath} --logfile=${logfile} \
+--rpath=${rpath} --rfile=${rfile} \
+--downsampling=${downsampling}" \
+ENTER  # execute command in detached session
+# tmux attach-session -t ${session_name}
diff --git a/examples/deconvolution/submit_metric_p.sh b/examples/deconvolution/submit_metric_p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e4d0b7f633a76da0dbdc551d6bfbfb2edcad6950
--- /dev/null
+++ b/examples/deconvolution/submit_metric_p.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+# Extract metrics for the distributed sampler using the different checkpoint
+# files produced
+
+# ! Note that some fields need to be aligned with those used for the
+# ! corresponding run in submit_local.sh
+
+# ! Results are displayed in the log file produced by the run, and saved in a
+# ! .h5 file
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='peppers'        # root of the reference image name (see img/)
+numprocs=16             # number of MPI processes used in the run
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=7                 # vertical size of the square convolution kernel
+Nmc=5000                # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+Nbi=2000                # number of burnin samples (needs to be a multiple of checkpointfrequency)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed used to initialize the random number generator in the run
+session_name='metric'   # name of the tmux session
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointname=checkpoint_n=${numprocs}
+dpath=data
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+logfile=metrics_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_nbi=${Nbi}.log
+rpath=results/results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_n=${numprocs}_${fileextension}
+
+rfile=final_results_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_nbi=${Nbi}  # name of the file containing the results
+logpath=$rpath/logs
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * TMUX session
+# ! DO NOT MODIFY
+
+tmux new -d -s ${session_name} # create detached sessions
+
+# the following is needed to activate conda environment in bash
+# https://github.com/conda/conda/issues/7980
+# tmux send-keys -t ${session_name}.0 "eval \"$(conda shell.bash hook)\"" ENTER
+tmux send-keys -t ${session_name}.0 "conda activate demo-jcgs" ENTER
+
+# pass mpi command
+# ! two different versions exist; check which sampler is considered (here: main_metrics_processes_s)
+tmux send-keys -t ${session_name}.0 \
+"mpiexec -n ${numprocs} python -m mpi4py ../../src/aaxda/main_metrics_processes_s.py \
+--Nmc=${Nmc} --Nbi=${Nbi} \
+--checkpoint_frequency=${checkpointfrequency} \
+--checkpointname=${checkpointname} \
+--datafilename=${datafilename} \
+--dpath=${dpath} --logfile=${logfile} \
+--rpath=${rpath} --rfile=${rfile} \
+--downsampling=${downsampling}" \
+ENTER  # execute command in detached session
+# tmux attach-session -t ${session_name}
diff --git a/examples/deconvolution/submit_slurm.sh b/examples/deconvolution/submit_slurm.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7bd18316c3139625539b406c123be0909827c42f
--- /dev/null
+++ b/examples/deconvolution/submit_slurm.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='peppers'              # root of the reference image name (see img/)
+imfile=../../img/${imroot}.h5 # full name of the file, with file extension
+numprocs=2              # number of MPI processes used (not active here)
+dataflag=''             # '--data' if generating synthetic data, '' otherwise
+loadflag=''             # '--load' if loading data from disk, '' otherwise
+restart=-1              # iteration index identifying the warmstart checkpoint
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=8                 # vertical size of the square convolution kernel
+Nmc=10000               # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+rho=1.                  # splitting parameter
+alpha=1.                # augmentation parameter
+beta=1.                 # regularization parameter (TV prior)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed to initialize the random number generator
+save="process"
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointfile=checkpoint_n=${numprocs}_
+checkpointname=checkpoint_n=${numprocs}_
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+dpath='../../data'
+logfile=std_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_r${restart}.log
+rpath=results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_n=${numprocs}_${fileextension}_s
+logpath=$rpath/logs
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * SLURM options, path to results and launch command
+# DO NOT MODIFY, unless aware of the impact
+
+mem=15G
+time=02:00:00
+
+sbatch --job-name=aaxda.$numprocs.$imroot \
+--mem=${mem} \
+--time=${time} \
+--ntasks-per-node=$numprocs \
+--output=$logpath/aaxda_${imroot}_n=${numprocs}.out \
+--error=$logpath/aaxda_${imroot}_n=${numprocs}.err \
+--export=numprocs=${numprocs},\
+M=${M},Nmc=${Nmc},ksize=${ksize},rho=${rho},alpha=${alpha},beta=${beta},\
+checkpointfile=${checkpointfile},\
+checkpointfrequency=${checkpointfrequency},\
+checkpointname=${checkpointname},\
+datafilename=${datafilename},\
+seed=${seed},\
+dpath=${dpath},imfile=${imfile},logfile=${logfile},rpath=${rpath},\
+downsampling=${downsampling},restart=${restart},save=${save},\
+logpath=${logpath},dataflag=${dataflag},loadflag=${loadflag} job.slurm
diff --git a/examples/deconvolution/submit_slurm_serial.sh b/examples/deconvolution/submit_slurm_serial.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c5b38a2fb77aec678e95952116a1ca2f90babac1
--- /dev/null
+++ b/examples/deconvolution/submit_slurm_serial.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# * Parameters of the run
+# TODO: to be configured by the user
+
+imroot='house'                # root of the reference image name (see img/)
+imfile=../../img/${imroot}.h5 # full name of the file, with file extension
+sampler='pmyula'        # sampler selected ('pmyula' or 'psgla')
+dataflag=''             # '--data' if generating synthetic data, '' otherwise
+loadflag=''             # '--load' if loading checkpoint from disk, '' otherwise
+restart=-1              # iteration index identifying the warmstart checkpoint
+M=30                    # maximum intensity level for the ground truth (int)
+ksize=8                 # vertical size of the square convolution kernel
+Nmc=5000                # number of Monte-Carlo iterations
+checkpointfrequency=500 # frequency at which a checkpoint is saved to disk
+rho=1.                  # splitting parameter
+alpha=1.                # augmentation parameter
+beta=1.                 # regularization parameter (TV prior)
+downsampling=1          # downsampling factor to create dataset from ref. img
+fileextension='h5'      # file extension for the checkpoints produced
+seed=1234               # seed to initialize the random number generator
+
+
+# * Name of checkpoints and paths (checkpoint, logfiles, ...)
+checkpointfile=checkpoint_serial_t=${restart}
+checkpointname=checkpoint_serial_t=
+datafilename=data_${imroot}_ds${downsampling}_M${M}_ks${ksize}
+dpath='../../data'
+logfile=std_serial_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${Nmc}_r${restart}.log
+rpath=results_conv_${imroot}_ds${downsampling}_M${M}_ks${ksize}_s${seed}_${fileextension}/${sampler}
+logpath=$rpath/logs_serial
+logfile=${logpath}/$logfile
+
+mkdir -p $logpath
+
+
+# * SLURM options, path to results and launch command
+# DO NOT MODIFY, unless aware of the impact
+
+mem=15G
+time=03:00:00
+
+sbatch --job-name=serial.$sampler.$imroot \
+--mem=${mem} \
+--time=${time} \
+--ntasks-per-node=1 \
+--output=$logpath/${sampler}_${imroot}.out \
+--error=$logpath/${sampler}_${imroot}.err \
+--export=M=${M},Nmc=${Nmc},ksize=${ksize},rho=${rho},alpha=${alpha},beta=${beta},\
+checkpointfile=${checkpointfile},\
+checkpointfrequency=${checkpointfrequency},\
+checkpointname=${checkpointname},\
+extension=${fileextension},\
+datafilename=${datafilename},\
+seed=${seed},\
+dpath=${dpath},imfile=${imfile},logfile=${logfile},rpath=${rpath},\
+downsampling=${downsampling},restart=${restart},\
+sampler=${sampler},\
+logpath=${logpath},dataflag=${dataflag},loadflag=${loadflag} job_serial.slurm
diff --git a/img/5.1.13.tiff b/img/5.1.13.tiff
new file mode 100644
index 0000000000000000000000000000000000000000..d09cef3ad54b5152c4667e3744cdc1949cb9391d
Binary files /dev/null and b/img/5.1.13.tiff differ
diff --git a/img/5.3.01.tiff b/img/5.3.01.tiff
new file mode 100644
index 0000000000000000000000000000000000000000..13a756d8b7566d29438c6d03910d0ca977156d59
Binary files /dev/null and b/img/5.3.01.tiff differ
diff --git a/img/airport.h5 b/img/airport.h5
new file mode 100644
index 0000000000000000000000000000000000000000..937c699e44ae398a9c4b9640097a96f6bb5e9580
Binary files /dev/null and b/img/airport.h5 differ
diff --git a/img/bank.png b/img/bank.png
new file mode 100644
index 0000000000000000000000000000000000000000..71529dc024f13f94d5ce42a86eb4659ae50f486a
Binary files /dev/null and b/img/bank.png differ
diff --git a/img/bank_color.png b/img/bank_color.png
new file mode 100644
index 0000000000000000000000000000000000000000..b390136fce633be696aaa28991bb40f1f01ed7a8
Binary files /dev/null and b/img/bank_color.png differ
diff --git a/img/barb.png b/img/barb.png
new file mode 100755
index 0000000000000000000000000000000000000000..e9f29e5ccc8d9296066192006074591bcd76f357
Binary files /dev/null and b/img/barb.png differ
diff --git a/img/boat.png b/img/boat.png
new file mode 100755
index 0000000000000000000000000000000000000000..9098a93d998332bca33e3a20a6921770d62bec86
Binary files /dev/null and b/img/boat.png differ
diff --git a/img/cameraman.h5 b/img/cameraman.h5
new file mode 100644
index 0000000000000000000000000000000000000000..87e31509ba8d1d85ecfdeb4f162fb3a53c68a11f
Binary files /dev/null and b/img/cameraman.h5 differ
diff --git a/img/cameraman.png b/img/cameraman.png
new file mode 100755
index 0000000000000000000000000000000000000000..8a2912d74aa60816208a02a7e1d814597ffbbd0b
Binary files /dev/null and b/img/cameraman.png differ
diff --git a/img/chessboard.png b/img/chessboard.png
new file mode 100755
index 0000000000000000000000000000000000000000..0531a8dad9cda0fb1b69cdcff97e5e08915389ef
Binary files /dev/null and b/img/chessboard.png differ
diff --git a/img/corral.png b/img/corral.png
new file mode 100755
index 0000000000000000000000000000000000000000..5a789fbe07e6d193b56b9cffc0f207927e02c10d
Binary files /dev/null and b/img/corral.png differ
diff --git a/img/cortex.png b/img/cortex.png
new file mode 100755
index 0000000000000000000000000000000000000000..48bd58918ed660e476681d46e97ce51d7c18252e
Binary files /dev/null and b/img/cortex.png differ
diff --git a/img/grating.png b/img/grating.png
new file mode 100755
index 0000000000000000000000000000000000000000..2317973d33a85a6330b78635f4a43c00b56ae80e
Binary files /dev/null and b/img/grating.png differ
diff --git a/img/hair.png b/img/hair.png
new file mode 100755
index 0000000000000000000000000000000000000000..c953f129381224f42af3e56f919389bd8683edc6
Binary files /dev/null and b/img/hair.png differ
diff --git a/img/house.h5 b/img/house.h5
new file mode 100644
index 0000000000000000000000000000000000000000..25993f66002451631b81285f7b134b807b4d6501
Binary files /dev/null and b/img/house.h5 differ
diff --git a/img/house1021.h5 b/img/house1021.h5
new file mode 100644
index 0000000000000000000000000000000000000000..e4306b67c0d2e5f4d7fa9aeca9f5a3a190c23da9
Binary files /dev/null and b/img/house1021.h5 differ
diff --git a/img/house1022.h5 b/img/house1022.h5
new file mode 100644
index 0000000000000000000000000000000000000000..6246aa3e9fc02426c121e4f639d306d0bc7dc92e
Binary files /dev/null and b/img/house1022.h5 differ
diff --git a/img/house511.h5 b/img/house511.h5
new file mode 100644
index 0000000000000000000000000000000000000000..296af7a45b5c6ad3b161c58ebe9b412efaa2f9ed
Binary files /dev/null and b/img/house511.h5 differ
diff --git a/img/house512.h5 b/img/house512.h5
new file mode 100644
index 0000000000000000000000000000000000000000..d8683d46b59958f45c523886ccf245e928034ab2
Binary files /dev/null and b/img/house512.h5 differ
diff --git a/img/image_micro_8.h5 b/img/image_micro_8.h5
new file mode 100644
index 0000000000000000000000000000000000000000..933c78566247aacad71906c524b462ae1755a56f
Binary files /dev/null and b/img/image_micro_8.h5 differ
diff --git a/img/lena.png b/img/lena.png
new file mode 100755
index 0000000000000000000000000000000000000000..f14918282436fcd454f2587942fbe5e2564e468a
Binary files /dev/null and b/img/lena.png differ
diff --git a/img/line.png b/img/line.png
new file mode 100755
index 0000000000000000000000000000000000000000..b48ddad36cbdf76ce6ec58d271791b67bad106c1
Binary files /dev/null and b/img/line.png differ
diff --git a/img/line_horizontal.png b/img/line_horizontal.png
new file mode 100755
index 0000000000000000000000000000000000000000..06c39f4c3ed1892231a80dd892c3bfa7e780be00
Binary files /dev/null and b/img/line_horizontal.png differ
diff --git a/img/line_vertical.png b/img/line_vertical.png
new file mode 100755
index 0000000000000000000000000000000000000000..7bf11fb97b4d1dfac2d69e620fd7ff65ba38cbd3
Binary files /dev/null and b/img/line_vertical.png differ
diff --git a/img/male.h5 b/img/male.h5
new file mode 100644
index 0000000000000000000000000000000000000000..c264ffc55e3f630b018d97133d0060ae05ad1e2a
Binary files /dev/null and b/img/male.h5 differ
diff --git a/img/mandrill.png b/img/mandrill.png
new file mode 100755
index 0000000000000000000000000000000000000000..58b49fbc2e57cde1124fbfaae6be90a24ca2e7c1
Binary files /dev/null and b/img/mandrill.png differ
diff --git a/img/mosque.png b/img/mosque.png
new file mode 100644
index 0000000000000000000000000000000000000000..32b02fc039467d5d8b89c7e65502332b3850dc48
Binary files /dev/null and b/img/mosque.png differ
diff --git a/img/mosque_color.png b/img/mosque_color.png
new file mode 100644
index 0000000000000000000000000000000000000000..9048f0b009617c6d7e0a65834541cf4164261616
Binary files /dev/null and b/img/mosque_color.png differ
diff --git a/img/parrot-mask.png b/img/parrot-mask.png
new file mode 100755
index 0000000000000000000000000000000000000000..831ab3775bddc0ab8e387f1268a8276905e0425d
Binary files /dev/null and b/img/parrot-mask.png differ
diff --git a/img/parrot.png b/img/parrot.png
new file mode 100755
index 0000000000000000000000000000000000000000..4cfdf35ee9ef9f172f83c0f889fc5b778b6f7498
Binary files /dev/null and b/img/parrot.png differ
diff --git a/img/parrotgray.png b/img/parrotgray.png
new file mode 100644
index 0000000000000000000000000000000000000000..57551b3e4b374e7cc8378140623c4cccad2e0a6d
Binary files /dev/null and b/img/parrotgray.png differ
diff --git a/img/peppers-bw.png b/img/peppers-bw.png
new file mode 100755
index 0000000000000000000000000000000000000000..d9333c9f8a88ccf9c09d583aee7bedb6836e115b
Binary files /dev/null and b/img/peppers-bw.png differ
diff --git a/img/peppers.h5 b/img/peppers.h5
new file mode 100644
index 0000000000000000000000000000000000000000..1ef2c4378ba1ba5348bede56ba7f7cb92393dad0
Binary files /dev/null and b/img/peppers.h5 differ
diff --git a/img/peppers1024.h5 b/img/peppers1024.h5
new file mode 100644
index 0000000000000000000000000000000000000000..ef99fa1b779dc893d37d0b71c7c1696195ebf337
Binary files /dev/null and b/img/peppers1024.h5 differ
diff --git a/img/peppers256.h5 b/img/peppers256.h5
new file mode 100644
index 0000000000000000000000000000000000000000..1c67cd82ab9798ff6a37fd6fd5dbf0690d62f345
Binary files /dev/null and b/img/peppers256.h5 differ
diff --git a/img/periodic_bumps.png b/img/periodic_bumps.png
new file mode 100755
index 0000000000000000000000000000000000000000..fb4fe1b2facdb8dd9d82a1a094644898878eabaf
Binary files /dev/null and b/img/periodic_bumps.png differ
diff --git a/img/rubik1.png b/img/rubik1.png
new file mode 100755
index 0000000000000000000000000000000000000000..bbe824072a2feecaea4ee742967e921c8a938190
Binary files /dev/null and b/img/rubik1.png differ
diff --git a/img/rubik2.png b/img/rubik2.png
new file mode 100755
index 0000000000000000000000000000000000000000..d56bd3e11c12cd2ff683dcb68458a7d759b5020a
Binary files /dev/null and b/img/rubik2.png differ
diff --git a/img/rubik3.png b/img/rubik3.png
new file mode 100755
index 0000000000000000000000000000000000000000..e2a140af7b9cb89af5b15babaacf9209833b8228
Binary files /dev/null and b/img/rubik3.png differ
diff --git a/img/taxi1.png b/img/taxi1.png
new file mode 100755
index 0000000000000000000000000000000000000000..d3859086df432710c74fd0408e61faead55e936b
Binary files /dev/null and b/img/taxi1.png differ
diff --git a/img/taxi2.png b/img/taxi2.png
new file mode 100755
index 0000000000000000000000000000000000000000..618840842b18f53f79af146b670684d4ce422d2e
Binary files /dev/null and b/img/taxi2.png differ
diff --git a/img/taxi3.png b/img/taxi3.png
new file mode 100755
index 0000000000000000000000000000000000000000..583a76f197f5e9e537033e70b54bf7b4949ceaf6
Binary files /dev/null and b/img/taxi3.png differ
diff --git a/img/vessels.png b/img/vessels.png
new file mode 100755
index 0000000000000000000000000000000000000000..3998b37cd71df9869cba1aece77c19e130ab05b1
Binary files /dev/null and b/img/vessels.png differ
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000000000000000000000000000000000000..4d2c196d34bde10f23f5ccc556d696ed49810a5a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.black]
+line-length = 88
+target-version = ['py37']
+include = '\.pyi?$'
+extend-exclude = '''
+# A regex preceded with ^/ will apply only to files and directories
+# in the root of the project.
+^/foo.py  # exclude a file named foo.py in the root of the project (in addition to the defaults)
+'''
+skip-string-normalization = true
+
+[tool.isort]
+profile = "black"
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+use_parentheses = true
+ensure_newline_before_comments = true
+line_length = 88
+
+[tool.pylint.messages_control]
+disable = "C0330, C0326"
+
+[tool.pylint.format]
+max-line-length = "88"
diff --git a/requirement.txt b/requirement.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a63f74d750a40c7ee8c79f0d6dfa83fc1bb8ed4a
--- /dev/null
+++ b/requirement.txt
@@ -0,0 +1,19 @@
+numpy
+scipy
+numba
+mpi4py
+h5py>=2.9  # MPI-enabled build required; the conda spec would be h5py>=2.9=mpi*
+matplotlib
+imageio
+seaborn
+snakeviz
+tqdm
+jupyterlab
+pre-commit
+black
+flake8
+isort
+coverage
+sphinx
+sphinx_rtd_theme
+sphinxcontrib-bibtex
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100644
index 0000000000000000000000000000000000000000..07fc56fea5c0a2da4a2a89c2ba5d8dfd83cfff6e
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+session_name=test
+
+tmux new -d -s ${session_name}
+# tmux send-keys -t ${session_name}.0 "echo a=${a}; echo b=${b}" ENTER
+tmux send-keys -t ${session_name}.0 "conda activate jcgs_demo; \
+export NUMBA_DISABLE_JIT=1; \
+coverage run -m pytest; \
+coverage html; \
+coverage xml -o reports/coverage/coverage.xml; \
+genbadge coverage -o docs/coverage.svg; \
+docstr-coverage ." \
+ENTER
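+
+# Usage sketch: run "bash run_tests.sh", then "tmux attach -t test" to follow
+# progress; coverage reports are written to reports/coverage/ and the badge to
+# docs/coverage.svg (assumes the jcgs_demo conda env and genbadge are available).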
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..592398169b111207bb065dcd35aac851bfbb91a3
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,61 @@
+[flake8]
+# Recommend matching the black line length (default 88),
+# rather than using the flake8 default of 79:
+max-line-length = 88
+extend-ignore =
+    # See https://github.com/PyCQA/pycodestyle/issues/373
+    E203,
+
+[pylint]
+max-line-length = 88
+
+[pylint.messages_control]
+disable = C0330, C0326
+
+# .coveragerc to control coverage.py
+[coverage:run]
+branch = True
+omit =
+    # omit these single files
+    main.py
+    *_test.py
+    # tests/test_mpi.py
+    # tests/test_mpi_convolutions.py
+    *__init__.py
+    # omit directory
+    drafts/*
+    src/drafts/*
+
+
+[coverage:report]
+# Regexes for lines to exclude from consideration
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if __name__ == .__main__.:
+
+omit =
+    tests/*
+
+ignore_errors = True
+
+[coverage:html]
+directory = ./docs/build/coverage_html_report
+
+[tool:pytest]
+markers =
+    slow: marks tests as slow (deselect with '-m "not slow"')
+    mpi: parallel tests requiring mpi
+addopts = "-ra -q -vv"
+testpaths = "tests"
diff --git a/src/aaxda/__init__.py b/src/aaxda/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/main_metrics.py b/src/aaxda/main_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2fcb9872a0899a2a8ffab6f2f7154d263ca1b2d
--- /dev/null
+++ b/src/aaxda/main_metrics.py
@@ -0,0 +1,335 @@
+"""Main script to compute the reconstruction metrics and extract the estimators
+(MSSE and MAP) from the availabel samples.
+"""
+
+import logging
+from os.path import join
+from pathlib import Path
+
+import h5py
+import numpy as np
+import utils.args_metrics as argm
+from mpi4py import MPI
+from skimage.metrics import structural_similarity as ssim
+
+from aaxda.utils.checkpoint import DistributedCheckpoint
+from aaxda.utils.communications import local_split_range_nd
+
+# TODO: adapt to setting where there is one checkpoint file per process (to be
+# TODO- investigated)
+
+
+def main_metrics(
+    comm,
+    rank,
+    filename,
+    checkpointname,
+    logger,
+    Nbi,
+    downsampling,
+    Nmc,
+    checkpoint_frequency,
+    outputname,
+):
+
+    size = comm.Get_size()
+    ndims = 2
+
+    # * setup communicator
+    grid_size = MPI.Compute_dims(size, ndims)
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
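+    # MPI.Compute_dims factors the `size` processes into a balanced 2d grid;
+    # each rank then reads its coordinates in the Cartesian communicator to
+    # identify which tile of the image it owns.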
+
+    # parallel data loading
+    if rank == 0:
+        logger.info("Begin: defining auxiliary variables to load ground truth")
+
+    f = h5py.File(filename + ".h5", "r+", driver="mpio", comm=MPI.COMM_WORLD)
+    N = f["N"][()]
+    # M = f["M"][()]
+    # h = f["h"][()]
+    f.close()
+
+    # overlap_size = np.array(h.shape, dtype="i")
+    # overlap_size -= 1
+    # data_size = N + overlap_size
+
+    # slice to extract image tile from full image file
+    tile_pixels = local_split_range_nd(grid_size, N, ranknd)
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * create distributed checkpoint object to load the files of interest
+    if rank == 0:
+        logger.info("End: defining auxiliary variables to load ground truth")
+        logger.info("Begin: setup checkpointer objects")
+
+    # convolution model and checkpoint
+    # sync_conv_model = SyncConvModel(
+    #     N, data_size, h, comm, grid_size, h.itemsize, False
+    # )
+    # ground truth
+    checkpointer_gt = DistributedCheckpoint(comm, filename)
+    dic = checkpointer_gt.load("", [global_slice_tile], None, "x")
+    local_x_gt = dic["x"]
+    # checkpoint files from sampler
+    checkpointer = DistributedCheckpoint(comm, checkpointname)
+    # object to save final results to disk
+    saver = DistributedCheckpoint(comm, outputname)
+
+    # * form MMSE estimator across all files and processes
+    if rank == 0:
+        logger.info("End: setup checkpointer objects")
+        logger.info("Begin: compute MMSE estimator")
+
+    c_ = 0
+    local_x_mmse = np.zeros(tile_size, dtype="d")
+
+    for value in range(Nbi + checkpoint_frequency, Nmc + 1, checkpoint_frequency):
+        dic = checkpointer.load(
+            value, [global_slice_tile, np.s_[:]], None, "x_m", "counter"
+        )
+        local_x_mmse += dic["counter"] * dic["x_m"]
+        c_ += dic["counter"]
+    local_x_mmse /= c_
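+    # Aggregation note: the MMSE estimate is the weighted mean of the
+    # per-checkpoint sample means, x_mmse = sum_k c_k * m_k / sum_k c_k.
+    # Worked example (hypothetical numbers): counters c = [100, 50] and means
+    # m = [2.0, 5.0] give (100 * 2.0 + 50 * 5.0) / 150 = 3.0.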
+
+    # * aggregate all necessary elements for MAP, atime + std, score, reg.
+    # parameter in a single file
+    if rank == 0:
+        logger.info("End: compute MMSE estimator")
+        logger.info("Begin: retrieve MAP estimator")
+
+    file_id_map = np.empty(1, dtype="i")
+    atime = 0.0
+    stdtime = 0.0
+    runtime = 0.0
+    beta_ = None
+    score_ = None
+
+    if rank == 0:
+        c_ = 0
+        score_map = np.inf
+        file_id_map[0] = 0
+        id_map = 0
+        atime = 0.0
+        asqtime = 0.0
+        beta_ = np.empty(Nmc - Nbi)
+        score_ = np.empty(Nmc - Nbi)
+
+        for counter, value in enumerate(
+            range(Nbi + checkpoint_frequency, Nmc + 1, checkpoint_frequency)
+        ):
+            with h5py.File("{}{}.h5".format(checkpointname, value), "r") as f:
+                # potential
+                dset = f["score"]
+                dset.read_direct(
+                    score_,
+                    np.s_[:],
+                    np.s_[
+                        counter
+                        * checkpoint_frequency : (counter + 1)
+                        * checkpoint_frequency
+                    ],
+                )
+                # beta
+                dset = f["beta"]
+                dset.read_direct(
+                    beta_,
+                    np.s_[:],
+                    np.s_[
+                        counter
+                        * checkpoint_frequency : (counter + 1)
+                        * checkpoint_frequency
+                    ],
+                )
+                # time (average and standard deviation)
+                c__ = f["counter"][0]
+                atime += c__ * f["atime"][0]
+                asqtime += c__ * f["asqtime"][0]
+                c_ += c__
+                # MAP
+                file_id_m = value
+                id_m = np.argmin(f["score"])
+                score_m = f["score"][id_map]
+                if score_m < score_map:
+                    score_map = score_m
+                    file_id_map[0] = file_id_m
+                    id_map = id_m
+        runtime = atime
+        atime /= c_
+        stdtime = np.sqrt((asqtime - c_ * atime**2) / (c_ - 1))
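+        # One-pass variance identity: with S1 = sum of per-iteration times and
+        # S2 = sum of their squares, the unbiased sample variance is
+        # (S2 - S1**2 / c_) / (c_ - 1); since atime = S1 / c_ after the
+        # division above, this equals (asqtime - c_ * atime**2) / (c_ - 1).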
+
+    comm.Bcast([file_id_map, 1, MPI.INT], root=0)
+
+    dic = checkpointer.load(file_id_map[0], [global_slice_tile], None, "x_map")
+    local_x_map = dic["x_map"]  # .astype("=d")
+
+    # * compute reconstruction metrics
+    if rank == 0:
+        logger.info("End: retrieve MAP estimator")
+        logger.info("Begin: compute metrics")
+
+    local_norm_gt = np.array(np.sum(local_x_gt**2), dtype="d")
+    local_err_mmse = np.array(np.sum((local_x_gt - local_x_mmse) ** 2), dtype="d")
+    local_err_map = np.array(np.sum((local_x_gt - local_x_map) ** 2), dtype="d")
+
+    norm_gt = np.full(1, np.inf, dtype="d")
+    err_mmse = np.full(1, np.inf, dtype="d")
+    err_map = np.full(1, np.inf, dtype="d")
+
+    comm.Reduce(
+        [local_norm_gt, MPI.DOUBLE],
+        [norm_gt, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    comm.Reduce(
+        [local_err_mmse, MPI.DOUBLE],
+        [err_mmse, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    comm.Reduce(
+        [local_err_map, MPI.DOUBLE],
+        [err_map, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    # * save results to disk
+
+    # MMSE and MAP (need all processes)
+    saver.save("", [N], [global_slice_tile], [None], mode="w", x_mmse=local_x_mmse)
+    saver.save("", [N], [global_slice_tile], [None], mode="a", x_map=local_x_map)
+
+    snr_mmse = 0.0
+    snr_map = 0.0
+    if rank == 0:
+        snr_mmse = 10 * np.log10(norm_gt[0] / err_mmse[0])
+        snr_map = 10 * np.log10(norm_gt[0] / err_map[0])
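+        # Reconstruction SNR in dB: 10 * log10(||x_gt||^2 / ||x_gt - x_est||^2),
+        # using the squared norms reduced over all processes above.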
+        logger.info(
+            r"snr (MMSE) = {:1.3e}, snr (MAP) = {:1.3e}, atime = {:1.3e}, std_time = {:1.3e}, runtime = {:1.3e}".format(
+                snr_mmse,
+                snr_map,
+                atime,
+                stdtime,
+                runtime,
+            )
+        )
+
+    # compute SSIM (only on rank 0, cannot be easily parallelized (unless
+    # rewriting the function manually))
+    dic_x = saver.load_from_process(0, "", 2 * [np.s_[:]], None, "x_mmse", "x_map")
+
+    checkpointer_gt = DistributedCheckpoint(comm, filename)
+    dic = checkpointer_gt.load_from_process(0, "", [np.s_[:]], None, "x")
+
+    ssim_mmse = -1.0
+    ssim_map = -1.0
+
+    if rank == 0:
+        ssim_mmse = ssim(
+            dic["x"], dic_x["x_mmse"], data_range=dic["x"].max() - dic["x"].min()
+        )
+        ssim_map = ssim(
+            dic["x"], dic_x["x_map"], data_range=dic["x"].max() - dic["x"].min()
+        )
+
+        logger.info(
+            r"SSIM (MMSE) = {:1.3e}, SSIM (MAP) = {:1.3e}".format(
+                ssim_mmse,
+                ssim_map,
+            )
+        )
+        logger.info("End: compute metrics")
+        logger.info("Begin: save results to disk")
+
+    # snr, timing, reg. parameter, score
+    select = 10 * [np.s_[:]]
+    chunk_sizes = 10 * [None]
+
+    saver.save_from_process(
+        0,
+        "",
+        select,
+        chunk_sizes,
+        mode="a",
+        snr_mmse=snr_mmse,
+        snr_map=snr_map,
+        ssim_mmse=ssim_mmse,
+        ssim_map=ssim_map,
+        atime=atime,
+        stdtime=stdtime,
+        runtime=runtime,
+        beta=beta_,
+        score=score_,
+        file_id_map=file_id_map,
+    )
+
+    if rank == 0:
+        logger.info("End: save results to disk")
+
+
+if __name__ == "__main__":
+
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+
+    args = argm.parse_args()
+
+    if rank == 0:
+        Path(args.rpath).mkdir(parents=True, exist_ok=True)
+        Path(args.dpath).mkdir(parents=True, exist_ok=True)
+        Path("debug").mkdir(parents=True, exist_ok=True)
+        logger = logging.getLogger(__name__)
+
+        logging.basicConfig(
+            filename=args.logfile,
+            level=logging.INFO,
+            filemode="w",
+            format="%(asctime)s %(levelname)s %(message)s",
+        )
+
+        logger.info("Host: {}".format(MPI.Get_processor_name()))
+
+    else:
+        logger = None
+
+    # * debugging values
+    # args.datafilename = "inpainting_data_image_micro_8_ds1_isnr40" # "data_image_micro_8_ds1_M30"
+    # args.rpath = "results_inpainting_image_micro_8_ds1_isnr40_s1234_h5" # "results_image_micro_8_ds1_M30_s1234_h5_fbeta"
+    # args.checkpointname = "pmyula/checkpoint_serial_t="  # "pmyula/checkpoint_t="
+    # args.Nbi = 50000
+    # args.downsampling = 1
+    # args.checkpoint_frequency = 10000
+    # args.Nmc = 100000
+    # args.rfile = "final_results"
+    # args.sampler = "pmyula"
+    # Path(join(args.rpath, args.sampler)).mkdir(parents=True, exist_ok=True)
+
+    datafilename = join(args.dpath, args.datafilename)
+    checkpointname = join(args.rpath, args.checkpointname)
+    outputname = join(args.rpath, args.rfile)
+
+    main_metrics(
+        comm,
+        rank,
+        datafilename,
+        checkpointname,
+        logger,
+        args.Nbi,
+        args.downsampling,
+        args.Nmc,
+        args.checkpoint_frequency,
+        outputname,
+    )
+
+    pass
diff --git a/src/aaxda/main_metrics_processes_s.py b/src/aaxda/main_metrics_processes_s.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7e881d513eafacb4a7f974f3a4039ff69e7c3f5
--- /dev/null
+++ b/src/aaxda/main_metrics_processes_s.py
@@ -0,0 +1,353 @@
+"""Main script to compute the reconstruction metrics and extract the estimators
+(MMSE and MAP) from the available samples (one checkpoint file per process).
+"""
+
+import logging
+from os.path import join
+from pathlib import Path
+
+import h5py
+import numpy as np
+import utils.args_metrics as argm
+from mpi4py import MPI
+from skimage.metrics import structural_similarity as ssim
+
+from aaxda.utils.checkpoint import DistributedCheckpoint
+from aaxda.utils.communications import local_split_range_nd
+
+
+def main_metrics(
+    comm,
+    rank,
+    filename,
+    checkpointname,
+    logger,
+    Nbi,
+    downsampling,
+    Nmc,
+    checkpoint_frequency,
+    outputname,
+):
+
+    size = comm.Get_size()
+    ndims = 2
+
+    # * setup communicator
+    grid_size = MPI.Compute_dims(size, ndims)
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # parallel data loading
+    if rank == 0:
+        logger.info("Begin: defining auxiliary variables to load ground truth")
+
+    f = h5py.File(filename + ".h5", "r+", driver="mpio", comm=MPI.COMM_WORLD)
+    N = f["N"][()]
+    # M = f["M"][()]
+    # h = f["h"][()]
+    f.close()
+
+    # overlap_size = np.array(h.shape, dtype="i")
+    # overlap_size -= 1
+    # data_size = N + overlap_size
+
+    # slice to extract image tile from full image file
+    tile_pixels = local_split_range_nd(grid_size, N, ranknd)
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * create distributed checkpoint object to load the files of interest
+    if rank == 0:
+        logger.info("End: defining auxiliary variables to load ground truth")
+        logger.info("Begin: setup checkpointer objects")
+
+    # convolution model and checkpoint
+    # sync_conv_model = SyncConvModel(
+    #     N, data_size, h, comm, grid_size, h.itemsize, False
+    # )
+    # ground truth
+    checkpointer_gt = DistributedCheckpoint(comm, filename)
+    dic = checkpointer_gt.load("", [global_slice_tile], None, "x")
+    local_x_gt = dic["x"]
+    # checkpoint files from sampler
+    # checkpointer = DistributedCheckpoint(comm, checkpointname)
+    # object to save final results to disk
+    saver = DistributedCheckpoint(comm, outputname)
+
+    # * form MMSE estimator across all files and processes
+    if rank == 0:
+        logger.info("End: setup checkpointer objects")
+        logger.info("Begin: compute MMSE estimator")
+        # logger.info("{}_p0_t={}.h5".format(checkpointname, 2000))
+
+    c_ = 0
+    local_x_mmse = np.zeros(tile_size, dtype="d")
+
+    for value in range(Nbi + checkpoint_frequency, Nmc + 1, checkpoint_frequency):
+        # x_mmse += np.sum(f["x"][()], axis=0)
+        # dic = checkpointer.load(
+        #     value, [global_slice_tile, np.s_[:]], None, "x_m", "counter"
+        # )
+        with h5py.File(
+            "{}_p0_t={}.h5".format(checkpointname, value),
+            "r",
+            driver="mpio",
+            comm=comm,
+        ) as f:
+            local_counter = f["counter"][()]
+
+        with h5py.File("{}_p{}_t={}.h5".format(checkpointname, rank, value), "r") as f:
+            local_xm = f["x_m"][()]
+
+        local_x_mmse += local_counter * local_xm
+        c_ += local_counter
+    local_x_mmse /= c_
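+    # Checkpoints are sharded per process: rank r stores its tile in
+    # "{checkpointname}_p{r}_t={iteration}.h5", while the scalar counter is
+    # read collectively from the rank-0 file so all ranks use the same weights.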
+
+    # * aggregate all necessary elements for MAP, atime + std, score, reg.
+    # parameter in a single file
+    if rank == 0:
+        logger.info("End: compute MMSE estimator")
+        logger.info("Begin: retrieve MAP estimator")
+
+    file_id_map = np.empty(1, dtype="i")
+    atime = 0.0
+    stdtime = 0.0
+    runtime = 0.0
+    beta_ = None
+    score_ = None
+
+    if rank == 0:
+        c_ = 0
+        score_map = np.inf
+        file_id_map[0] = 0
+        id_map = 0
+        atime = 0.0
+        asqtime = 0.0
+        beta_ = np.empty(Nmc - Nbi)
+        score_ = np.empty(Nmc - Nbi)
+
+        for counter, value in enumerate(
+            range(Nbi + checkpoint_frequency, Nmc + 1, checkpoint_frequency)
+        ):
+            with h5py.File(
+                "{}_p{}_t={}.h5".format(checkpointname, rank, value), "r"
+            ) as f:
+                # potential
+                dset = f["score"]
+                dset.read_direct(
+                    score_,
+                    np.s_[:],
+                    np.s_[
+                        counter
+                        * checkpoint_frequency : (counter + 1)
+                        * checkpoint_frequency
+                    ],
+                )
+                # beta
+                dset = f["regularization"]
+                dset.read_direct(
+                    beta_,
+                    np.s_[:, 4],
+                    np.s_[
+                        counter
+                        * checkpoint_frequency : (counter + 1)
+                        * checkpoint_frequency,
+                    ],
+                )
+                # time (average and standard deviation)
+                c__ = f["counter"][0]
+                atime += c__ * f["atime"][0]
+                asqtime += c__ * f["asqtime"][0]
+                c_ += c__
+                # MAP
+                file_id_m = value
+                id_m = np.argmin(f["score"])
+                score_m = f["score"][id_map]
+                if score_m < score_map:
+                    score_map = score_m
+                    file_id_map[0] = file_id_m
+                    id_map = id_m
+        runtime = atime
+        atime /= c_
+        stdtime = np.sqrt((asqtime - c_ * atime**2) / (c_ - 1))
+
+    comm.Bcast([file_id_map, 1, MPI.INT], root=0)
+
+    # dic = checkpointer.load(file_id_map[0], [global_slice_tile], None, "x_map")
+    # local_x_map = dic["x_map"]  # .astype("=d")
+
+    with h5py.File(
+        "{}_p{}_t={}.h5".format(checkpointname, rank, file_id_map[0]), "r"
+    ) as f:
+        local_x_map = f["x_map"][()]
+
+    # * compute reconstruction metrics
+    if rank == 0:
+        logger.info("End: retrieve MAP estimator")
+        logger.info("Begin: compute metrics")
+
+    local_norm_gt = np.array(np.sum(local_x_gt**2), dtype="d")
+    local_err_mmse = np.array(np.sum((local_x_gt - local_x_mmse) ** 2), dtype="d")
+    local_err_map = np.array(np.sum((local_x_gt - local_x_map) ** 2), dtype="d")
+
+    norm_gt = np.full(1, np.inf, dtype="d")
+    err_mmse = np.full(1, np.inf, dtype="d")
+    err_map = np.full(1, np.inf, dtype="d")
+
+    comm.Reduce(
+        [local_norm_gt, MPI.DOUBLE],
+        [norm_gt, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    comm.Reduce(
+        [local_err_mmse, MPI.DOUBLE],
+        [err_mmse, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    comm.Reduce(
+        [local_err_map, MPI.DOUBLE],
+        [err_map, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+
+    # * save results to disk
+
+    # MMSE and MAP (need all processes)
+    saver.save("", [N], [global_slice_tile], [None], mode="w", x_mmse=local_x_mmse)
+
+    # ! seems to create a segfault for a large number of processes: why?
+    saver.save("", [N], [global_slice_tile], [None], mode="a", x_map=local_x_map)
+
+    snr_mmse = 0.0
+    snr_map = 0.0
+    if rank == 0:
+        snr_mmse = 10 * np.log10(norm_gt[0] / err_mmse[0])
+        snr_map = 10 * np.log10(norm_gt[0] / err_map[0])
+        logger.info(
+            r"snr (MMSE) = {:1.3e}, snr (MAP) = {:1.3e}, atime = {:1.3e}, std_time = {:1.3e}, runtime = {:1.3e}".format(
+                snr_mmse,
+                snr_map,
+                atime,
+                stdtime,
+                runtime,
+            )
+        )
+
+    # compute SSIM (only on rank 0, cannot be easily parallelized (unless
+    # rewriting the function manually))
+    dic_x = saver.load_from_process(0, "", 2 * [np.s_[:]], None, "x_mmse", "x_map")
+
+    checkpointer_gt = DistributedCheckpoint(comm, filename)
+    dic = checkpointer_gt.load_from_process(0, "", [np.s_[:]], None, "x")
+
+    ssim_mmse = -1.0
+    ssim_map = -1.0
+
+    if rank == 0:
+        ssim_mmse = ssim(
+            dic["x"], dic_x["x_mmse"], data_range=dic["x"].max() - dic["x"].min()
+        )
+        ssim_map = ssim(
+            dic["x"], dic_x["x_map"], data_range=dic["x"].max() - dic["x"].min()
+        )
+
+        logger.info(
+            r"SSIM (MMSE) = {:1.3e}, SSIM (MAP) = {:1.3e}".format(
+                ssim_mmse,
+                ssim_map,
+            )
+        )
+        logger.info("End: compute metrics")
+        logger.info("Begin: save results to disk")
+
+    # snr, timing, reg. parameter, score
+    select = 10 * [np.s_[:]]
+    chunk_sizes = 10 * [None]
+
+    saver.save_from_process(
+        0,
+        "",
+        select,
+        chunk_sizes,
+        mode="a",
+        snr_mmse=snr_mmse,
+        snr_map=snr_map,
+        ssim_mmse=ssim_mmse,
+        ssim_map=ssim_map,
+        atime=atime,
+        stdtime=stdtime,
+        runtime=runtime,
+        beta=beta_,
+        score=score_,
+        file_id_map=file_id_map,
+    )
+
+    if rank == 0:
+        logger.info("End: save results to disk")
+
+
+if __name__ == "__main__":
+
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+
+    args = argm.parse_args()
+
+    if rank == 0:
+        Path(args.rpath).mkdir(parents=True, exist_ok=True)
+        Path(args.dpath).mkdir(parents=True, exist_ok=True)
+        Path("debug").mkdir(parents=True, exist_ok=True)
+        logger = logging.getLogger(__name__)
+
+        logging.basicConfig(
+            filename=args.logfile,
+            level=logging.INFO,
+            filemode="w",
+            format="%(asctime)s %(levelname)s %(message)s",
+        )
+
+        logger.info("Host: {}".format(MPI.Get_processor_name()))
+
+    else:
+        logger = None
+
+    # * debugging values
+    # args.datafilename = "data_boat_ds1"
+    # args.rpath = "results_boat_ds1_h5"
+    # args.checkpointname = "checkpoint_t="
+    # args.Nbi = 10
+    # args.downsampling = 1
+    # args.checkpoint_frequency = 10
+    # args.Nmc = 30
+    # args.rfile = "final_results"
+    # Path(join(args.rpath, args.sampler)).mkdir(parents=True, exist_ok=True)
+
+    datafilename = join(args.dpath, args.datafilename)
+    checkpointname = join(args.rpath, args.checkpointname)
+    outputname = join(args.rpath, args.rfile)
+
+    main_metrics(
+        comm,
+        rank,
+        datafilename,
+        checkpointname,
+        logger,
+        args.Nbi,
+        args.downsampling,
+        args.Nmc,
+        args.checkpoint_frequency,
+        outputname,
+    )
+
+    pass
diff --git a/src/aaxda/main_s.py b/src/aaxda/main_s.py
new file mode 100644
index 0000000000000000000000000000000000000000..90f4d5c9b1003807759810ab818b03a0c3c9c9ef
--- /dev/null
+++ b/src/aaxda/main_s.py
@@ -0,0 +1,396 @@
+"""Main script to launch the distributed SPA sampler instantied for a linear
+deconvolution problem.
+"""
+
+import cProfile
+import logging
+import sys
+from os.path import join
+from pathlib import Path
+
+import h5py
+import numpy as np
+import utils.args
+from mpi4py import MPI
+
+import aaxda.models.data as data
+from aaxda.models.distributed_convolutions import calculate_local_data_size
+from aaxda.models.models import SyncConvModel
+from aaxda.samplers.parallel.spa_psgla_sync_s import spa_psgla_mpi
+from aaxda.utils.checkpoint import DistributedCheckpoint, SerialCheckpoint
+from aaxda.utils.communications import local_split_range_nd
+
+# import tau
+# https://forum.hdfgroup.org/t/crash-when-writing-parallel-compressed-chunks/6186
+
+
+def main(
+    comm,
+    rank,
+    bool_data,
+    results_path,
+    imagefilename,
+    filename,
+    checkpointname,
+    logger,
+    save_mode="process",
+    warmstart_iter=0,
+    downsampling=1,
+    checkpointflag=False,
+    profiling=False,
+    M=30,
+    Nmc=1000,
+    rho=1.0,
+    alpha=1.0,
+    beta=1.0,
+    checkpoint_frequency=500,
+    monitor_frequency=5,
+    seed=1234,
+    kernel_size=8,
+):
+
+    size = comm.Get_size()
+    ndims = 2
+
+    # * setup communicator
+    grid_size = MPI.Compute_dims(size, ndims)
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # * loading / generating synthetic data (as in Vono et al.)
+    if bool_data:
+
+        if rank == 0:
+            logger.info("Data generation")
+
+            # data parameters
+            kernel_std = 1
+
+            # load image and generate kernel
+            x = data.get_image(imagefilename, M)
+            x = x[::downsampling, ::downsampling]
+            N = np.array(x.shape, dtype="i")
+            h = data.generate_2d_gaussian_kernel(kernel_size, kernel_std)
+        else:
+            N = np.empty((2,), dtype="i")
+            h = np.empty(2 * [kernel_size], dtype="d")
+
+        # send necessary info to each worker
+        comm.Bcast([N, 2, MPI.INT], root=0)
+        comm.Bcast([h, kernel_size**2, MPI.DOUBLE], root=0)
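+        # Buffer-mode Bcast requires a preallocated array of matching size and
+        # dtype on every rank; non-root ranks allocated the empty N and h
+        # buffers above to receive the root values in place.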
+
+        if rank == 0:
+            logger.info("Begin: write image to disk")
+            logger.info("Image size {}x{}".format(*N))
+            f = h5py.File(filename, "w")
+            dset_N = f.create_dataset("N", [2], dtype="i")
+            dset_h = f.create_dataset("h", 2 * [kernel_size], dtype="d")
+            dset_M = f.create_dataset("M", [1], dtype="i")
+            dset_x = f.create_dataset("x", N, dtype="d")
+            dset_N[()] = N
+            dset_h[()] = h
+            dset_M[()] = M
+            dset_x[()] = x
+            f.close()
+            del dset_N, dset_M, dset_h, dset_x
+            del x
+        comm.Barrier()
+
+        if rank == 0:
+            logger.info("End: write image to disk")
+            logger.info("Begin: load image facets")
+
+        # load image tile
+        overlap_size = np.array(h.shape, dtype="i")
+        overlap_size -= 1
+        facet, tile_pixels = data.mpi_load_image_from_h5(
+            comm, grid_size, ranknd, filename, N, overlap_size
+        )
+
+        if rank == 0:
+            logger.info("End: load image facets")
+            logger.info("Begin: generating local data")
+
+        # parallel data generation
+        local_data, local_clean_data, global_slice_data = data.generate_local_data(
+            comm, cartcomm, grid_size, ranknd, facet, h, N, tile_pixels, backward=False
+        )
+
+        if rank == 0:
+            logger.info("End: generating local data")
+            logger.info("Begin: writing data to disk")
+
+        # parallel data save
+        # TODO: use checkpoint object here
+        data_size = N + overlap_size
+        data.mpi_save_data_to_h5(
+            comm, filename, data_size, local_data, local_clean_data, global_slice_data
+        )
+
+        if rank == 0:
+            logger.info("End: writing data to disk")
+    else:
+        # parallel data loading
+        if rank == 0:
+            logger.info("Begin: loading data and images")
+
+        f = h5py.File(filename, "r+", driver="mpio", comm=MPI.COMM_WORLD)
+        N = f["N"][()]
+        M = f["M"][()]
+        h = f["h"][()]
+        f.close()
+
+        overlap_size = np.array(ndims * [kernel_size], dtype="i")
+        overlap_size -= 1
+        data_size = N + overlap_size
+        tile_pixels = local_split_range_nd(grid_size, N, ranknd)
+        tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+        local_data_size, facet_size, facet_size_adj = calculate_local_data_size(
+            tile_size, ranknd, overlap_size, grid_size, backward=False
+        )
+        local_data = data.mpi_load_data_from_h5(
+            comm,
+            ranknd,
+            filename,
+            data_size,
+            local_data_size,
+            tile_pixels,
+            overlap_size,
+            var_name="data",
+            backward=False,
+        )
+        if rank == 0:
+            logger.info("End: loading data and images")
+
+        # parameters of the Gamma prior on the regularization parameter
+        a = 1e-3
+        b = 1e-3
+
+        # SPA sampler
+        rho1 = rho
+        rho2 = rho
+
+        alpha1 = alpha
+        alpha2 = alpha
+
+        # convolution model and checkpoint
+        sync_conv_model = SyncConvModel(
+            N, data_size, h, comm, grid_size, h.itemsize, False
+        )
+
+        if save_mode == "process":
+            # using one checkpointer per process (multiple files)
+            checkpointer = SerialCheckpoint("{}p{}_t=".format(checkpointname, rank))
+        elif save_mode == "single":
+            # ! not supported at the moment (needs debugging and an extension
+            # ! of the loading / saving logic)
+            # use a distributed checkpointer object (write in a single file)
+            checkpointer = DistributedCheckpoint(comm, checkpointname)
+        else:
+            raise ValueError(
+                r"Unkown save mode {}. Possible options are: 'single' or 'process'".format(
+                    save_mode
+                )
+            )
+
+        if rank == 0:
+            logger.info("Parameters defined, setup sampler")
+
+        if profiling:
+            pr = cProfile.Profile()
+            pr.enable()
+
+        # parameter for the maximization of the marginal likelihood
+        Nbi_p = 2
+
+        spa_psgla_mpi(
+            local_data,
+            checkpointname,
+            checkpoint_frequency,
+            warmstart_iter,
+            monitor_frequency,
+            checkpointflag,
+            sync_conv_model,
+            checkpointer,
+            rho1,
+            rho2,
+            alpha1,
+            alpha2,
+            beta,
+            a,
+            b,
+            Nmc,
+            Nbi_p,
+            M,
+            seed,
+            logger,
+        )
+
+        if profiling:
+            pr.disable()
+            # Dump results:
+            # - for binary dump
+            pr.dump_stats("debug/cpu_%d.prof" % comm.rank)
+            # - for text dump
+            with open("debug/cpu_%d.txt" % comm.rank, "w") as output_file:
+                sys.stdout = output_file
+                pr.print_stats(sort="time")
+                sys.stdout = sys.__stdout__
+
+
+if __name__ == "__main__":
+
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+
+    args = utils.args.parse_args()
+
+    if rank == 0:
+        Path(args.rpath).mkdir(parents=True, exist_ok=True)
+        Path(args.dpath).mkdir(parents=True, exist_ok=True)
+        Path("debug").mkdir(parents=True, exist_ok=True)
+        logger = logging.getLogger(__name__)
+
+        logging.basicConfig(
+            filename=args.logfile,
+            level=logging.INFO,
+            filemode="w",
+            format="%(asctime)s %(levelname)s %(message)s",
+        )
+
+        logger.info("Host: {}".format(MPI.Get_processor_name()))
+
+        if args.verbose:
+            logger.info("Verbosity: on")
+        if args.prof:
+            logger.info("Code profiling: on")
+        if args.data:
+            logger.info("Generate data")
+        else:
+            logger.info("AXDA sampling")
+        if args.load:
+            logger.info("Load checkpoint: {}".format(args.checkpointfile))
+
+    else:
+        logger = None
+
+    # * debugging values
+    # args.imfile = "img/cameraman.png"
+    # args.datafilename = "conv_data_cameraman_ds1_M30_k8"
+    # args.rpath = "results_conv_cameraman_ds1_M30_k8_h5"
+    # args.checkpointname = "checkpoint_t="
+    # args.restart = -1
+    # args.downsampling = 1
+    # args.data = False
+    # args.load = False
+    # args.save = "process"
+    # args.checkpoint_frequency = 10
+    # args.Nmc = 30
+    # args.kernel_size = 8
+    # args.M = 30
+    # Path(join(args.rpath, args.sampler)).mkdir(parents=True, exist_ok=True)
+    # args.rho = 1.
+    # args.alpha = 1.
+    # args.beta = 0.2
+
+    datafilename = join(args.dpath, args.datafilename + ".h5")
+    checkpointname = join(args.rpath, args.checkpointname)
+
+    # tau.run("""main(
+    #     comm,
+    #     rank,
+    #     args.data,
+    #     args.rpath,
+    #     args.imfile,
+    #     datafilename,
+    #     args.checkpointname,
+    #     checkpointfile,
+    #     logger,
+    #     profiling=args.prof,
+    #     alpha=args.alpha,
+    #     beta=args.beta,
+    #     Nmc=args.Nmc,
+    #     checkpoint_frequency=args.checkpoint_frequency,
+    #     monitor_frequency=5,
+    # )""")
+
+    main(
+        comm,
+        rank,
+        args.data,
+        args.rpath,
+        args.imfile,
+        datafilename,
+        checkpointname,
+        logger,
+        save_mode=args.save,
+        warmstart_iter=args.restart,
+        downsampling=args.downsampling,
+        checkpointflag=args.load,
+        profiling=args.prof,
+        M=args.M,
+        Nmc=args.Nmc,
+        rho=args.rho,
+        alpha=args.alpha,
+        beta=args.beta,
+        checkpoint_frequency=args.checkpoint_frequency,
+        monitor_frequency=1,
+        seed=args.seed,
+        kernel_size=args.kernel_size,
+    )
+
+    # tau.run(
+    #     """main(
+    #     comm,
+    #     rank,
+    #     args.data,
+    #     args.rpath,
+    #     args.imfile,
+    #     datafilename,
+    #     checkpointfile,
+    #     checkpointname,
+    #     logger,
+    #     warmstart_iter=args.restart,
+    #     downsampling=args.downsampling,
+    #     checkpointflag=args.load,
+    #     profiling=args.prof,
+    #     Nmc=args.Nmc,
+    #     alpha=args.alpha,
+    #     beta=args.beta,
+    #     checkpoint_frequency=args.checkpoint_frequency,
+    #     monitor_frequency=1,  # TODO: to be passed as an input
+    #     seed=args.seed,
+    # )"""
+    # )
+    # MPI.Finalize()
+
+    pass
+
+    # mpiexec -n 2 python main.py
+    # h5dump --header data/data.h5
+    # python -m cProfile -o output.prof path/to/your/script arg1 arg2
+    # python /path/to/your/script arg1 arg2 # when using modifications from the
+    # script above
+
+    # profiling with CProfile:
+    # mpiexec -n 2 python main.py --prof --verbose
+    # mpiexec -n 2 python -m mpi4py aaxda/main.py --prof --verbose
+    # mpiexec -n numprocs python -m mpi4py pyfile [arg] ...
+    #
+    # mpiexec -n 2 python -m mpi4py aaxda/main.py \
+    # --checkpointfile=checkpoint.h5 --checkpointname=checkpoint --data \
+    # --datafilename=data --dpath=data --imfile=img/boat.png \
+    # --logfile=std.log --rpath=results --verbose \
+    # --load --data
+
+    # mpiexec -n 2 python main.py | tee 2>&1 log_file.txt
+    # mpirun -np 2 tau_python -io ./ring
+
+    # another option documented here
+    # https://stackoverflow.com/questions/33503176/profile-parallelized-python-script-with-mpi4py
+    # mpiexec --tag-output -np 2 python -m cProfile main.py
+
+# %%
diff --git a/src/aaxda/main_serial.py b/src/aaxda/main_serial.py
new file mode 100644
index 0000000000000000000000000000000000000000..3dccbf466c4fac57b86380f7a5a6d17a757fb188
--- /dev/null
+++ b/src/aaxda/main_serial.py
@@ -0,0 +1,309 @@
+"""Main script to launch the serial SPA sampler instantied for a linear
+deconvolution problem.
+"""
+
+import cProfile
+import logging
+import sys
+from os.path import join
+from pathlib import Path
+from socket import gethostname
+
+import h5py
+import numpy as np
+import utils.args
+
+from aaxda.models.data import generate_2d_gaussian_kernel, generate_data, get_image
+from aaxda.models.models import SerialConvModel
+from aaxda.samplers.serial.spa_pmyula import spa_pmyula
+from aaxda.samplers.serial.spa_psgla import spa_psgla
+from aaxda.utils.checkpoint import SerialCheckpoint
+
+# import tau
+# https://forum.hdfgroup.org/t/crash-when-writing-parallel-compressed-chunks/6186
+
+
+def main(
+    bool_data,
+    results_path,
+    imagefilename,
+    filename,
+    checkpointfile,
+    checkpointname,
+    logger,
+    sampler="psgla",
+    warmstart_iter=0,
+    downsampling=1,
+    checkpointflag=False,
+    profiling=False,
+    M=30,
+    Nmc=1000,
+    rho=1.0,
+    alpha=1.0,
+    beta=1.0,
+    checkpoint_frequency=500,
+    monitor_frequency=5,
+    seed=1234,
+    kernel_size=8,
+):
+
+    # * loading / generating synthetic data
+    if bool_data:
+        logger.info("Data generation")
+
+        # data parameters
+        kernel_std = 1
+
+        # load image and generate kernel
+        x = get_image(imagefilename, M)
+        x = x[::downsampling, ::downsampling]
+        N = np.array(x.shape, dtype="i")
+        h = generate_2d_gaussian_kernel(kernel_size, kernel_std)
+
+        logger.info("Begin: write image to disk")
+        logger.info("Image size {}x{}".format(*N))
+        f = h5py.File(filename, "w")
+        dset_N = f.create_dataset("N", [2], dtype="i")
+        dset_h = f.create_dataset("h", 2 * [kernel_size], dtype="d")
+        dset_M = f.create_dataset("M", [1], dtype="i")
+        dset_x = f.create_dataset("x", N, dtype="d")
+        dset_N[()] = N
+        dset_h[()] = h
+        dset_M[()] = M
+        dset_x[()] = x
+
+        del dset_N, dset_M, dset_h, dset_x
+
+        logger.info("End: write image to disk")
+        logger.info("Begin: load image")
+
+        # load image tile
+        overlap_size = np.array(h.shape, dtype="i")
+        overlap_size -= 1
+
+        logger.info("End: load image")
+        logger.info("Begin: generating local data")
+
+        # data generation
+        data, clean_data = generate_data(x, h)
+
+        logger.info("End: generating local data")
+        logger.info("Begin: writing data to disk")
+
+        # data save
+        data_size = N + overlap_size
+        dset_data = f.create_dataset("data", data_size, dtype="d")
+        dset_clean_data = f.create_dataset("clean_data", data_size, dtype="d")
+        dset_data[()] = data
+        dset_clean_data[()] = clean_data
+        f.close()
+
+        logger.info("End: writing data to disk")
+    else:
+        logger.info("Begin: loading data and images")
+
+        f = h5py.File(filename, "r")
+        N = f["N"][()]
+        M = f["M"][()]
+        h = f["h"][()]
+        data = f["data"][()]
+        f.close()
+        logger.info("End: loading data and images")
+
+        # parameters of the Gamma prior on the regularization parameter
+        a = 1e-3
+        b = 1e-3
+
+        # SPA sampler
+        rho1 = rho
+        rho2 = rho
+        alpha1 = alpha
+        alpha2 = alpha
+
+        logger.info("Parameters defined, setup sampler")
+
+        if profiling:
+            pr = cProfile.Profile()
+            pr.enable()
+
+        # ! to be revised
+        data_size = np.array(data.shape, dtype="i")
+        serial_chkpt = SerialCheckpoint(checkpointname)
+
+        if sampler == "pmyula":
+            # sampler described in :cite:p:`Vono2019icassp`
+            # ! use circular convolution model (leads to sampling a 0-padded
+            # ! version of the image of interest)
+            serial_model = SerialConvModel(data_size, h, data_size)
+
+            rho3 = rho
+            alpha3 = alpha
+
+            spa_pmyula(
+                data,
+                checkpointname,
+                checkpoint_frequency,
+                warmstart_iter,
+                monitor_frequency,
+                checkpointflag,
+                serial_model,
+                serial_chkpt,
+                rho1,
+                rho2,
+                rho3,
+                alpha1,
+                alpha2,
+                alpha3,
+                beta,
+                a,
+                b,
+                Nmc,
+                M,
+                seed,
+                logger,
+            )
+        else:
+            # proposed sampler
+            # ! linear convolution model
+            serial_model = SerialConvModel(N, h, data_size)
+            spa_psgla(
+                data,
+                checkpointname,
+                checkpoint_frequency,
+                warmstart_iter,
+                monitor_frequency,
+                checkpointflag,
+                serial_model,
+                serial_chkpt,
+                rho1,
+                rho2,
+                alpha1,
+                alpha2,
+                beta,
+                a,
+                b,
+                Nmc,
+                M,
+                seed,
+                logger,
+            )
+
+        if profiling:
+            pr.disable()
+            # Dump results:
+            # - for binary dump
+            pr.dump_stats("debug/cpu_serial.prof")
+            # - for text dump
+            with open("debug/cpu_serial.txt", "w") as output_file:
+                sys.stdout = output_file
+                pr.print_stats(sort="time")
+                sys.stdout = sys.__stdout__
+
+
+if __name__ == "__main__":
+
+    args = utils.args.parse_args()
+
+    Path(args.rpath).mkdir(parents=True, exist_ok=True)
+    Path(args.dpath).mkdir(parents=True, exist_ok=True)
+    Path("debug").mkdir(parents=True, exist_ok=True)
+    logger = logging.getLogger(__name__)
+
+    logging.basicConfig(
+        filename=args.logfile,
+        level=logging.INFO,
+        filemode="w",
+        format="%(asctime)s %(levelname)s %(message)s",
+    )
+
+    logger.info("Host: {}".format(gethostname()))
+
+    if args.verbose:
+        logger.info("Verbosity: on")
+    if args.prof:
+        logger.info("Code profiling: on")
+    if args.data:
+        logger.info("Generate data")
+    else:
+        logger.info("AXDA sampling")
+    if args.load:
+        logger.info("Load checkpoint: {}".format(args.checkpointfile))
+
+    # * debugging values
+    # args.imfile = "img/image_micro_8.h5"
+    # args.datafilename = "data_image_micro_8_ds1_M30_k8"
+    # args.rpath = "results_image_micro_8_ds1_M30_k8_h5"
+    # args.checkpointfile = "checkpoint_serial_t=500"
+    # args.checkpointname = "checkpoint_serial_t="
+    # args.restart = 500
+    # args.downsampling = 1
+    # args.data = False
+    # args.load = False
+    # args.sampler = "pmyula"
+    # args.checkpoint_frequency = 10
+    # args.Nmc = 30
+    # args.kernel_size = 8
+    # Path(join(args.rpath, args.sampler)).mkdir(parents=True, exist_ok=True)
+    # args.rho = 1.
+    # args.alpha = 1.
+    # args.beta = 0.2
+
+    datafilename = join(args.dpath, args.datafilename + ".h5")
+    checkpointfile = join(args.rpath, args.checkpointfile)
+    checkpointname = join(args.rpath, args.checkpointname)
+
+    # tau.run("""main(
+    #     args.data,
+    #     args.rpath,
+    #     args.imfile,
+    #     datafilename,
+    #     args.checkpointname,
+    #     checkpointfile,
+    #     logger,
+    #     profiling=args.prof,
+    #     alpha=args.alpha,
+    #     beta=args.beta,
+    #     Nmc=args.Nmc,
+    #     checkpoint_frequency=args.checkpoint_frequency,
+    #     monitor_frequency=5,
+    # )""")
+
+    main(
+        args.data,
+        args.rpath,
+        args.imfile,
+        datafilename,
+        checkpointfile,
+        checkpointname,
+        logger,
+        sampler=args.sampler,
+        warmstart_iter=args.restart,
+        downsampling=args.downsampling,
+        checkpointflag=args.load,
+        profiling=args.prof,
+        M=args.M,
+        Nmc=args.Nmc,
+        rho=args.rho,
+        alpha=args.alpha,
+        beta=args.beta,
+        checkpoint_frequency=args.checkpoint_frequency,
+        monitor_frequency=1,
+        seed=args.seed,
+        kernel_size=args.kernel_size,
+    )
+
+    pass
+
+    # h5dump --header data/data.h5
+    # python -m cProfile -o output.prof path/to/your/script arg1 arg2
+    # python /path/to/your/script arg1 arg2 # when using modifications from the
+    # script above
+
+    # profiling with CProfile:
+    # python main_serial.py --prof --verbose
+
+    # python aaxda/main_serial.py \
+    # --checkpointfile=checkpoint.h5 --checkpointname=checkpoint --data \
+    # --datafilename=data --dpath=data --imfile=img/boat.png \
+    # --logfile=std.log --rpath=results --verbose \
+    # --load --data
diff --git a/src/aaxda/models/__init__.py b/src/aaxda/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/models/convolutions.py b/src/aaxda/models/convolutions.py
new file mode 100755
index 0000000000000000000000000000000000000000..48371c4d3df861bd70097ff27dfb7ab9d30faac2
--- /dev/null
+++ b/src/aaxda/models/convolutions.py
@@ -0,0 +1,176 @@
+"""Helper functions to implement the FFT-based convolution operator.
+"""
+import numpy as np
+
+
+def fft2_conv(x, h, shape=None):
+    r"""FFT-based 2d convolution.
+
+    Convolve the array ``x`` with the 2d kernel ``h`` using the FFT algorithm.
+    Performs linear or circular convolution depending on the padding needed to
+    reach the desired size ``shape``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array (of size :math:`N`).
+    h : numpy.ndarray
+        Input convolution kernel (of size :math:`M`).
+    shape : tuple of int, optional
+        Desired convolution size (:math:`K \geq \max \{ N, M \}`), by default
+        None.
+
+    Returns
+    -------
+    y : numpy.ndarray
+        Output convolution.
+    fft_h : numpy.ndarray
+        Fourier transform of the convolution kernel ``h`` (of size :math:`K`).
+
+    Raises
+    ------
+    ValueError
+        ``x.shape`` and ``shape`` must have the same length.
+    ValueError
+        ``x.shape`` and ``h.shape`` must have the same length.
+
+    Note
+    ----
+    This function does not allow the adjoint convolution operator to be easily
+    encoded. See :func:`aaxda.models.convolutions.fft_conv` instead.
+    """
+    if shape is None:
+        shape = x.shape
+
+    if not len(x.shape) == len(shape):
+        raise ValueError("x.shape and shape must have the same length")
+
+    if not len(h.shape) == len(shape):
+        raise ValueError("x.shape and h.shape must have the same length")
+
+    if (x.dtype.kind == "c") or (h.dtype.kind == "c"):
+        fft_h = np.fft.fft2(h, shape)
+        y = np.fft.ifft2(fft_h * np.fft.fft2(x, shape))  # cropping handled separately
+    else:
+        fft_h = np.fft.rfft2(h, shape)
+        y = np.fft.irfft2(fft_h * np.fft.rfft2(x, shape), shape)
+
+    return y, fft_h
+
+
+def fft_conv(x, fft_h, shape):
+    r"""FFT-based nd convolution.
+
+    Convolve the array ``x`` with the kernel of Fourier transform ``fft_h``
+    using the FFT. Performs linear or circular convolution depending on
+    the 0-padding initially adopted for ``fft_h``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array (of size :math:`N`).
+    fft_h : numpy.ndarray
+        Input kernel (of size
+        :math:`\lfloor K/2 \rfloor + 1` if real, :math:`K` otherwise).
+    shape : tuple[int]
+        Full shape of the convolution (referred to as :math:`K` above).
+
+    Returns
+    -------
+    y : numpy.ndarray
+        Convolution results.
+    """
+    # turn shape into a list if only given as a scalar
+    if np.isscalar(shape):
+        shape_ = [shape]
+    else:
+        shape_ = shape
+    if x.dtype.kind == "c":
+        y = np.fft.ifftn(fft_h * np.fft.fftn(x, shape_))
+    else:  # assuming h is a real kernel as well
+        y = np.fft.irfftn(fft_h * np.fft.rfftn(x, shape_), shape_)
+
+    return y
+
+
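+# Minimal usage sketch (assumed shapes): for a real image x and kernel h, a
+# linear convolution uses the padded size K = N + M - 1 along each axis:
+#     K = [x.shape[d] + h.shape[d] - 1 for d in range(x.ndim)]
+#     fft_h = np.fft.rfftn(h, K)
+#     y = fft_conv(x, fft_h, K)
+# whereas K = x.shape yields a circular convolution (see the demo below).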
+if __name__ == "__main__":
+    import matplotlib.pyplot as plt
+    import scipy.signal as sg
+    from imageio import imread
+
+    from aaxda.models.models import SerialConvModel
+    from aaxda.models.padding import adjoint_padding, pad_array, pad_array_nd
+
+    # Generate 2D Gaussian convolution kernel
+    vr = 1
+    M = 7
+    if np.mod(M, 2) > 0:  # M odd
+        n = np.arange(-(M - 1) // 2, (M - 1) // 2 + 1)
+    else:
+        n = np.arange(-M // 2, M // 2)
+    h = np.exp(-(n**2 + n[:, np.newaxis] ** 2) / (2 * vr))
+
+    # plt.imshow(h, cmap=plt.cm.gray)
+    # plt.show()
+
+    x = imread("img/cameraman.png").astype(float)
+    N = x.shape
+    M = h.shape
+
+    # version 1: circular convolution
+    K = N
+    hpad = pad_array(h, K, padmode="after")  # after, using fft convention for center
+    yc, H = fft2_conv(x, hpad, K)
+
+    circ_conv = SerialConvModel(np.array(K, dtype="i"), h, np.array(K, dtype="i"))
+    yc2 = circ_conv.apply_direct_operator(x)
+    print("yc2 == yc ? {0}".format(np.allclose(yc2, yc)))
+
+    # plt.imshow(yc, cmap=plt.cm.gray)
+    # plt.show()
+
+    # check adjoint operator (circular convolution)
+    rng = np.random.default_rng(1234)
+    x_ = rng.standard_normal(N)
+    Hx_ = circ_conv.apply_direct_operator(x_)
+    y_ = rng.standard_normal(K)
+    Hadj_y_ = circ_conv.apply_adjoint_operator(y_)
+    hp1 = np.sum(Hx_ * y_)
+    hp2 = np.sum(x_ * Hadj_y_)
+
+    print(
+        "Correct adjoint operator (circular convolution)? {}".format(
+            np.isclose(hp1, hp2)
+        )
+    )
+
+    # version 2: linear convolution
+    # linear convolution
+    K = [N[n] + M[n] - 1 for n in range(len(N))]
+    H = np.fft.rfft2(h, K)
+    yl = np.fft.irfft2(H * np.fft.rfft2(x, K), K)  # zero borders appear around the result
+
+    yl2 = fft_conv(x, H, K)
+    print("yl2 == yl ? {0}".format(np.allclose(yl2, yl)))
+
+    linear_conv = SerialConvModel(np.array(N, dtype="i"), h, np.array(K, dtype="i"))
+    yl3 = linear_conv.apply_direct_operator(x)
+    print("yl3 == yl ? {0}".format(np.allclose(yl3, yl)))
+
+    # plt.imshow(yl, cmap=plt.cm.gray)
+    # plt.show()
+
+    # check adjoint operator (linear convolution)
+    rng = np.random.default_rng(1234)
+    x_ = rng.standard_normal(N)
+    Hx_ = linear_conv.apply_direct_operator(x_)
+    y_ = rng.standard_normal(K)
+    Hadj_y_ = linear_conv.apply_adjoint_operator(y_)
+    hp1 = np.sum(Hx_ * y_)
+    hp2 = np.sum(x_ * Hadj_y_)
+
+    print(
+        "Correct adjoint operator (linear convolution)? {}".format(np.isclose(hp1, hp2))
+    )
+
+    pass
diff --git a/src/aaxda/models/data.py b/src/aaxda/models/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..c19f2382316f5d296b9a35756482cface5bec5f8
--- /dev/null
+++ b/src/aaxda/models/data.py
@@ -0,0 +1,418 @@
+"""Helper functions to generate, save and load the synthetic data using several
+MPI processes. (To be simplified, e.g. with checkpoint and model objects).
+"""
+from os.path import splitext
+
+import h5py
+import numpy as np
+from mpi4py import MPI
+from PIL import Image
+from scipy.signal.windows import gaussian
+
+import aaxda.utils.communications as ucomm
+from aaxda.models.convolutions import fft_conv
+from aaxda.models.distributed_convolutions import create_local_to_global_slice
+from aaxda.models.prox import prox_nonegativity
+
+
+def get_image(imagefilename, M=None):
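+    """Load a test image from a ``.h5`` or ``.png`` file and rescale it.
+
+    Usage sketch, with one of the images shipped in ``img/``::
+
+        x = get_image("img/peppers.h5", M=30)
+
+    Nonpositive pixels are clipped to the smallest positive value, and the
+    image is rescaled so that its maximum is ``M`` (its original maximum by
+    default).
+    """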
+
+    ext = splitext(imagefilename)[-1]
+
+    if ext == ".h5":
+        with h5py.File(imagefilename, "r") as f:
+            x = f["x"][()]
+    else:  # .png file by default
+        img = Image.open(imagefilename, "r")
+        x = np.asarray(img).astype(np.double)
+
+    # ! make sure no pixel is 0 or lower
+    if M is None:
+        M = np.max(x)
+    x[x <= 0] = np.min(x[x > 0])  # np.finfo(x.dtype).eps
+    x = M * x / np.max(x)
+    prox_nonegativity(x)
+
+    return x
+
+
+def generate_2d_gaussian_kernel(kernel_size, kernel_std):
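+    """Generate a normalized 2d Gaussian kernel of size ``kernel_size`` and
+    standard deviation ``kernel_std``, built as the outer product of two 1d
+    Gaussian windows and normalized to sum to 1.
+    """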
+
+    # equivalent to fspecial('gaussian', ...) in Matlab
+    w = gaussian(kernel_size, kernel_std)
+    h = w[:, np.newaxis] * w[np.newaxis, :]
+    h = h / np.sum(h)
+    return h
+
+
+def generate_random_mask(image_size, percent, rng):
+    r"""Generate a random inpainting mask.
+
+    Parameters
+    ----------
+    image_size : tuple of int
+        Shape of the image to be masked.
+    percent : float
+        Fraction of masked pixels; ``1 - percent`` is the fraction of
+        observed pixels in the image (``percent`` must lie in [0, 1]).
+    rng : numpy.random.Generator
+        Numpy random number generator.
+
+    Returns
+    -------
+    mask : numpy.ndarray of bool
+        Masking operator such that ``mask.shape == image_size``.
+
+    Raises
+    ------
+    ValueError
+        Fraction of masked pixels ``percent`` should be such that
+        :math:`0 \leq \text{percent} \leq 1`.
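+
+    Example
+    -------
+    A minimal sketch (mask 30% of a 4x4 image):
+
+    >>> rng = np.random.default_rng(1234)
+    >>> mask = generate_random_mask((4, 4), 0.3, rng)
+    >>> int(np.sum(~mask))  # number of masked pixels, floor(0.3 * 16)
+    4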
+    """
+    if percent < 0 or percent > 1:
+        raise ValueError(
+            "Fraction of observed pixels percent should be such that: 0 <= percent <= 1."
+        )
+
+    N = np.prod(image_size)  # total number of pixels
+    masked_id = np.unravel_index(
+        rng.choice(N, (percent * N).astype(int), replace=False), image_size
+    )
+    mask = np.full(image_size, True, dtype=bool)
+    mask[masked_id] = False
+
+    return mask
+
+
+def generate_local_poisson_inpainting_data(
+    comm, cartcomm, grid_size, ranknd, tile, local_mask, tile_pixels
+):
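+    """Generate the local chunk of synthetic Poisson inpainting data for the
+    current MPI process: apply the local mask to the tile and draw Poisson
+    observations on the observed pixels."""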
+    # * communicator info
+    size = comm.Get_size()
+    rank = comm.Get_rank()
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(1234)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * tile size and indices in the global image / data
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    local_data_size = tile_size
+
+    # * generate masked data
+    Hx = np.zeros(tile_size, dtype="d")
+    Hx[local_mask] = tile[local_mask]
+    local_data = np.zeros(tile_size, dtype="d")
+    local_data[local_mask] = local_rng.poisson(Hx[local_mask])
+
+    # * compute number of observations across all the workers
+    global_data_size = np.zeros(1, dtype="i")
+    local_size = np.sum(local_mask, dtype="i")
+    comm.Allreduce(local_size, global_data_size, op=MPI.SUM)
+
+    # slice for indexing into global arrays
+    global_slice_tile = create_local_to_global_slice(
+        tile_pixels,
+        ranknd,
+        np.zeros(tile_size.size, dtype="i"),
+        local_data_size,
+    )
+    return local_data, Hx, global_slice_tile, global_data_size
+
+
+def generate_local_gaussian_inpainting_data(
+    comm, cartcomm, grid_size, ranknd, tile, local_mask, tile_pixels, isnr
+):
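+    """Generate the local chunk of synthetic Gaussian inpainting data for the
+    current MPI process, with noise variance inferred from the input SNR
+    ``isnr`` (in dB)."""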
+    # * communicator info
+    size = comm.Get_size()
+    rank = comm.Get_rank()
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(1234)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * tile size and indices in the global image / data
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    local_data_size = tile_size
+
+    # * generate masked data
+    Hx = np.zeros(tile_size, dtype="d")
+    Hx[local_mask] = tile[local_mask]
+
+    # * compute noise variance across all workers
+    sqnorm_Hx = np.zeros(1, dtype="d")
+    local_sqnorm_Hx = np.sum(Hx[local_mask] ** 2)
+    global_data_size = np.zeros(1, dtype="i")
+    local_size = np.sum(local_mask, dtype="i")
+
+    comm.Allreduce(local_sqnorm_Hx, sqnorm_Hx, op=MPI.SUM)
+    comm.Allreduce(local_size, global_data_size, op=MPI.SUM)
+    sig2 = 10 ** (-isnr / 10) * sqnorm_Hx / global_data_size
+
+    # * generate noisy data
+    local_data = np.zeros(tile_size, dtype="d")
+    local_data[local_mask] = Hx[local_mask] + np.sqrt(sig2) * local_rng.standard_normal(
+        size=local_size
+    )
+
+    # * slice for indexing into global arrays
+    global_slice_tile = create_local_to_global_slice(
+        tile_pixels,
+        ranknd,
+        np.zeros(tile_size.size, dtype="i"),
+        local_data_size,
+    )
+    return local_data, Hx, global_slice_tile, global_data_size, sig2
+
+
+def mpi_load_image_from_h5(comm, grid_size, ranknd, filename, image_size, overlap_size):
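+    """Load the local overlapping image facet of the current MPI process from
+    an HDF5 file (parallel MPI-IO read through h5py), returning the facet and
+    the pixel range of the underlying tile."""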
+
+    ndims = image_size.size
+
+    # * useful sizes
+    tile_pixels = ucomm.local_split_range_nd(grid_size, image_size, ranknd)
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, overlap=overlap_size, backward=False
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    offset = facet_size - tile_size
+
+    print(
+        "Process {}: facet_size={}, offset={}".format(
+            comm.Get_rank(), facet_size, offset
+        )
+    )
+
+    # * setup useful slices
+    # forward overlap
+    # local_slice_tile = tuple([np.s_[: tile_size[d]] for d in range(ndims)])
+    local_slice_tile = ucomm.get_local_slice(ranknd, grid_size, offset, backward=False)
+
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    print(
+        "Process {}: local_slice_tile={}, offset={}".format(
+            comm.Get_rank(), local_slice_tile, offset
+        )
+    )
+
+    # * parallel loading
+    f = h5py.File(filename, "r", driver="mpio", comm=comm)
+    dset = f["x"]
+    facet = np.empty(facet_size, dtype="d")
+    dset.read_direct(
+        facet,
+        global_slice_tile,
+        local_slice_tile,
+    )
+    f.close()
+
+    return facet, tile_pixels
+
+
+def mpi_write_image_to_h5(
+    comm, grid_size, ranknd, filename, image_size, local_slice_tile, facet, var_name
+):
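+    """Write the local image tile of the current MPI process to an HDF5 file
+    (parallel MPI-IO write through h5py), under the variable name
+    ``var_name``."""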
+
+    ndims = image_size.size
+
+    # * useful sizes
+    tile_pixels = ucomm.local_split_range_nd(grid_size, image_size, ranknd)
+
+    # * setup useful slices
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * parallel write
+    f = h5py.File(filename, "w", driver="mpio", comm=comm)
+    dset = f.create_dataset(var_name, image_size, dtype="d")
+    dset.write_direct(
+        facet,
+        local_slice_tile,
+        global_slice_tile,
+    )
+    f.close()
+
+
+def generate_data(x, h):
+    # serial version, linear convolution
+
+    # * rng
+    rng = np.random.default_rng(1234)
+
+    # local convolution
+    data_size = np.array(x.shape, dtype="i") + np.array(h.shape, dtype="i") - 1
+    fft_h = np.fft.rfftn(h, data_size)
+
+    # ! issue: need to make sure Hx >= 0, not necessarily the case numerically
+    # ! with a fft-based convolution
+    # https://github.com/pytorch/pytorch/issues/30934
+    # Hx = scipy.ndimage.convolve(x, h, output=Hx, mode='constant', cval=0.0)
+    # Hx = convolve2d(x, h, mode='full')
+    Hx = fft_conv(x, fft_h, data_size)
+    prox_nonegativity(Hx)
+    data = rng.poisson(Hx).astype(np.double)
+
+    return data, Hx
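+
+# A hypothetical usage sketch for ``generate_data`` (illustrative file name
+# and sizes; relies on the helpers defined above):
+#
+#     x = get_image("image.h5", M=30)
+#     h = generate_2d_gaussian_kernel(7, 1.0)
+#     data, Hx = generate_data(x, h)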
+
+
+def generate_local_data(
+    comm, cartcomm, grid_size, ranknd, facet, h, image_size, tile_pixels, backward=False
+):
+    # distributed version, linear convolution
+
+    # * communicator info
+    size = comm.Get_size()
+    rank = comm.Get_rank()
+    ndims = image_size.size
+    kernel_size = np.array(h.shape, dtype="i")
+    overlap_size = kernel_size - 1
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(1234)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * facet / tile size and indices in the global image / data
+    # tile_pixels = ucomm.local_split_range_nd(grid_size, image_size, ranknd)
+    # facet_pixels = ucomm.local_split_range_nd(
+    #     grid_size, image_size, ranknd, overlap=overlap_size
+    # )
+    # facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    facet_size = np.array(facet.shape, dtype="i")
+
+    # backward overlap
+    # ! larger number of data points on the border
+    # local_data_size = (
+    #     tile_size + (ranknd == grid_size - 1) * overlap_size
+    # )
+    # forward overlap
+    local_data_size = tile_size + (ranknd == 0) * overlap_size
+    local_conv_size = facet_size + overlap_size
+
+    # * communications to compute the data (distributed convolution)
+    # facet = np.empty(facet_size, dtype="d")
+    (dest, src, resizedsendsubarray, resizedrecvsubarray,) = ucomm.setup_border_update(
+        cartcomm, ndims, facet.itemsize, facet_size, overlap_size, backward=False
+    )
+
+    for d in range(ndims):
+        comm.Sendrecv(
+            [facet, 1, resizedsendsubarray[d]],
+            dest[d],
+            recvbuf=[facet, 1, resizedrecvsubarray[d]],
+            source=src[d],
+        )
+
+    # * free custom types
+    # for d in range(ndims):
+    #     if isvalid_comm[d]:
+    #         resizedsendsubarray[d].Free()
+    #         resizedrecvsubarray[d].Free()
+
+    # local convolution
+    fft_h = np.fft.rfftn(h, local_conv_size)
+    # H = np.fft.rfft2(
+    #     np.fft.fftshift(padding.pad_array(h, N, padmode="around"))
+    # )  # doing as in Vono's reference code
+    local_coeffs = ucomm.slice_valid_coefficients(ranknd, grid_size, overlap_size)
+
+    # ! issue: need to make sure Hx >= 0, not necessarily the case numerically
+    # ! with a fft-based convolution
+    # https://github.com/pytorch/pytorch/issues/30934
+    # Hx = scipy.ndimage.convolve(facet, h, output=Hx, mode='constant', cval=0.0)
+    # Hx = convolve2d(facet, h, mode='full')[local_coeffs]
+    Hx = fft_conv(facet, fft_h, local_conv_size)[local_coeffs]
+    prox_nonegativity(Hx)
+    local_data = local_rng.poisson(Hx)
+
+    # slice for indexing into global arrays
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels,
+        ranknd,
+        overlap_size,
+        local_data_size,
+        backward=backward,
+    )
+
+    return local_data, Hx, global_slice_data
+
+
+def mpi_save_data_to_h5(
+    comm, filename, data_size, local_data, local_clean_data, global_slice_data
+):
+
+    f = h5py.File(filename, "r+", driver="mpio", comm=comm)
+    dset = f.create_dataset("data", data_size, dtype="d")
+    dset[global_slice_data] = local_data
+
+    dset = f.create_dataset("clean_data", data_size, dtype="d")
+    dset[global_slice_data] = local_clean_data
+    f.close()
+
+    return
+
+
+def mpi_load_data_from_h5(
+    comm,
+    ranknd,
+    filename,
+    data_size,
+    local_data_size,
+    tile_pixels,
+    overlap_size,
+    var_name="data",
+    backward=True,
+):
+
+    ndims = data_size.size
+
+    # slice for indexing into global arrays
+    # global_slice_data = tuple(
+    #     [
+    #         np.s_[tile_pixels[d, 0] : tile_pixels[d, 0] + local_data_size[d]]
+    #         for d in range(ndims)
+    #     ]
+    # )
+
+    local_slice = tuple(ndims * [np.s_[:]])
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels, ranknd, overlap_size, local_data_size, backward=backward
+    )
+
+    # loading data
+    f = h5py.File(filename, "r", driver="mpio", comm=comm)
+    dset = f[var_name]
+    local_data = np.empty(local_data_size, dtype="d")
+    dset.read_direct(
+        local_data,
+        global_slice_data,
+        local_slice,
+    )
+    f.close()
+
+    return local_data
+
+
+if __name__ == "__main__":
+
+    pass
diff --git a/src/aaxda/models/distributed_convolutions.py b/src/aaxda/models/distributed_convolutions.py
new file mode 100644
index 0000000000000000000000000000000000000000..e87ec577f54cd52c76e4a0578a8dbd31a9c93539
--- /dev/null
+++ b/src/aaxda/models/distributed_convolutions.py
@@ -0,0 +1,96 @@
+""" Set of helper functions to implement a distributed convolution operator
+and its adjoint.
+"""
+import numpy as np
+
+
+def calculate_local_data_size(
+    tile_size, ranknd, overlap_size, grid_size, backward=True
+):
+    r"""Compute the size of the chunk of convolution hold by the current
+    worker.
+
+    Parameters
+    ----------
+    tile_size : numpy.ndarray[int]
+        Size of the non-overlapping image tile underlying the overlapping
+        facets.
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD grid of MPI processes.
+    overlap_size : numpy.ndarray[int]
+        Size of the overlap along each dimension.
+    grid_size : numpy.ndarray[int]
+        Number of processes along each dimension of the nD MPI process grid.
+    backward : bool, optional
+        Direction of the overlap in the cartesian grid along all the
+        dimensions (forward or backward overlap), by default True.
+
+    Returns
+    -------
+    local_data_size : numpy.ndarray[int]
+        Size of the local chunk of the data owned by the current process.
+    facet_size : numpy.ndarray[int]
+        Size of the overlapping facet handled by the current process (direct
+        operator).
+    facet_size_adj : numpy.ndarray[int]
+        Size of the overlapping facet handled by the current process (adjoint
+        operator).
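+
+    Example
+    -------
+    A minimal sketch for the first worker of a 1d grid of two processes
+    (backward overlap, illustrative sizes):
+
+    >>> calculate_local_data_size(
+    ...     np.array([8]), np.array([0]), np.array([2]), np.array([2])
+    ... )
+    (array([8]), array([8]), array([10]))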
+    """
+
+    if backward:
+        local_data_size = tile_size + (ranknd == grid_size - 1) * overlap_size
+        facet_size = tile_size + (ranknd > 0) * overlap_size
+        facet_size_adj = local_data_size + (ranknd < grid_size - 1) * overlap_size
+    else:
+        local_data_size = tile_size + (ranknd == 0) * overlap_size
+        facet_size = tile_size + (ranknd < grid_size - 1) * overlap_size
+        facet_size_adj = local_data_size + (ranknd > 0) * overlap_size
+
+    return local_data_size, facet_size, facet_size_adj
+
+
+def create_local_to_global_slice(
+    tile_pixels, ranknd, overlap_size, local_data_size, backward=True
+):
+    r"""Create a slice object to place a local chunk of the convolution into
+    the full array structure.
+
+    Parameters
+    ----------
+    tile_pixels : numpy.ndarray[int]
+        Start and end indices of the non-overlapping pixel tile along each
+        dimension (array of shape ``(d, 2)``).
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD grid of MPI processes.
+    overlap_size : numpy.ndarray[int]
+        Size of the overlap along each dimension.
+    local_data_size : numpy.ndarray[int]
+        Size of the local chunk of the data owned by the current process.
+    backward : bool, optional
+        Direction of the overlap in the cartesian grid along all the
+        dimensions (forward or backward overlap), by default True.
+
+    Returns
+    -------
+    global_slice : tuple[slice]
+        Tuple slice to place the local chunk of convolution data into an array
+        representing the full convolution.
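+
+    Example
+    -------
+    A minimal sketch for the first worker of a 1d grid (backward overlap):
+
+    >>> create_local_to_global_slice(
+    ...     np.array([[0, 7]]), np.array([0]), np.array([2]), np.array([8])
+    ... )
+    (slice(0, 8, None),)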
+    """
+
+    ndims = np.size(ranknd)
+
+    # ! offset required only when using forward overlap
+    offset = np.logical_and(not backward, ranknd > 0) * overlap_size
+
+    global_slice = tuple(
+        [
+            np.s_[
+                tile_pixels[d, 0]
+                + offset[d] : tile_pixels[d, 0]
+                + offset[d]
+                + local_data_size[d]
+            ]
+            for d in range(ndims)
+        ]
+    )
+
+    return global_slice
diff --git a/src/aaxda/models/jtv.py b/src/aaxda/models/jtv.py
new file mode 100755
index 0000000000000000000000000000000000000000..0534c437879e9072bd205c6e5cd575e20447f678
--- /dev/null
+++ b/src/aaxda/models/jtv.py
@@ -0,0 +1,450 @@
+"""Implementation of the 2d discrete anistropic total variation and a smoothed
+variant with numba jit support.
+"""
+import numpy as np
+from numba import jit
+
+# ? note: apparently, @njit not compatible with np.r_ or np.c_
+# ? np.diff not compatible with @jit (at least the axis keyword...)
+# ! need to remove keywords for proper jitting
+# ! does not support concatenation over a new dimension (cannot use np.newaxis)
+# ! does not support type elision
+# ! only jit costly parts (by decomposing function), keep flexibility of Python
+# ! as much as possible
+# import importlib
+# importlib.reload(...)
+
+# TODO: investigate jitted nD version for the TV (not only 2D)
+# TODO: try to simplify the 2d implementation of the chunked version of the TV
+# TODO (many conditions to be checked at the moment)
+
+# * Useful numba links
+# https://stackoverflow.com/questions/57662631/vectorizing-a-function-returning-tuple-using-numba-guvectorize
+# https://stackoverflow.com/questions/30363253/multiple-output-and-numba-signatures
+# https://numba.pydata.org/numba-doc/0.17.0/reference/types.html
+
+
+@jit(nopython=True, cache=True)
+def gradient_2d(x):
+    r"""Compute 2d discrete gradient (with jit support).
+
+    Compute the 2d discrete gradient of a 2d input array :math:`\mathbf{x}`,
+    *i.e.*, by computing horizontal and vertical differences (using jit compilation):
+
+    .. math::
+       \nabla(\mathbf{x}) = (\nabla_v\mathbf{x}, \mathbf{x}\nabla_h).
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input 2d array :math:`\mathbf{x}`.
+
+    Returns
+    -------
+    uh : numpy.ndarray
+        Horizontal differences.
+    uv : numpy.ndarray
+        Vertical differences.
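+
+    Example
+    -------
+    A minimal example on a small array:
+
+    >>> x = np.arange(6.0).reshape(2, 3)
+    >>> uh, uv = gradient_2d(x)
+    >>> uh[0]
+    array([1., 1., 0.])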
+    """
+    assert len(x.shape) == 2, "gradient_2d: Invalid input, expected len(x.shape)==2"
+    uh = np.zeros_like(x)
+    uh[:, :-1] = x[:, 1:] - x[:, :-1]  # np.diff(x,1,1) horizontal differences
+    uv = np.zeros_like(x)
+    uv[:-1, :] = x[1:, :] - x[:-1, :]  # np.diff(x,1,0) vertical differences
+    return uh, uv
+
+
+@jit(nopython=True, cache=True)
+def gradient_2d_adjoint(uh, uv):
+    r"""Adjoint of the 2d discrete gradient operator (with jit support).
+
+    Compute the adjoint of the 2d discrete gradient of a 2d input array
+    :math:`\mathbf{x}` (using jit compilation),
+
+    .. math::
+       \nabla^*(\mathbf{y}) = - \text{div} (\mathbf{y})
+       = \nabla_v^*\mathbf{y}_v + \mathbf{y}_h\nabla_h^*.
+
+    Parameters
+    ----------
+    uh : numpy.ndarray, 2d
+        Horizontal differences.
+    uv : numpy.ndarray, 2d
+        Vertical differences.
+
+    Returns
+    -------
+    v : numpy.ndarray
+        Adjoint of the 2d gradient operator, evaluated in
+        :math:`(\mathbf{u}_h, \mathbf{u}_v)`.
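+
+    Example
+    -------
+    A minimal example, applied to the output of :func:`gradient_2d`:
+
+    >>> rng = np.random.default_rng(1234)
+    >>> x = rng.standard_normal((4, 3))
+    >>> uh, uv = gradient_2d(x)
+    >>> gradient_2d_adjoint(uh, uv).shape
+    (4, 3)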
+    """
+    assert (len(uh.shape) == 2) and (
+        len(uv.shape) == 2
+    ), "gradient_2d_adjoint: Invalid input, expected len(uh.shape)==len(uv.shape)==2"
+    # horizontal, vertical
+    v = np.zeros_like(uh)
+    v[0, :] = -uv[0, :]
+    v[1:-1, :] = uv[:-2, :] - uv[1:-1, :]  # -np.diff(uv[:-1,:],1,0)
+    v[-1, :] = uv[-2, :]
+    v[:, 0] -= uh[:, 0]
+    v[:, 1:-1] += uh[:, :-2] - uh[:, 1:-1]  # -np.diff(uv[:,:-1],1,1)
+    v[:, -1] += uh[:, -2]
+    return v
+
+
+# ! try to simplify the structure of this function; make sure uh and uv are
+# ! directly concatenated
+# @jit(nopython=True, cache=True)
+@jit(
+    [
+        "UniTuple(float64[:,:], 2)(float64[:,:], b1[:])",
+        "UniTuple(complex128[:,:], 2)(complex128[:,:], b1[:])",
+    ],
+    nopython=True,
+    cache=True,
+)
+def chunk_gradient_2d(x, islast):
+    r"""Chunk of the 2d discrete gradient (with jit support).
+
+    Compute a chunk of the 2d discrete gradient operator (using jit
+    compilation).
+
+    Parameters
+    ----------
+    x : numpy.ndarray[float64 or complex128], 2d
+        Input array.
+    islast : numpy.ndarray, bool, 1d
+        Vector indicating whether the chunk is the last one along each
+        dimension of the Cartesian process grid.
+
+    Returns
+    -------
+    uh : numpy.ndarray[float64 or complex128], 2d
+        Horizontal differences.
+    uv : numpy.ndarray[float64 or complex128], 2d
+        Vertical differences.
+    """
+    assert (
+        len(x.shape) == 2 and islast.size == 2
+    ), "gradient_2d: Invalid input, expected len(x.shape)==len(offset.shape)==2"
+    # horizontal differences
+    if islast[1]:  # true if facet is the last along dimension 1
+        if islast[0]:
+            uh = np.zeros(x.shape, dtype=x.dtype)
+            uh[:, :-1] = x[:, 1:] - x[:, :-1]
+        else:
+            uh = np.zeros((x.shape[0] - 1, x.shape[1]), dtype=x.dtype)
+            uh[:, :-1] = x[:-1, 1:] - x[:-1, :-1]
+    else:
+        if islast[0]:
+            uh = x[:, 1:] - x[:, :-1]
+        else:
+            uh = x[:-1, 1:] - x[:-1, :-1]
+
+    # vertical differences
+    if islast[0]:  # true if facet is the last along dimension 0
+        if islast[1]:
+            uv = np.zeros(x.shape, dtype=x.dtype)
+            uv[:-1, :] = x[1:, :] - x[:-1, :]
+        else:
+            uv = np.zeros((x.shape[0], x.shape[1] - 1), dtype=x.dtype)
+            uv[:-1, :] = x[1:, :-1] - x[:-1, :-1]
+    else:
+        if islast[1]:
+            uv = x[1:, :] - x[:-1, :]
+        else:
+            uv = x[1:, :-1] - x[:-1, :-1]
+
+    return uh, uv
+
+
+# ! try to simplify the structure of this function; make sure uh and uv are
+# ! directly concatenated
+# @jit(nopython=True, cache=True)
+@jit(
+    [
+        "void(f8[:,:], f8[:,:], f8[:,:], b1[:], b1[:])",
+        "void(c16[:,:], c16[:,:], c16[:,:], b1[:], b1[:])",
+    ],
+    nopython=True,
+    cache=True,
+)
+def chunk_gradient_2d_adjoint(uh, uv, v, isfirst, islast):
+    r"""Chunk of the adjoint 2d discrete gradient (with jit support).
+
+    Compute a chunk of the adjoint 2d discrete gradient.
+
+    Parameters
+    ----------
+    uh : numpy.ndarray[float64 or complex128], 2d
+        Horizontal differences.
+    uv : numpy.ndarray[float64 or complex128], 2d
+        Vertical differences.
+    v : numpy.ndarray[float64 or complex128], 2d
+        Output array (updated in-place).
+    isfirst : numpy.ndarray, bool, 1d
+        Vector indicating whether the chunk is the first one along each
+        dimension of the Cartesian process grid.
+    islast : numpy.ndarray, bool, 1d
+        Vector indicating whether the chunk is the last one along each
+        dimension of the Cartesian process grid.
+
+    .. note::
+        The array ``v`` is updated in-place.
+    """
+    # TODO: implicit conditions on the size of the facet: needs to be checked
+    # TODO  beforehand
+    # TODO: find a simpler way to encode this
+
+    assert (len(uh.shape) == 2) and (
+        len(uv.shape) == 2
+    ), "gradient_2d_adjoint: Invalid input, expected \
+        len(uh.shape)==len(uv.shape)==len(offset.shape)==2"
+    # v = np.zeros_like(uh, shape=Nk)
+
+    # vertical
+    # overlap from the left
+    if isfirst[0]:
+        if isfirst[1]:
+            v[0, :] -= uv[0, :]
+            if islast[0]:
+                v[1:-1, :] += uv[:-2, :] - uv[1:-1, :]
+                v[-1, :] += uv[-2, :]
+            else:
+                v[1:, :] += uv[:-1, :] - uv[1:, :]
+        else:
+            v[0, :] -= uv[0, 1:]
+            if islast[0]:
+                v[1:-1, :] += uv[:-2, 1:] - uv[1:-1, 1:]
+                v[-1, :] += uv[-2, 1:]
+            else:
+                v[1:, :] += uv[:-1, 1:] - uv[1:, 1:]
+    else:
+        if isfirst[1]:
+            if islast[0]:
+                v[:-1, :] += uv[:-2, :] - uv[1:-1, :]
+                v[-1, :] += uv[-2, :]
+            else:
+                v += uv[:-1, :] - uv[1:, :]
+        else:
+            if islast[0]:
+                v[:-1, :] += uv[:-2, 1:] - uv[1:-1, 1:]
+                v[-1, :] += uv[-2, 1:]
+            else:
+                v += uv[:-1, 1:] - uv[1:, 1:]
+
+    # horizontal
+    if isfirst[1]:
+        if isfirst[0]:
+            v[:, 0] -= uh[:, 0]
+            if islast[1]:
+                v[:, 1:-1] += uh[:, :-2] - uh[:, 1:-1]
+                v[:, -1] += uh[:, -2]
+            else:
+                v[:, 1:] += uh[:, :-1] - uh[:, 1:]
+        else:
+            v[:, 0] -= uh[1:, 0]
+            if islast[1]:
+                v[:, 1:-1] += uh[1:, :-2] - uh[1:, 1:-1]
+                v[:, -1] += uh[1:, -2]
+            else:
+                v[:, 1:] += uh[1:, :-1] - uh[1:, 1:]
+    else:
+        if isfirst[0]:
+            if islast[1]:
+                v[:, :-1] += uh[:, :-2] - uh[:, 1:-1]
+                v[:, -1] += uh[:, -2]
+            else:
+                v += uh[:, :-1] - uh[:, 1:]
+        else:
+            if islast[1]:
+                v[:, :-1] += uh[1:, :-2] - uh[1:, 1:-1]
+                v[:, -1] += uh[1:, -2]
+            else:
+                v += uh[1:, :-1] - uh[1:, 1:]
+    return
+
+
+@jit(nopython=True, cache=True)
+def tv(x):
+    r"""Discrete anisotropic total variation (TV) (with jit support).
+
+    Compute the discrete anisotropic total variation of a 2d array (using
+    jit compilation):
+
+    .. math::
+       \text{TV}(\mathbf{x}) = \Vert \nabla (\mathbf{x}) \Vert_{2, 1},
+
+    where :math:`\nabla` is the 2d discrete gradient operator.
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+
+    Returns
+    -------
+    float
+        Total variation evaluated in ``x``.
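+
+    Example
+    -------
+    A minimal example (single unit pixel in the center of a 3x3 image):
+
+    >>> x = np.zeros((3, 3))
+    >>> x[1, 1] = 1.0
+    >>> round(float(tv(x)), 3)  # 2 + sqrt(2)
+    3.414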
+    """
+    u = gradient_2d(x)
+    return np.sum(np.sqrt(np.abs(u[0]) ** 2 + np.abs(u[1]) ** 2))
+
+
+@jit(nopython=True, cache=True)
+def smooth_tv(x, eps):
+    r"""Smooth approximation to the 2d discrete total variation (TV)
+    (with jit support).
+
+    Compute a smooth approximation to the discrete anisotropic total variation
+    of a 2d array (with jit support):
+
+    .. math::
+        \text{TV}_{\varepsilon}(\mathbf{x}) = \sum_{n=1}^N \sum_{m=1}^M \sqrt{
+        [\nabla(\mathbf{x})]_{1, m, n}^2 + [\nabla(\mathbf{x})]_{2, m, n}^2
+        + \varepsilon}, \; \varepsilon > 0,
+
+    where :math:`\nabla` is the 2d discrete gradient operator.
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+    eps : float, > 0
+        Smoothing parameter.
+
+    Returns
+    -------
+    float
+        Smooth TV evaluated in ``x``, :math:`\text{TV}_{\varepsilon}(\mathbf{x})`.
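+
+    Example
+    -------
+    A minimal example (zero image, so only ``eps`` contributes):
+
+    >>> x = np.zeros((2, 2))
+    >>> float(smooth_tv(x, 1.0))  # 4 pixels, each contributing sqrt(eps)
+    4.0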
+    """
+    u = gradient_2d(x)
+    return np.sum(np.sqrt(np.abs(u[0]) ** 2 + np.abs(u[1]) ** 2 + eps))
+
+
+@jit(nopython=True, cache=True)
+def gradient_smooth_tv(x, eps):
+    r"""Jitted gradient of a smoothed 2d anisotropic total variation (with jit
+    support).
+
+    Compute the gradient of a smooth approximation to the 2d discrete
+    anisotropic total variation, evaluated in the input array `x` (with jit
+    support).
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+    eps : float, > 0
+        Smoothing parameter.
+
+    Returns
+    -------
+    numpy.ndarray, 2d
+        Gradient of :math:`\text{TV}_\varepsilon`, evaluated in ``x``.
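+
+    Example
+    -------
+    A minimal example on a random array:
+
+    >>> rng = np.random.default_rng(1234)
+    >>> x = rng.standard_normal((4, 3))
+    >>> gradient_smooth_tv(x, 1e-3).shape
+    (4, 3)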
+    """
+    u = gradient_2d(x)
+    w = np.sqrt(np.abs(u[0]) ** 2 + np.abs(u[1]) ** 2 + eps)
+    return gradient_2d_adjoint(u[0] / w, u[1] / w)
+
+
+if __name__ == "__main__":
+    import timeit
+
+    rng = np.random.default_rng(1234)
+    x = rng.standard_normal((10, 5))
+    eps = np.finfo(float).eps
+
+    stmt_s = "tv.gradient_smooth_tv(x, eps)"
+    setup_s = cleandoc(
+        """
+    import tv
+    import from __main__ import x, eps
+    """
+    )
+    t_tv_np = timeit.timeit(
+        stmt_s, number=100, globals=globals()
+    )  # save in pandas (prettier display)
+    print("TV (numpy version): ", t_tv_np)
+
+    _ = gradient_smooth_tv(x, eps)  # trigger numba compilation
+    stmt_s = "gradient_smooth_tv(x, eps)"
+    setup_s = "from __main__ import gradient_smooth_tv, x, eps"
+    t_tv_numba = timeit.timeit(stmt_s, setup=setup_s, number=100)
+    print("TV (numba version): ", t_tv_numba)
+
+    # ! multiple facets (serial test)
+    # uh0, uv0 = gradient_2d(x)
+
+    # yh0 = (1 + 1j) * rng.standard_normal(uh0.shape)
+    # yv0 = (1 + 1j) * rng.standard_normal(uv0.shape)
+    # z0 = gradient_2d_adjoint(yh0, yv0)
+
+    # ndims = 2
+    # grid_size = np.array([3, 2], dtype="i")
+    # nchunks = np.prod(grid_size)
+    # N = np.array(x.shape, dtype="i")
+
+    # overlap = (grid_size > 1).astype(int)
+    # isfirst = np.empty((nchunks, ndims), dtype=bool)
+    # islast = np.empty((nchunks, ndims), dtype=bool)
+
+    # range0 = []
+    # range_direct = []
+    # range_adjoint = []
+    # uh = np.zeros(x.shape)
+    # uv = np.zeros(x.shape)
+    # z = np.zeros(x.shape, dtype=complex)
+
+    # for k in range(nchunks):
+
+    #     ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+    #     islast[k] = ranknd == grid_size - 1
+    #     isfirst[k] = ranknd == 0
+    #     range0.append(ucomm.local_split_range_nd(grid_size, N, ranknd))
+    #     Nk = range0[k][:,1] - range0[k][:,0] + 1
+
+    #     # * direct operator
+    #     # version with backward overlap
+    #     # range_direct.append(ucomm.local_split_range_nd(grid_size, N, ranknd, overlap=overlap, backward=True))
+    #     # facet = x[range_direct[k][0,0]:range_direct[k][0,1]+1, \
+    #     # range_direct[k][1,0]:range_direct[k][1,1]+1]
+    #     # uh_k, uv_k = chunk_gradient_2d(facet, islast[k])
+
+    #     # start = range_direct[k][:,0]
+    #     # stop = start + np.array(uv_k.shape, dtype="i")
+    #     # uv[start[0]:stop[0], start[1]:stop[1]] = uv_k
+    #     # stop = start + np.array(uh_k.shape, dtype="i")
+    #     # uh[start[0]:stop[0], start[1]:stop[1]] = uh_k
+
+    #     # version with forward overlap
+    #     range_direct.append(ucomm.local_split_range_nd(grid_size, N, ranknd, overlap=overlap, backward=False))
+    #     facet = x[range_direct[k][0,0]:range_direct[k][0,1]+1, \
+    #     range_direct[k][1,0]:range_direct[k][1,1]+1]
+    #     uh_k, uv_k = chunk_gradient_2d(facet, islast[k])
+
+    #     start = range0[k][:,0]
+    #     stop = start + np.array(uv_k.shape, dtype="i")
+    #     uv[start[0]:stop[0], start[1]:stop[1]] = uv_k
+    #     stop = start + np.array(uh_k.shape, dtype="i")
+    #     uh[start[0]:stop[0], start[1]:stop[1]] = uh_k
+
+    #     # * adjoint (backward overlap only, forward more difficult to encode)
+    #     range_adjoint.append(ucomm.local_split_range_nd(grid_size, N, ranknd, overlap=overlap, backward=True))
+    #     facet_h = yh0[range_adjoint[k][0,0]:range_adjoint[k][0,1]+1, \
+    #     range_adjoint[k][1,0]:range_adjoint[k][1,1]+1]
+    #     facet_v = yv0[range_adjoint[k][0,0]:range_adjoint[k][0,1]+1, \
+    #     range_adjoint[k][1,0]:range_adjoint[k][1,1]+1]
+    #     x_k = np.zeros(Nk, dtype=complex)
+    #     chunk_gradient_2d_adjoint(facet_h, facet_v, x_k, isfirst[k], islast[k])
+
+    #     start = range0[k][:,0]
+    #     stop = start + Nk
+    #     z[start[0]:stop[0], start[1]:stop[1]] = x_k
+
+    # print(np.allclose(uh, uh0))
+    # print(np.allclose(uv, uv0))
+    # print(np.allclose(z, z0))
+
+    pass
diff --git a/src/aaxda/models/model_slice.py b/src/aaxda/models/model_slice.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c5a42277602b76b2d22e0fc9db0c810fd0a986d
--- /dev/null
+++ b/src/aaxda/models/model_slice.py
@@ -0,0 +1,41 @@
+# from abc import ABCMeta, abstractmethod
+import numpy as np
+from aaxda.utils.communications import (
+    get_local_slice,
+    local_split_range_nd,
+    slice_valid_coefficients,
+)
+
+
+class ModelSlice:  # metaclass=ABCMeta
+    def __init__(self, nchunks, N, index, overlap=0, backward=True):
+        self.nchunks = nchunks
+        self.array_size = N
+        self.index = index
+        self.index_nd = np.array(np.unravel_index(index, nchunks), dtype="i")
+        self.overlap = overlap
+        self.backward = backward
+        self.ndims = nchunks.size
+
+        # self.local_to_global_slice = self.ndims * [np.s_[:]]
+        # self.global_to_global_slice = self.ndims * [np.s_[:]]
+
+        # ! alternative: define slices as None, only instantiate in daughter
+        # ! classes
+        self.local_to_global_slice = local_split_range_nd(
+            self.nchunks, self.array_size, self.index_nd, self.overlap, self.backward
+        )
+        self.global_to_local_slice = get_local_slice(
+            self.index_nd, self.nchunks, self.overlap, self.backward
+        )
+
+
+class LinearConvolutionSlice(ModelSlice):
+    def __init__(self, nchunks, N, index, overlap=0, backward=True):
+        super().__init__(nchunks, N, index, overlap, backward)
+        self.local_valid_coefficients = slice_valid_coefficients(
+            self.index_nd, self.nchunks, self.overlap
+        )
+
+
+# TODO: check again circular convolution case (to be documented)
diff --git a/src/aaxda/models/models.py b/src/aaxda/models/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0bc0bbd468ed5bff30e03d75d56855720db916c
--- /dev/null
+++ b/src/aaxda/models/models.py
@@ -0,0 +1,606 @@
+"""Base model class, given with a serial and distributed linear convolution
+operator.
+"""
+from abc import ABC, abstractmethod
+
+import numpy as np
+from mpi4py import MPI
+
+import aaxda.utils.communications as ucomm
+import aaxda.utils.communicators as comms
+from aaxda.models.convolutions import fft_conv
+from aaxda.models.distributed_convolutions import calculate_local_data_size
+
+
+class BaseModel(ABC):
+    r"""Base model object gathering the parameters of the measurement operator
+    underlying the inverse problem to be solved.
+
+    Attributes
+    ----------
+    image_size : numpy.ndarray of int, of size ``d``
+        Full image size.
+    data_size : numpy.ndarray of int, of size ``d``
+        Full data size.
+    """
+
+    def __init__(
+        self,
+        image_size,
+        data_size,
+    ):
+        """BaseModel constructor.
+
+        Parameters
+        ----------
+        image_size : numpy.ndarray of int, of size ``d``
+            Full image size.
+        data_size : numpy.ndarray of int
+            Full data size.
+        """
+        # if not image_size.size == data_size.size:
+        #     raise ValueError(
+        #         "image_size and data_size must have the same number of elements"
+        #     )
+        self.image_size = image_size
+        self.data_size = data_size
+        self.ndims = image_size.size
+
+    @abstractmethod
+    def apply_direct_operator(self, input_image):  # pragma: no cover
+        r"""Implementation of the direct operator to update the input array
+        ``input_image`` (from image to data space).
+
+        Parameters
+        ----------
+        input_image : numpy.ndarray
+            Input array (image space).
+
+        Returns
+        -------
+        NotImplemented
+
+        Note
+        ----
+        The method needs to be implemented in any class inheriting from
+        BaseModel.
+        """
+        return NotImplemented
+
+    @abstractmethod
+    def apply_adjoint_operator(self, input_data):  # pragma: no cover
+        r"""Implementation of the adjoint operator to update the input array
+        ``input_data`` (from data to image space).
+
+        Parameters
+        ----------
+        input_data : numpy.ndarray
+            Input array (data space).
+
+        Returns
+        -------
+        NotImplemented
+
+        Note
+        ----
+        The method needs to be implemented in any class inheriting from
+        BaseModel.
+        """
+        return NotImplemented
+
+
+# * Convolution model object (serial and distributed)
+# ! keep kernel out of the object? (e.g., for blind deconvolution)
+class SerialConvModel(BaseModel):
+    r"""Model object implementing a serial convolution operator.
+
+    Attributes
+    ----------
+    image_size : numpy.ndarray of int, of size ``d``
+        Full image size.
+    kernel : numpy.ndarray
+        Input kernel. The array should have ``d`` axis, such that
+        ``kernel.shape[i] < image_size[i]`` for ``i in range(d)``.
+    data_size : numpy.ndarray of int, of size ``d``
+        Full data size.
+        - If ``data_size == image_size``: circular convolution;
+        - If ``data_size == image_size + kernel_size - 1``: linear convolution.
+    """
+
+    # TODO: make sure the interface works for both linear and circular
+    # TODO- convolutions
+    def __init__(
+        self,
+        image_size,
+        kernel,
+        data_size,
+    ):
+        r"""SerialConvModel constructor.
+
+        Parameters
+        ----------
+        image_size : numpy.ndarray of int, of size ``d``
+            Full image size.
+        kernel : numpy.ndarray of float
+            Input kernel. The array should have ``d`` axis, such that
+            ``kernel.shape[i] < image_size[i]`` for ``i in range(d)``.
+        data_size : numpy.ndarray of int, of size ``d``
+            Full data size.
+            - If ``data_size == image_size``: circular convolution;
+            - If ``data_size == image_size + kernel_size - 1``: linear convolution.
+
+        Raises
+        ------
+        ValueError
+            ``image_size`` and ``data_size`` must have the same number of
+            elements.
+        ValueError
+            ``kernel`` should have ``ndims = len(image_size)`` dimensions.
+        TypeError
+            Only real-valued kernel supported.
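+
+        Example
+        -------
+        A minimal sketch (2d linear convolution, illustrative sizes):
+
+        >>> image_size = np.array([8, 8], dtype="i")
+        >>> kernel = np.ones((3, 3)) / 9
+        >>> data_size = image_size + np.array(kernel.shape, dtype="i") - 1
+        >>> model = SerialConvModel(image_size, kernel, data_size)
+        >>> model.apply_direct_operator(np.ones((8, 8))).shape
+        (10, 10)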
+        """
+        if not image_size.size == data_size.size:
+            raise ValueError(
+                "image_size and data_size must have the same number of elements"
+            )
+        super(SerialConvModel, self).__init__(image_size, data_size)
+
+        if not len(kernel.shape) == self.ndims:
+            raise ValueError("kernel should have ndims = len(image_size) dimensions")
+
+        if kernel.dtype.kind == "c":
+            raise TypeError("only real-valued kernel supported")
+
+        self.kernel = kernel
+        self.fft_kernel = np.fft.rfftn(self.kernel, self.data_size)
+        self.valid_coefficients = tuple(
+            [np.s_[: self.image_size[d]] for d in range(self.ndims)]
+        )
+
+    def apply_direct_operator(self, input_image):
+        r"""Implementation of the direct operator to update the input array
+        ``input_image`` (from image to data space).
+
+        Parameters
+        ----------
+        input_image : numpy.ndarray of float
+            Input array (image space).
+
+        Returns
+        -------
+        numpy.ndarray
+            Convolution result (direct operator).
+        """
+        return fft_conv(input_image, self.fft_kernel, self.data_size)
+
+    def apply_adjoint_operator(self, input_data):
+        r"""Implementation of the adjoint operator to update the input array
+        ``input_data`` (from data to image space).
+
+        Parameters
+        ----------
+        input_data : numpy.ndarray of float
+            Input array (data space).
+
+        Returns
+        -------
+        numpy.ndarray
+            Convolution result (adjoint operator).
+        """
+        return fft_conv(input_data, np.conj(self.fft_kernel), self.data_size)[
+            self.valid_coefficients
+        ]
+
+
+# ! keep kernel out of the object?
+class SyncConvModel(BaseModel):
+    r"""Synchronous distributed implementation of a (linear) convolution
+    model.
+
+    Attributes
+    ----------
+    ...
+    """
+
+    def __init__(
+        self,
+        image_size,
+        data_size,
+        kernel,
+        comm,
+        grid_size,
+        itemsize,
+        circular_boundaries,
+        direction=False,
+    ):
+        r"""Synchronous distributed implementation of a (linear) convolution
+        model.
+
+        Parameters
+        ----------
+        image_size : numpy.ndarray of int, of size ``d``
+            Full image size.
+        data_size : numpy.ndarray of int, of size ``d``
+            Full data size.
+        kernel : numpy.ndarray of float
+            Input convolution kernel.
+        comm : mpi4py.MPI.Comm
+            Underlying MPI communicator.
+        grid_size : list of int, of size ``d``
+            Number of workers along each of the ``d`` dimensions of the
+            communicator grid.
+        itemsize : numpy.dtype.itemsize
+            Size (in bytes) of the scalar type to be handled during the
+            communications.
+        circular_boundaries : bool
+            Indicates whether periodic boundary conditions need to be
+            considered for the communicator grid along each axis.
+        direction : bool, optional
+            Direction of the overlap between facets along all the axis (True
+            for backward overlap, False for forward overlap). By default False.
+
+        Raises
+        ------
+        ValueError
+            ``image_size`` and ``data_size`` must have the same number of
+            elements.
+        ValueError
+            ``kernel`` should have ``ndims = len(image_size)`` dimensions.
+        TypeError
+            Only real-valued kernel supported.
+        """
+        if not image_size.size == data_size.size:
+            raise ValueError(
+                "image_size and data_size must have the same number of elements"
+            )
+
+        super(SyncConvModel, self).__init__(image_size, data_size)
+        self.grid_size = np.array(grid_size, dtype="i")
+        self.comm = comm
+
+        # * Cartesian communicator and nd rank
+        self.cartcomm = self.comm.Create_cart(
+            dims=grid_size,
+            periods=self.ndims * [circular_boundaries],
+            reorder=False,
+        )
+        self.rank = comm.Get_rank()
+        # self.ranknd = np.unravel_index(self.rank, grid_size)
+        self.ranknd = np.array(self.cartcomm.Get_coords(self.rank), dtype="i")
+
+        # * useful dimensions
+        if not len(kernel.shape) == self.ndims:
+            raise ValueError("kernel should have ndims = len(image_size) dimensions")
+        if kernel.dtype.kind == "c":
+            raise TypeError("only real-valued kernel supported")
+        self.overlap_size = np.array(kernel.shape, dtype="i") - 1
+        tile_pixels = ucomm.local_split_range_nd(
+            self.grid_size, self.image_size, self.ranknd, backward=direction
+        )
+        self.tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+        (
+            self.local_data_size,
+            self.facet_size,
+            self.facet_size_adj,
+        ) = calculate_local_data_size(
+            self.tile_size,
+            self.ranknd,
+            self.overlap_size,
+            self.grid_size,
+            backward=direction,
+        )
+
+        # facet (convolution)
+        self.local_conv_size = self.facet_size + self.overlap_size
+        self.offset = self.facet_size - self.tile_size
+        self.offset_adj = self.facet_size_adj - self.tile_size
+
+        self.fft_kernel = np.fft.rfftn(kernel, self.local_conv_size)
+
+        # * useful slices
+        # extract valid coefficients after local convolutions
+        self.local_slice_valid_conv = ucomm.slice_valid_coefficients(
+            self.ranknd, self.grid_size, self.overlap_size
+        )
+        # retrieve image tile from fft-based convolution (adjoint conv. operator)
+        self.local_slice_conv = tuple(
+            [np.s_[: self.tile_size[d]] for d in range(self.ndims)]
+        )
+        # slice to set value of local convolution in the adjoint buffer
+        self.local_slice_conv_adj = ucomm.get_local_slice(
+            self.ranknd, self.grid_size, self.offset_adj, backward=not direction
+        )
+
+        # * implementation of the distributed direct operator
+        self.direct_communicator = comms.SyncCartesianCommunicator(
+            self.comm,
+            self.cartcomm,
+            grid_size,
+            itemsize,
+            self.facet_size,
+            self.overlap_size,
+            direction,
+        )
+
+        # * implementation of the distributed adjoint operator
+        self.adjoint_communicator = comms.SyncCartesianCommunicator(
+            self.comm,
+            self.cartcomm,
+            grid_size,
+            itemsize,
+            self.facet_size_adj,
+            self.overlap_size,
+            not direction,
+        )
+
+    # ? change interface to pass the output array, to be updated in place?
+    # (would need to do the same for the serial case)
+    def apply_direct_operator(self, input_image):
+        r"""Implementation of the direct operator to update the input array
+        ``input_image`` (from image to data space).
+
+        Parameters
+        ----------
+        input_image : numpy.ndarray of float
+            Input buffer array (image space), of size ``self.facet_size``.
+
+        Returns
+        -------
+        y : numpy.ndarray
+            Result of the direct operator using the information from the local
+            image facet.
+
+        Note
+        ----
+        The input buffer ``input_image`` is updated in-place.
+        """
+        self.direct_communicator.update_borders(input_image)
+        y = fft_conv(input_image, self.fft_kernel, self.local_conv_size)[
+            self.local_slice_valid_conv
+        ]
+        return y
+
+    # ? change interface to pass the output array, to be updated in place?
+    # (would need to do the same for the serial case)
+    def apply_adjoint_operator(self, input_data):
+        r"""Implementation of the adjoint operator to update the input array
+        ``input_data`` (from data to image space).
+
+        Parameters
+        ----------
+        input_data : numpy.ndarray of float
+            Input buffer array (data space), of size ``self.facet_size_adj``.
+
+        Returns
+        -------
+        x : numpy.ndarray
+            Result of the adjoint operator using the information from the local
+            data facet.
+
+        Note
+        ----
+        The input buffer ``input_data`` is updated in-place.
+        """
+        self.adjoint_communicator.update_borders(input_data)
+        x = fft_conv(input_data, np.conj(self.fft_kernel), self.local_conv_size)[
+            self.local_slice_conv
+        ]
+        return x
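+
+# A hypothetical usage sketch for ``SyncConvModel`` (illustrative only; it
+# assumes an MPI run, e.g. ``mpiexec -n 2 python script.py``, with a 2x1
+# process grid):
+#
+#     comm = MPI.COMM_WORLD
+#     image_size = np.array([16, 16], dtype="i")
+#     kernel = np.ones((3, 3)) / 9
+#     data_size = image_size + np.array(kernel.shape, dtype="i") - 1
+#     model = SyncConvModel(
+#         image_size, data_size, kernel, comm, [2, 1],
+#         np.dtype("d").itemsize, circular_boundaries=False,
+#     )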
+
+
+# * Inpainting model object (serial and distributed)
+class SerialInpaintingModel(BaseModel):
+    r"""Model object implementing an inpainting operator in a serial algorithm.
+
+    Attributes
+    ----------
+    image_size : numpy.ndarray of int, of size ``d``
+        Full image size.
+    mask : numpy.ndarray
+        Input mask operator. The array should have ``d`` axes, such that
+        ``mask.shape[i] == image_size[i]`` for ``i in range(d)``.
+    mask_id : numpy.ndarray of int
+        Array of indices corresponding to the observed points (as given by
+        ``numpy.nonzero``).
+    """
+
+    def __init__(
+        self,
+        image_size,
+        mask,
+    ):
+        r"""SerialInpaintingModel constructor.
+
+        Parameters
+        ----------
+        image_size : numpy.ndarray of int, of size ``d``
+            Full image size.
+        mask : numpy.ndarray
+            Input mask operator for the current process. The array should have
+            ``d`` axes, such that ``mask.shape[i] == image_size[i]`` for
+            ``i in range(d)``.
+
+        Raises
+        ------
+        ValueError
+            ``mask`` and image should have the same size.
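+
+        Example
+        -------
+        A minimal sketch (observe two pixels of a 2x2 image):
+
+        >>> image_size = np.array([2, 2], dtype="i")
+        >>> mask = np.array([[True, False], [False, True]])
+        >>> model = SerialInpaintingModel(image_size, mask)
+        >>> model.apply_direct_operator(np.arange(4.0).reshape(2, 2))
+        array([0., 3.])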
+        """
+        if not np.all(mask.shape == image_size):
+            raise ValueError("mask and image should have the same size")
+
+        # ! flattened data dimension after applying the inpainting (= cropping)
+        # ! operator
+        data_size = np.array([np.sum(mask)], dtype="i")
+        super(SerialInpaintingModel, self).__init__(image_size, data_size)
+
+        self.mask = mask
+        self.mask_id = np.nonzero(mask)
+
+    def apply_direct_operator(self, input_image):
+        r"""Implementation of the direct operator to update the input array
+        ``input_image`` (from image to data space, cropping).
+
+        Parameters
+        ----------
+        input_image : numpy.ndarray of float
+            Input array (image space).
+
+        Returns
+        -------
+        numpy.ndarray
+            Result of the inpainting operator (direct operator).
+        """
+        # self.mask * input_image
+        return input_image[self.mask_id]
+
+    def apply_adjoint_operator(self, input_data):
+        r"""Implementation of the adjoint operator to update the input array
+        ``input_data`` (from data to image space, zero-padding).
+
+        Parameters
+        ----------
+        input_data : numpy.ndarray of float
+            Input array (data space).
+
+        Returns
+        -------
+        numpy.ndarray
+            Result of the inpainting operator (the operator is self-adjoint).
+        """
+        # self.mask * input_data
+        output_image = np.zeros(self.image_size, dtype=input_data.dtype)
+        output_image[self.mask_id] = input_data
+        return output_image
+
+
+class SyncInpaintingModel(BaseModel):
+    r"""Synchronous distributed implementation of an inpainting operator. Once
+    distributed, MPI processes containing different image tiles never need to
+    communicate.
+
+    Attributes
+    ----------
+    image_size : numpy.ndarray of int, of size ``d``
+        Full image size.
+    mask : numpy.ndarray
+        Input chunk of a mask operator for the current MPI process. The
+        overall array should have ``d`` axes.
+    comm : mpi4py.MPI.Comm
+        Underlying MPI communicator.
+    grid_size : list of int, of size ``d``
+        Number of workers along each of the ``d`` dimensions of the
+        communicator grid.
+    data_size : numpy.ndarray of int
+        Total number of observed data points (over all the MPI processes).
+    local_data_size : int
+        Number of observed data points defined from the local ``mask``.
+    mask_id : numpy.ndarray of int
+        Array of indices corresponding to the local observed points (as given
+        by ``numpy.nonzero``).
+    """
+
+    def __init__(
+        self,
+        image_size,
+        mask,
+        comm,
+        grid_size,
+    ):
+        r"""Synchronous distributed implementation of an inpainting operator.
+
+        Parameters
+        ----------
+        image_size : numpy.ndarray of int, of size ``d``
+            Full image size.
+        mask : numpy.ndarray
+            Input chunk of a mask operator for the current MPI process. The
+            overall array should have ``d`` axes.
+        comm : mpi4py.MPI.Comm
+            Underlying MPI communicator.
+        grid_size : list of int, of size ``d``
+            Number of workers along each of the ``d`` dimensions of the
+            communicator grid.
+
+        Raises
+        ------
+        ValueError
+            local mask and image should have the same size.
+        """
+        # ! flattened data dimension after applying the inpainting (= cropping)
+        # ! operator
+        self.local_data_size = np.array(np.sum(mask), dtype="i")
+        data_size = np.empty(1, dtype="i")
+        # ! all processes need the total count: Allreduce rather than Reduce
+        comm.Allreduce(
+            [self.local_data_size, MPI.INT],
+            [data_size, MPI.INT],
+            op=MPI.SUM,
+        )
+
+        super(SyncInpaintingModel, self).__init__(image_size, data_size)
+        self.grid_size = np.array(grid_size, dtype="i")
+        self.comm = comm
+
+        # * Cartesian communicator and nd rank
+        self.cartcomm = self.comm.Create_cart(
+            dims=grid_size,
+            periods=self.ndims * [False],
+            reorder=False,
+        )
+        self.rank = comm.Get_rank()
+        # self.ranknd = np.unravel_index(self.rank, grid_size)
+        self.ranknd = np.array(self.cartcomm.Get_coords(self.rank), dtype="i")
+
+        # * useful dimensions
+        tile_pixels = ucomm.local_split_range_nd(
+            self.grid_size, self.image_size, self.ranknd
+        )
+        self.tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+        if not np.all(mask.shape == self.tile_size):
+            raise ValueError("local mask and image tile should have the same size")
+
+        self.mask = mask
+        self.mask_id = np.nonzero(mask)
+
+    # ? change interface to pass the output array, to be updated in place?
+    # ? add a new function for in-place update?
+    def apply_direct_operator(self, input_image):
+        r"""Implementation of the direct operator to update the input array
+        ``input_image`` (from image to data space).
+
+        Parameters
+        ----------
+        input_image : numpy.ndarray of float
+            Input buffer array (image space), of size ``self.facet_size``.
+
+        Returns
+        -------
+        y : numpy.ndarray
+            Result of the direct operator using the information from the local
+            image facet.
+        """
+        # y = self.mask * input_image
+        y = input_image[self.mask_id]
+        return y
+
+    # ? change interface to pass the output array, to be updated in place?
+    # ? add a new function for in-place update?
+    def apply_adjoint_operator(self, input_data):
+        r"""Implementation of the adjoint operator to update the input array
+        ``input_data`` (from data to image space).
+
+        Parameters
+        ----------
+        input_data : numpy.ndarray of float
+            Input buffer array (data space), of size ``self.data_size``.
+
+        Returns
+        -------
+        x : numpy.ndarray
+            Result of the adjoint operator using the information from the local
+            data facet.
+        """
+        # x = self.mask * input_data
+        x = np.zeros(self.tile_size, dtype=input_data.dtype)
+        x[self.mask_id] = input_data
+        return x
diff --git a/src/aaxda/models/padding.py b/src/aaxda/models/padding.py
new file mode 100755
index 0000000000000000000000000000000000000000..1fd684d83c848222a78c15c21ed07e5f3dc8ebf4
--- /dev/null
+++ b/src/aaxda/models/padding.py
@@ -0,0 +1,437 @@
+""" Set of helper functions to pad or crop a numpy array to a predefined size.
+"""
+import numpy as np
+
+
+def crop_array(y, output_size, padmode="after", center_convention=False):
+    r"""Crop an array to a specified size.
+
+    Crop the input array ``y`` to the desired size ``output_size``. Removes values
+    on one end or on both sides of each dimension.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input array.
+    output_size : tuple[int]
+        Size of the output array.
+    padmode : str, optional
+        Remove values around or after the content of the array along each
+        dimension. By default "after".
+    center_convention : bool, optional
+        Convention adopted for the center of the array (only for around). By
+        default False (following the same convention as the `np.fft.fftshift`
+        function).
+
+    Returns
+    -------
+    numpy.ndarray
+        Cropped array.
+
+    Raises
+    ------
+    ValueError
+        Ensures ``y.shape`` and ``output_size`` have the same length.
+    ValueError
+        Size after cropping should be smaller than the size of the input array.
+
+    Example
+    -------
+    >>> array_shape = y.shape
+    >>> output_size = [n//2 for n in array_shape]
+    >>> x = crop_array(y, output_size)
+    """
+    array_shape = y.shape
+
+    if not len(y.shape) == len(output_size):
+        raise ValueError("`x.shape` and `array_shape` must have the same length")
+
+    if any(np.array(array_shape) < np.array(output_size)):
+        raise ValueError(
+            "All the elements in `array_shape` should be greater \
+            than `x.shape`."
+        )
+
+    crop_size = [array_shape[n] - output_size[n] for n in range(len(output_size))]
+    if padmode == "after":  # add trailing zeros
+        start_crop = [0 for n in range(len(output_size))]
+        stop_crop = crop_size
+    else:  # pad around
+        if (
+            center_convention
+        ):  # center in index "np.floor((output_size+1)/2)-1" (imfilter convention)
+            start_crop = [
+                int(np.floor(crop_size[n] / 2)) for n in range(len(output_size))
+            ]
+            stop_crop = [
+                int(np.ceil(crop_size[n] / 2)) for n in range(len(output_size))
+            ]
+        else:  # center in index "np.floor(output_size/2)" (np.fftshift convention)
+            start_crop = [
+                int(np.ceil(crop_size[n] / 2)) for n in range(len(output_size))
+            ]
+            stop_crop = [
+                int(np.floor(crop_size[n] / 2)) for n in range(len(output_size))
+            ]
+
+    # ! use `array_shape[n] - stop_crop[n]` rather than `-stop_crop[n]`: a
+    # ! zero crop on the right end would otherwise yield an empty slice
+    return y[
+        tuple(
+            [
+                np.s_[start_crop[n] : array_shape[n] - stop_crop[n]]
+                for n in range(len(output_size))
+            ]
+        )
+    ]
+
+
+def crop_array_nd(x, lsize, rsize):
+    r"""Cropping an array.
+
+    Crop the input array ``x`` from the left (resp. right) using the
+    entries in ``lsize`` (resp. ``rsize``).
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    lsize : tuple[int or None]
+        Number of elements removed from the left along each dimension. If no
+        element is removed along a dimension, the corresponding value should
+        be ``None``.
+    rsize : tuple[int or None]
+        Opposite of the number of elements removed from the right along each
+        dimension (i.e., a negative value). If no element is removed along a
+        dimension, the corresponding value should be ``None``.
+
+    Returns
+    -------
+    y : numpy.ndarray
+        Cropped array.
+
+    Raises
+    ------
+    ValueError
+        Ensures ``x.shape``, ``lsize`` and ``rsize`` have the same length.
+
+    Example
+    -------
+    >>> lsize = [n for n in len(x.shape)]
+    >>> rsize = [-n for n in len(x.shape)]
+    >>> y = crop_array(x, lsize, rsize)
+    """
+    if not (len(lsize) == len(x.shape) and len(rsize) == len(x.shape)):
+        raise ValueError("`x.shape`, `lsize` and `rsize` must have the same length.")
+
+    return x[tuple([np.s_[lsize[n] : rsize[n]] for n in range(len(x.shape))])]
+
+
+def pad_array(
+    x, output_size, padmode="after", center_convention=False, mode="constant"
+):
+    r"""Zero-padding an array.
+
+    Zero-pad the input array ``x`` to the desired size ``output_size``. Inserts
+    zeros before or after the content of the array along each dimension.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    output_size : tuple[int]
+        Size of the array after padding.
+    padmode : str, optional
+        Padding mode, around or after the content of the array along each
+        dimension. By default "after".
+    center_convention : bool, optional
+        Convention adopted for the center of the array (only for the `around`
+        padding mode). Defaults to False, following the same convention as the
+        `np.fft.fftshift` function.
+    mode : str, optional
+        Type of padding ("constant", "symmetric", ...). Same options as the
+        `numpy.pad` function. By default "constant" (zero-padding).
+
+    Returns
+    -------
+    y : numpy.ndarray
+        Padded array.
+
+    Raises
+    ------
+    ValueError
+        Ensures ``x.shape`` and ``output_size`` have the same number of elements.
+    ValueError
+        Size after padding should be larger than the size of the input array.
+
+    Example
+    -------
+    >>> x = np.ones((2, 2))
+    >>> output_size = [2 * n for n in x.shape]
+    >>> y = pad_array(x, output_size)
+
+    Note
+    ----
+    See the ``padding_func`` interface of `numpy.pad` to pad a pre-allocated
+    array in-place along a specified dimension.
+    """
+    array_shape = x.shape
+
+    if not len(output_size) == len(x.shape):
+        raise ValueError("`x.shape` and `output_size` must have the same length.")
+
+    if any(np.array(output_size) < np.array(array_shape)):
+        raise ValueError(
+            "All the elements in `output_size` should be greater than `x.shape`."
+        )
+
+    padding = [output_size[n] - array_shape[n] for n in range(len(array_shape))]
+
+    if padmode == "after":  # add trailing zeros
+        shift = [[0, padding[n]] for n in range(len(padding))]
+    else:  # pad around
+        if (
+            center_convention
+        ):  # center in index "np.floor((array_shape+1)/2)-1" (imfilter convention)
+            shift = [
+                [int(np.floor(padding[n] / 2)), int(np.ceil(padding[n] / 2))]
+                for n in range(len(array_shape))
+            ]
+        else:  # center in index "np.floor(array_shape/2)" (fftshift convention)
+            shift = [
+                [int(np.ceil(padding[n] / 2)), int(np.floor(padding[n] / 2))]
+                for n in range(len(array_shape))
+            ]
+
+    y = np.pad(x, shift, mode=mode)
+
+    return y
+
+
+def pad_array_nd(x, lsize, rsize, mode="constant"):
+    r"""Padding an array using a specific boundary condition.
+
+    Pad the input array ``x`` following the boundary condition ``mode``,
+    pre-inserting a number of elements based on the entries of ``lsize``, and
+    trailing elements based on the entries in ``rsize`` along each dimension.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    lsize : tuple[int]
+        Number of elements to be added to the left.
+    rsize : tuple[int]
+        Number of elements to be added to the right.
+    mode : str, optional
+        Type of padding ("constant", "symmetric", ...). Same options as the
+        `numpy.pad` function. By default "constant" (zero-padding).
+
+    Returns
+    -------
+    y : numpy.ndarray
+        Padded array.
+
+    Raises
+    ------
+    ValueError
+        Ensures ``x.shape``, ``lsize`` and ``rsize`` have the same length.
+
+    Example
+    -------
+    >>> x = np.ones((2, 2))
+    >>> lsize = [1 for n in x.shape]
+    >>> rsize = [1 for n in x.shape]
+    >>> y = pad_array_nd(x, lsize, rsize)
+    """
+    array_shape = x.shape
+
+    if not (len(lsize) == len(array_shape) and len(rsize) == len(array_shape)):
+        raise ValueError("`x.shape`, `lsize` and `rsize` must have the same length.")
+
+    shift = [[int(lsize[n]), int(rsize[n])] for n in range(len(array_shape))]
+    y = np.pad(x, shift, mode=mode)
+
+    return y
+
+
+def adjoint_padding(y, lsize, rsize, mode="constant"):
+    """Adjoint of the padding operator corresponding to a given extension mode.
+
+    Adjoint of a boundary extension operator, following the boundary condition
+    ``mode`` according to which ``lsize`` elements have been pre-inserted and
+    ``rsize`` appended along each dimension of the array.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input array.
+    lsize : tuple[int or None]
+        Number of elements added to the left along each dimension. If no
+        element added on a dimension, the corresponding value should be
+        ``None``.
+    rsize : tuple[int or None]
+        Opposite of the number of elements added to the right along each
+        dimension. If no element added on a dimension, the corresponding value
+        should be ``None``.
+    mode : str, optional
+        Type of padding ("constant", "symmetric", ...). Limited to the
+        "constant" (zero-padding), "symmetric" (half-point symmetric) and
+        "wrap" (circular) extension modes of the `numpy.pad` function. By
+        default "constant".
+
+    Returns
+    -------
+    x : numpy.ndarray
+        Adjoint of the padding operator evaluated in ``y``.
+
+    Raises
+    ------
+    ValueError
+        Ensures ``y.shape``, ``lsize`` and ``rsize`` have the same length.
+    ValueError
+        Unknown extension ``mode``.
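+
+    Example
+    -------
+    A minimal 1d sketch with the "constant" mode, for which the adjoint
+    reduces to cropping the extension:
+
+    >>> rng = np.random.default_rng(0)
+    >>> y = rng.standard_normal((7,))
+    >>> x = adjoint_padding(y, [2], [-2], mode="constant")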
+    """
+    array_shape = y.shape
+    ndims = len(array_shape)
+
+    if not (len(lsize) == ndims and len(rsize) == ndims):
+        raise ValueError("`x.shape`, `lsize` and `rsize` must have the same length.")
+
+    if mode == "constant":
+        x = crop_array_nd(y, lsize, rsize)
+
+    elif mode == "symmetric":
+        sel_core = tuple([np.s_[lsize[d] : rsize[d]] for d in range(ndims)])
+        x_ = np.copy(y)
+
+        # fold extension successively along each dimension (flip + sum)
+        for d in range(ndims):
+
+            # ! use the builtin `max`: `np.max((ndims - d - 1), 0)` would pass
+            # ! 0 as the `axis` argument and fail on a scalar input
+            lselx = tuple(
+                d * [np.s_[:]]
+                + [np.s_[lsize[d] : 2 * lsize[d]]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            rselx = tuple(
+                d * [np.s_[:]]
+                + [np.s_[2 * rsize[d] : rsize[d]]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            lsely = tuple(
+                d * [np.s_[:]]
+                + [np.s_[: lsize[d]]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            rsely = tuple(
+                d * [np.s_[:]]
+                + [np.s_[rsize[d] :]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            # left-hand side extension folded on first entries
+            x_[lselx] += np.flip(x_[lsely], axis=d)
+
+            # right-hand side extension folded on last entries
+            x_[rselx] += np.flip(x_[rsely], axis=d)
+        x = x_[sel_core]
+
+    elif mode == "wrap":
+        sel_core = tuple([np.s_[lsize[d] : rsize[d]] for d in range(ndims)])
+        x_ = np.copy(y)
+
+        # fold extension successively along each dimension (sum first elements
+        # with last ones)
+        for d in range(ndims):
+            if rsize[d] is not None:
+                ext_size = lsize[d] - rsize[d]
+            else:
+                ext_size = lsize[d]
+
+            # ! builtin `max` (cf. symmetric mode above)
+            lselx = tuple(
+                d * [np.s_[:]]
+                + [np.s_[lsize[d] : ext_size]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            rselx = tuple(
+                d * [np.s_[:]]
+                + [np.s_[-ext_size : rsize[d]]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            lsely = tuple(
+                d * [np.s_[:]]
+                + [np.s_[: lsize[d]]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            rsely = tuple(
+                d * [np.s_[:]]
+                + [np.s_[rsize[d] :]]
+                + max(ndims - d - 1, 0) * [np.s_[:]]
+            )
+
+            # right-hand side extension aliased on first entries
+            x_[lselx] += x_[rsely]
+
+            # left-hand side extension aliased on last entries
+            x_[rselx] += x_[lsely]
+        x = x_[sel_core]
+    else:
+        raise ValueError("Unknown extension `mode`: {}".format(mode))
+
+    return x
+
+
+if __name__ == "__main__":
+    import matplotlib.pyplot as plt
+    from imageio import imread
+
+    # Generate 2D Gaussian convolution kernel
+    vr = 1
+    M = 7
+    # if np.mod(M, 2) > 0:  # M odd
+    #     n = np.arange(-(M - 1) // 2, (M - 1) // 2 + 1)
+    # else:
+    #     n = np.arange(-M // 2, M // 2)
+    # h = np.exp(-(n ** 2 + n[:, np.newaxis] ** 2) / (2 * vr))
+
+    # plt.imshow(h, cmap=plt.cm.gray)
+    # plt.show()
+
+    # x = imread("img/cameraman.png")
+    # N = x.shape
+    # M = h.shape
+
+    # # version 1: circular convolution
+    # # circular convolution: pad around and fftshift kernel for nicer results
+    # input_size = N
+    # hpad = pad_array(h, input_size, padmode="after")  # around
+
+    # plt.imshow(hpad, cmap=plt.cm.gray)
+    # plt.show()
+
+    # testing symmetric padding (with adjoint) (constant is fine)
+    rng = np.random.default_rng(1234)
+    x = rng.standard_normal(size=(M,))
+    y = rng.standard_normal(size=(M + 4,))
+
+    Hx = pad_array(
+        x, [M + 4], padmode="around", mode="symmetric"
+    )  # around, 2 on both sides
+    Hadj_y = adjoint_padding(y, [2], [-2], mode="symmetric")
+
+    hp1 = np.sum(Hx * y)
+    hp2 = np.sum(x * Hadj_y)
+
+    print("Correct adjoint operator? {}".format(np.isclose(hp1, hp2)))
+
+    # 2D: ok
+    # hpad = pad_array(
+    #     h, [M[0] + 3, M[1] + 1], padmode="around", mode="symmetric"
+    # )  # around
+    # plt.imshow(hpad, cmap=plt.cm.gray)
+    # plt.show()
+
+    pass
diff --git a/src/aaxda/models/prox.py b/src/aaxda/models/prox.py
new file mode 100755
index 0000000000000000000000000000000000000000..6fdf47d0bd882a073673e415f921d7424b242af8
--- /dev/null
+++ b/src/aaxda/models/prox.py
@@ -0,0 +1,471 @@
+"""Math library: implementation of a few selected functions and their proximal
+operator."""
+import copy as cp
+
+import numpy as np
+from numba import float64, jit, vectorize
+
+from aaxda.models.tv import gradient_2d, gradient_2d_adjoint, tv
+
+
+@jit(nopython=True, cache=True)
+def kullback_leibler(x, y):
+    r"""Evaluate the Kullback-Leibler (KL) divergence.
+
+    Compute :math:`d_{\text{KL}}(y \mathrel{\Vert} x)`, the Kullback-Leibler
+    (KL) divergence between the arrays ``y`` and ``x``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    y : numpy.ndarray
+        Input array (first term in the KL divergence).
+
+    Returns
+    -------
+    float
+        Value of the KL divergence :math:`d_{\text{KL}}(y \mathrel{\Vert} x)`.
+
+    Example
+    -------
+    >>> y = np.full((2, 2), 8)
+    >>> x = np.full((2, 2), 5)
+    >>> kl_yx = kullback_leibler(x, y)
+
+    Note
+    ----
+    - By convention :cite:p:`Figueiredo2010`, :math:`0 \log(0) = 0`.
+    - An assertion should be added to check ``x`` and ``y`` have the same size.
+    """
+    # ! issue here: need all entries > 0 to avoid Nan values
+    return np.sum(
+        x
+        - y
+        * (
+            1
+            - np.log(
+                np.maximum(y, np.finfo(y.dtype).eps)
+                / np.maximum(x, np.finfo(x.dtype).eps)
+            )
+        )
+    )  # add auxiliary constants to ensure KL is >= 0
+    # return np.sum(x - y * np.log(np.maximum(x, np.finfo(x.dtype).eps)))
+    # if np.any(x <= 0):
+    #     kl_yx = np.inf
+    # else:
+    # mask = y > 0
+    # kl_yx = np.sum(x) - np.sum(y[mask] * np.log(x[mask]))
+    # return kl_yx
+
+
+@jit(nopython=True, cache=True)
+def prox_kullback_leibler(x, y, lam=1.0):
+    r"""Proximal operator of the Kullback-Leibler divergence.
+
+    Evaluate the proximal operator of the Kullback-Leibler divergence
+    :math:`d_{\text{KL}} (y \mathrel{\Vert} \cdot)` in :math:`x`, i.e.
+    :math:`\text{prox}_{\lambda d_{\text{KL}} (y \mathrel{\Vert} \cdot)} (x)`,
+    with :math:`\lambda > 0`.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    y : numpy.ndarray
+        Input array (first term in the KL divergence).
+    lam : float, optional
+        Multiplicative constant, by default 1.
+
+    Returns
+    -------
+    numpy.ndarray
+        Evaluation of the proximal operator
+        :math:`\text{prox}_{\lambda d_{\text{KL}} (y \mathrel{\Vert} \cdot)} (x)`.
+
+    Raises
+    ------
+    ValueError
+        Checks whether :math:`\lambda > 0`.
+
+    Example
+    -------
+    >>> y = np.full((2, 2), 8)
+    >>> x = np.full((2, 2), 5)
+    >>> z = prox_kullback_leibler(x, y, lam=1)
+    """
+    if lam <= 0:
+        raise ValueError("`lam` should be positive.")
+    x1 = x - lam
+
+    return (x1 + np.sqrt(x1**2 + 4 * lam * y)) / 2
+
+
+@vectorize([float64(float64, float64)], nopython=True, cache=True)
+def hard_thresholding(x, thres):  # pragma: no cover
+    """Hard-thresholding operator.
+
+    Apply a hard-thresholding operator to the input value ``x``, with
+    threshold value ``thres``.
+
+    Parameters
+    ----------
+    x : float64
+        Input scalar.
+    thres : float64
+        Threshold value.
+
+    Returns
+    -------
+    float64
+        Hard-thresholded input.
+
+    Example
+    -------
+    >>> x = -1.
+    >>> hard_thresholding(x, 0.5)
+    """
+    if x >= thres:
+        return x
+    else:
+        return 0.0
+
+
+@jit(nopython=True, cache=True)
+def prox_nonegativity(x):
+    r"""Projection onto the nonnegative orthant.
+
+    Evaluate the proximal operator of the indicator function
+    :math:`\iota_{ \cdot \geq 0}` on the array ``x``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+
+    Note
+    ----
+    The input array ``x`` is modified in-place.
+
+    Example
+    -------
+    >>> x = np.full((2, 2), -1)
+    >>> prox_nonegativity(x)
+    """
+    for i in range(len(x)):
+        x[i] = hard_thresholding(x[i], 0.0)
+
+
+@jit(nopython=True, cache=True)
+def l21_norm(x, axis=0):
+    r"""Compute the :math:`\ell_{2,1}` norm of an array.
+
+    Compute the :math:`\ell_{2,1}` norm of the input array ``x``, where the
+    underlying :math:`\ell_2` norm acts along the specified ``axis``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    axis : int, optional
+        Axis along which the :math:`\ell_2` norm is taken. By default 0.
+
+    Returns
+    -------
+    float
+        :math:`\ell_{2,1}` norm of ``x``.
+
+    Example
+    -------
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal((2, 2))
+    >>> l21_x = l21_norm(x, axis=0)
+    """
+    return np.sum(np.sqrt(np.sum(x**2, axis=axis)))
+
+
+@jit(nopython=True, cache=True)
+def prox_l21norm_conj(x, lam=1.0, axis=0):
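+    r"""Proximal operator of the conjugate of the scaled :math:`\ell_{2,1}`
+    norm, i.e. (by Moreau decomposition), the projection onto the
+    :math:`\ell_{2,\infty}` ball of radius ``lam`` (:math:`\ell_2` norm taken
+    along ``axis``)."""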
+    return x / np.maximum(np.sqrt(np.sum(x**2, axis=axis)) / lam, 1.0)
+
+
+# ! to be debugged: jit creates segfault when using a large number of MPI cores
+# @jit(nopython=True, cache=True)
+def prox_l21norm(x, lam=1.0, axis=0):
+    r"""Proximal operator of :math:`\ell_{2,1}` norm.
+
+    Evaluate the proximal operator of the :math:`\ell_{2, 1}` norm in `x`, i.e.
+    :math:`\text{prox}_{\lambda \Vert \cdot \Vert_{2,1}} (\mathbf{x})`, with
+    :math:`\lambda > 0`.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input array.
+    lam : float, optional
+        Multiplicative constant, by default 1.
+    axis : int, optional
+        Axis along which the :math:`\ell_2` norm is taken, by default 0.
+
+    Returns
+    -------
+    numpy.ndarray
+        Evaluation of the proximal operator :math:`\text{prox}_{\lambda \Vert
+        \cdot \Vert_{2,1}}(\mathbf{x})`.
+
+    Raises
+    ------
+    ValueError
+        Checks whether :math:`\lambda > 0`.
+
+    Example
+    -------
+    >>> rng = np.random.default_rng()
+    >>> x = rng.standard_normal((2, 2))
+    >>> y = prox_l21norm(x, lam=1., axis=0)
+    """
+    if lam <= 0:
+        raise ValueError("`lam` should be positive.")
+
+    return x * (1 - 1 / np.maximum(np.sqrt(np.sum(x**2, axis=axis)) / lam, 1.0))
+
+
+def prox_tv_primal_dual(
+    y, tau, lam=1.0, tol=1e-5, max_iter=int(1e6), verbose=False, rho=1.99
+):  # pragma: no cover
+    r"""Proximal operator of the discrete TV.
+
+    Compute the proximal operator of :math:`\lambda \text{TV}(\mathbf{x})`
+    (:math:`\lambda > 0`) with the Condat-Vu primal-dual algorithm
+    :cite:p:`Condat2013,Vu2013`.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input array.
+    tau : float
+        Proximal parameter :math:`\tau > 0`. Influences the convergence speed.
+    lam : float, optional
+        TV regularization parameter :math:`\lambda > 0`, by default 1.
+    tol : float, optional
+        Convergence tolerance (relative variation of the objective function),
+        by default 1e-5.
+    max_iter : int, optional
+        Maximum number of iterations, by default ``int(1e6)``.
+    verbose : bool, optional
+        Display convergence monitoring, by default False.
+    rho : float, optional
+        Relaxation parameter, in :math:`[1, 2[`. By default 1.99.
+
+    Returns
+    -------
+    x : numpy.ndarray
+        Estimated proximal operator.
+    crit : numpy.ndarray (vector)
+        Evolution of the objective function along the iterations.
+    u : numpy.ndarray, of shape ``(2, *x.shape)``
+        Dual variable at convergence.
+
+    Note
+    ----
+    This function corresponds to the original MATLAB implementation associated
+    with :cite:p:`Condat2014spl`, available from the
+    `author's webpage <https://lcondat.github.io/software.html>`__.
+    """
+    sigma = 1 / tau / 8  # proximal parameter
+
+    # prox_tau_f = lambda z: (z + tau * y) / (1 + tau)
+    # prox_sigma_g_conj = lambda u: u / np.maximum(
+    #     np.sqrt(np.sum(u ** 2, axis=0)) / lam, 1
+    # )
+    # prox_sigma_g_conj = lambda u: u - prox_l21norm(u, lam=lam, axis=0)
+
+    # initialize primal and dual variable
+    x2 = cp.deepcopy(y)
+    # u2 = prox_sigma_g_conj(gradient_2d(x2))
+    u2 = prox_l21norm_conj(gradient_2d(x2), lam=lam, axis=0)
+
+    # auxiliary variables and convergence monitoring
+    cy = np.sum(y**2) / 2
+    primalcostlowerbound = 0
+    stopping_crit = tol + 1
+    crit = np.zeros(int(max_iter))
+    count = 0
+
+    while stopping_crit > tol and count < max_iter:
+
+        # x = prox_tau_f(x2 - tau * gradient_2d_adjoint(u2))
+        # u = prox_sigma_g_conj(u2 + sigma * gradient_2d(2 * x - x2))
+        x = x2 - tau * gradient_2d_adjoint(u2)
+        x += tau * y
+        x /= 1 + tau
+        u = prox_l21norm_conj(u2 + sigma * gradient_2d(2 * x - x2), lam=lam, axis=0)
+        x2 += rho * (x - x2)
+        u2 += rho * (u - u2)
+
+        # criterion / objective function
+        crit[count] = np.sum((x - y) ** 2) / 2 + lam * tv(x)
+        if count > 0:
+            stopping_crit = np.abs(crit[count] - crit[count - 1]) / np.abs(
+                crit[count - 1]
+            )
+
+        if verbose and (np.mod(count, 25) == 0):
+            dualcost = cy - np.sum((y - gradient_2d_adjoint(u)) ** 2) / 2
+            # best value of dualcost computed so far:
+            primalcostlowerbound = np.maximum(primalcostlowerbound, dualcost)
+            # The gap between primalcost and primalcostlowerbound is even
+            # better than between primalcost and dualcost to monitor
+            # convergence.
+            print(
+                "No iter: {:d}, primal-cost: {:.3e}, primal-bound: {:.3e}, "
+                "gap: {:.3e}".format(
+                    count,
+                    crit[count],
+                    primalcostlowerbound,
+                    crit[count] - primalcostlowerbound,
+                )
+            )
+        count += 1
+
+    return x, crit[:count], u  # discard unused entries of the criterion
+
+
+def prox_tv_chambolle(
+    y, tau=0.249, lam=1.0, tol=1e-3, max_iter=10, verbose=False
+):  # pragma: no cover
+    r"""Proximal point operator for the TV regularizer.
+
+    Uses Chambolle's projection algorithm described in :cite:p:`Chambolle2004`.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input array.
+    tau : float, optional
+        Algorithm parameter, by default 0.249.
+    lam : float, optional
+        Regularization parameter, by default 1.0.
+    tol : float, optional
+        Tolerance for the stopping criterion, by default 1e-3.
+    max_iter : int, optional
+        Maximum number of iterations, by default 10.
+    verbose : bool, optional
+        Display convergence monitoring, by default False.
+
+    Returns
+    -------
+    x : numpy.ndarray
+        Proximal operator of the TV.
+    p : numpy.ndarray of size (2, *y.shape)
+        Output dual variable.
+
+    Note
+    ----
+    Adapted from original Matlab code provided by Jose Bioucas-Dias, June 2009,
+    (email: bioucas@lx.it.pt)
+    """
+    # auxiliary variable and convergence monitoring
+    err = tol + 1
+    count = 0
+    p = np.zeros((2, *y.shape))
+
+    while err > tol and count < max_iter:
+
+        # * compute divergence (div = - grad_adjoint)
+        div_p = -gradient_2d_adjoint(p)
+        u = div_p - y / lam
+        # * compute gradient
+        up = gradient_2d(u)
+
+        tmp = np.sqrt(np.sum(up**2, axis=0))
+        err = np.sqrt(np.sum((-up + tmp * p) ** 2))
+        # p = (p + tau * up) / (1 + tau * tmp)
+        p += tau * up
+        p /= 1 + tau * tmp
+
+        if verbose and (np.mod(count, 25) == 0):
+            print(
+                r"No iter: {:d}, err TV: {:.3e}".format(
+                    count,
+                    err,
+                )
+            )
+        count += 1
+
+    x = y + lam * gradient_2d_adjoint(p)
+
+    return x, p
+
+
+if __name__ == "__main__":
+    import matplotlib.image as mpimg
+    import matplotlib.pyplot as plt
+    from numpy.random import PCG64, Generator
+    from PIL import Image
+
+    niter = 400
+    lam = 0.1
+    tau = 0.01
+    tol = 1e-8
+
+    # ! /!\ mpimg.imread normalizes the max of the image to 1!
+    # x0 = mpimg.imread('img/parrotgray.png').astype(float)
+
+    img = Image.open("img/parrotgray.png", "r")
+    x0 = np.asarray(img)
+    x0 = x0 / np.max(x0)
+
+    # x = np.full((2, 2), -1)
+    # prox_nonegativity(x)
+    # z1 = prox_l21norm(x0, lam=1, axis=0)
+    # z2 = prox_kullback_leibler(x0, x0, lam=1)
+    # z3 = l21_norm(x0)
+
+    plt.figure()
+    plt.imshow(x0, interpolation="None", cmap=plt.cm.gray)
+    plt.title("Ground truth")
+    plt.colorbar()
+    plt.axis("off")
+    plt.show()
+
+    rng = Generator(PCG64(0))
+    # isnr = 30
+    # sig2 = np.sum(x0 ** 2) * 10 ** (-isnr/10) / x0.size
+    # y = x0 + np.sqrt(sig2) * rng.standard_normal(size=x0.shape)
+    y = x0 + 0.1 * rng.standard_normal(size=x0.shape)
+
+    plt.figure()
+    plt.imshow(y, interpolation="None", cmap=plt.cm.gray)
+    plt.title("Noisy image")
+    plt.colorbar()
+    plt.axis("off")
+    plt.show()
+
+    x, crit, u = prox_tv_primal_dual(y, tau, lam, tol, niter, verbose=True, rho=1.99)
+
+    plt.figure()
+    plt.plot(crit)
+    plt.xlabel("Objective function")
+    plt.ylabel("Iteration number")
+    plt.show()
+
+    plt.figure()
+    plt.imshow(x, interpolation="None", cmap=plt.cm.gray)
+    plt.colorbar()
+    plt.axis("off")
+    plt.title("Reconstructed image (Condat2013)")
+    plt.show()
+
+    x2, u2 = prox_tv_chambolle(
+        y, tau=0.249, lam=lam, tol=tol, max_iter=niter, verbose=True
+    )
+
+    plt.figure()
+    plt.imshow(x2, interpolation="None", cmap=plt.cm.gray)
+    plt.colorbar()
+    plt.axis("off")
+    plt.title("Reconstructed image (Chambolle2004)")
+    plt.show()
+
+    pass
diff --git a/src/aaxda/models/tv.py b/src/aaxda/models/tv.py
new file mode 100755
index 0000000000000000000000000000000000000000..a57b1ef36b87af50d152cf41f8ec5b5bf5fd446b
--- /dev/null
+++ b/src/aaxda/models/tv.py
@@ -0,0 +1,272 @@
+""" Implementation of the discrete istropic total variation and a smoothed
+variant, without numba jit support. Includes a variant supporting tensors.
+"""
+from collections import deque
+
+import numpy as np
+
+
+def gradient_2d(x):
+    r"""Compute 2d discrete gradient.
+
+    Compute the 2d discrete gradient of a 2d input array :math:`\mathbf{x}`,
+    **i.e.**, by computing horizontal and vertical differences:
+
+    .. math::
+       \nabla(\mathbf{x}) = (\nabla_v\mathbf{x}, \mathbf{x}\nabla_h).
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input 2d array :math:`\mathbf{x}`.
+
+    Returns
+    -------
+    numpy.ndarray, of shape ``(2, *x.shape)``
+        Vertical and horizontal differences, concatenated along the axis 0.
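+
+    Example
+    -------
+    >>> x = np.arange(9.0).reshape(3, 3)
+    >>> u = gradient_2d(x)  # u.shape == (2, 3, 3)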
+    """
+    assert len(x.shape) == 2, "gradient_2d: Invalid input, expected a 2d numpy array"
+    # horizontal differences
+    uh = np.c_[np.diff(x, axis=1), np.zeros([x.shape[0], 1])]
+    # vertical differences
+    uv = np.r_[np.diff(x, axis=0), np.zeros([1, x.shape[1]])]
+    # ! concatenate along the 1st dimension (slowest access)
+    return np.r_["0,2", uv[np.newaxis, ...], uh[np.newaxis, ...]]
+
+
+def gradient_2d_adjoint(y):
+    r"""Adjoint of the 2d discrete gradient operator.
+
+    Compute the adjoint of the 2d discrete gradient of a 2d input array
+    :math:`\mathbf{x}`,
+
+    .. math::
+       \nabla^*(\mathbf{y}) = - \text{div} (\mathbf{y})
+       = \nabla_v^*\mathbf{y}_v + \mathbf{y}_h\nabla_h^*.
+
+    Parameters
+    ----------
+    y : numpy.ndarray, 3d
+        Input array.
+
+    Returns
+    -------
+    numpy.ndarray, of shape ``(y.shape[1], y.shape[2])``
+        Adjoint of the 2d gradient operator, evaluated in :math:`\mathbf{y}`.
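+
+    Example
+    -------
+    >>> rng = np.random.default_rng(0)
+    >>> u = rng.standard_normal((2, 4, 4))
+    >>> x = gradient_2d_adjoint(u)  # x.shape == (4, 4)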
+    """
+    # v: horizontal, vertical
+    # np.r_['0,2',-y[0,:,0],-np.diff(y[:-1,:,0],1,0),y[-2,:,0]] + \
+    # np.c_['1,2',-y[:,0,1],-np.diff(y[:,:-1,1],1,1),y[:,-2,1]]
+    return (
+        np.r_["0,2", -y[0, 0, :], -np.diff(y[0, :-1, :], 1, 0), y[0, -2, :]]
+        + np.c_["1,2", -y[1, :, 0], -np.diff(y[1, :, :-1], 1, 1), y[1, :, -2]]
+    )
+
+
+def tv(x):
+    r"""Discrete anisotropic total variation (TV).
+
+    Compute the discrete anisotropic total variation of a 2d array
+
+    .. math::
+       \text{TV}(\mathbf{x}) = \Vert \nabla (\mathbf{x}) \Vert_{2, 1},
+
+    where :math:`\nabla` is the 2d discrete gradient operator.
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+
+    Returns
+    -------
+    float
+        Total variation evaluated in ``x``.
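+
+    Example
+    -------
+    >>> x = np.eye(4)
+    >>> tv_x = tv(x)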
+    """
+    u = gradient_2d(x)
+    return np.sum(np.sqrt(np.sum(np.abs(u) ** 2, axis=0)))
+
+
+# TODO: see if the n-D version of the TV can be simplified (i.e., to
+# TODO: enable jit support)
+def gradient_nd(x):
+    r"""Nd discrete gradient operator.
+
+    Compute the discrete gradient of an input tensor :math:`\mathbf{x}`,
+    **i.e.**, by computing differences along each dimension of the tensor.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input tensor.
+
+    Returns
+    -------
+    tuple[numpy.ndarray]
+        Nd discrete gradient :math:`\nabla \mathbf{x}`.
+
+    Note
+    ----
+    This function is likely to be slow.
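+
+    Example
+    -------
+    >>> x = np.zeros((3, 4, 5))
+    >>> u = gradient_nd(x)  # u.shape == (3, 3, 4, 5)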
+    """
+    s = x.shape
+    sz = np.array(s)  # number of zeros to be added
+    # u = []
+    u = np.zeros((len(s), *s), dtype=x.dtype)
+    for k in range(len(s)):
+        sz[k] = 1
+        # u.append(np.concatenate((np.diff(x, axis=k), np.zeros(sz)), axis=k))
+        u[k] = np.concatenate((np.diff(x, axis=k), np.zeros(sz)), axis=k)
+        sz[k] = s[k]
+    return u
+
+
+def gradient_nd_adjoint(u):
+    r"""Adjoint of the nd discrete gradient operator.
+
+    Compute the adjoint of the nd discrete gradient of an input tensor.
+
+    Parameters
+    ----------
+    u : tuple[numpy.ndarray]
+        Input elements.
+
+    Returns
+    -------
+    numpy.ndarray
+        adjoint of the nd gradient operator, evaluated in ``u``.
+
+    Note
+    ----
+    This function is likely to be slow.
+    """
+    ndims = int(len(u.shape) - 1)
+    # auxiliary indices to handle slices and np.newaxis needed for the concatenation
+    # ! use deque to allow circshift of the content of the indexing tuples
+    id1 = deque((0,) + (ndims - 1) * (slice(None),))  # [0, :, :, ..., :]
+    id2 = deque((slice(0, -1),) + (ndims - 1) * (slice(None),))  # [:-1, :, ...]
+    id3 = deque((-2,) + (ndims - 1) * (slice(None),))  # [-2, :, :, ..., :]
+    idna = deque((np.newaxis,) + (ndims - 1) * (slice(None),))  # [np.newaxis, :, ...]
+    # evaluate adjoint operator
+    x = np.concatenate(
+        (
+            -u[0, 0, ...][np.newaxis, ...],
+            -np.diff(u[0, :-1, ...], 1, axis=0),
+            u[0, -2, ...][np.newaxis, ...],
+        ),
+        axis=0,
+    )
+    for k in range(1, ndims):
+        # shift indexing deque towards the right
+        id1.rotate(1)
+        id2.rotate(1)
+        id3.rotate(1)
+        idna.rotate(1)
+        # add new contribution
+        x += np.concatenate(
+            (
+                -u[(k,) + tuple(id1)][tuple(idna)],
+                -np.diff(u[(k,) + tuple(id2)], 1, axis=k),
+                u[(k,) + tuple(id3)][tuple(idna)],
+            ),
+            axis=k,
+        )
+    return x
+
+
+def tv_nd(x):
+    r"""Generalisation of the discrete total variation (TV) to tensors.
+
+    Compute the discrete isotropic TV for any input tensor.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input tensor.
+
+    Returns
+    -------
+    float
+        Total variation evaluated in ``x``.
+
+    Note
+    ----
+    This function is likely to be slow.
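+
+    Example
+    -------
+    >>> x = np.zeros((3, 4, 5))
+    >>> tv_x = tv_nd(x)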
+    """
+    u = gradient_nd(x)
+    return np.sum(np.sqrt(np.sum(np.abs(u) ** 2, axis=0)))
+
+
+def smooth_tv(x, eps):
+    r"""Smooth approximation to the 2d discrete total variation (TV).
+
+    Compute a smooth approximation to the discrete isotropic total variation
+    of a 2d array:
+
+    .. math::
+        \text{TV}_{\varepsilon}(\mathbf{x}) = \sum_{n=1}^N \sum_{m=1}^M \sqrt{
+        [\nabla(\mathbf{x})]_{1, m, n}^2 + [\nabla(\mathbf{x})]_{2, m, n}^2
+        + \varepsilon}, \; \varepsilon > 0,
+
+    where :math:`\nabla` is the 2d discrete gradient operator.
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+    eps : float, > 0
+        Smoothing parameter.
+
+    Returns
+    -------
+    float
+        smooth TV evaluated in ``x``, :math:`\text{TV}_{\varepsilon}(\mathbf{x})`.
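+
+    Example
+    -------
+    >>> x = np.eye(4)
+    >>> stv = smooth_tv(x, 1e-10)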
+    """
+    u = gradient_2d(x)
+    return np.sum(np.sqrt(np.sum(np.abs(u) ** 2, axis=0) + eps))
+
+
+def gradient_smooth_tv(x, eps):
+    r"""Gradient of a smoothed 2d anisotropic total variation.
+
+    Compute the gradient of a smooth approximation to the 2d discrete
+    anisotropic total variation, evaluated in the input array ``x``.
+
+    Parameters
+    ----------
+    x : numpy.ndarray, 2d
+        Input array.
+    eps : float, > 0
+        Smoothing parameter.
+
+    Returns
+    -------
+    numpy.ndarray, 2d
+        Gradient of :math:`\text{TV}_\varepsilon`, evaluated in ``x``.
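+
+    Example
+    -------
+    >>> x = np.eye(4)
+    >>> g = gradient_smooth_tv(x, 1e-10)  # same shape as x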
+    """
+    u = gradient_2d(x)
+    v = gradient_2d_adjoint(u / np.sqrt(np.sum(np.abs(u) ** 2, axis=0) + eps))
+    return v
+
+
+if __name__ == "__main__":
+    # import timeit
+    # from inspect import cleandoc  # handle identation in multi-line strings
+
+    rng = np.random.default_rng(1234)
+    x = rng.standard_normal((5, 5))
+    eps = np.finfo(float).eps
+
+    u2 = gradient_2d(x)
+    y2 = gradient_2d_adjoint(u2)
+
+    u = gradient_nd(x)
+    y = gradient_nd_adjoint(u)
+
+    err_ = np.linalg.norm(y - y2)
+    print("Error: {0:1.5e}".format(err_))
+
+    tv_x = tv_nd(x)
+    tv_x_2d = tv(x)
+    err = np.linalg.norm(tv_x - tv_x_2d)
+    print("Error: {0:1.5e}".format(err))
diff --git a/src/aaxda/samplers/__init__.py b/src/aaxda/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/samplers/parallel/__init__.py b/src/aaxda/samplers/parallel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/samplers/parallel/spa_psgla_sync_s.py b/src/aaxda/samplers/parallel/spa_psgla_sync_s.py
new file mode 100644
index 0000000000000000000000000000000000000000..71a5a86a8bb03e6deb5a98ffb8acab5b231b716c
--- /dev/null
+++ b/src/aaxda/samplers/parallel/spa_psgla_sync_s.py
@@ -0,0 +1,1390 @@
+"""Distributed Python implementation of the proposed PSGLA within Gibbs
+sampler for supervised Poisson deconvolution under a TV prior.
+"""
+from logging import Logger
+from time import perf_counter
+
+import numpy as np
+from mpi4py import MPI
+from numba import jit
+from numpy.random import SeedSequence, default_rng
+from tqdm import tqdm
+
+# import aaxda.utils.checkpoint_parallel as chkpt
+import aaxda.utils.communications as ucomm
+from aaxda.models.distributed_convolutions import (  # create_local_to_global_slice,
+    calculate_local_data_size,
+)
+from aaxda.models.jtv import chunk_gradient_2d, chunk_gradient_2d_adjoint
+from aaxda.models.models import SyncConvModel
+from aaxda.models.prox import (
+    kullback_leibler,
+    l21_norm,
+    prox_kullback_leibler,
+    prox_l21norm,
+    prox_nonegativity,
+)
+from aaxda.utils.checkpoint import DistributedCheckpoint, SerialCheckpoint
+from aaxda.utils.communicators import SyncCartesianCommunicatorTV
+
+# from utils.memory import display_memory
+
+
+def loading(
+    checkpointer: DistributedCheckpoint,
+    warmstart_iter: int,
+    rng: np.random.Generator,
+    global_slice_tile,
+    global_slice_data,
+):
+    """Function interface around the :meth:`DistributedCheckpoint.load` method.
+
+    Parameters
+    ----------
+    checkpointer : DistributedCheckpoint
+        Checkpoint object.
+    warmstart_iter : int
+        Warm-start iteration (indicating the name of the file to be loaded).
+    rng : np.random.Generator
+        Random number generator.
+    global_slice_tile : list of slice
+        Slice to select a tile from the full image.
+    global_slice_data : list of slice
+        Slice to select the local data chunk from the full data array.
+
+    Returns
+    -------
+    numpy.ndarray, int, float
+        Variables required to restart the sampler.
+    """
+    # ! version when saving all iterations for all variables
+    # [(np.s_[-1], *global_slice_tile)]
+    #     + 2 * [(np.s_[-1], *global_slice_data)]
+    #     + 2 * [(np.s_[-1], np.s_[:], *global_slice_tile)]
+    #     + 3 * [np.s_[-1]],
+    # ! saving all iterations only for x
+    # [(np.s_[-1], *global_slice_tile)]
+    # + 2 * [global_slice_data]
+    # + 2 * [(np.s_[:], *global_slice_tile)]
+    # + 3 * [np.s_[-1]],
+    # ! saving only estimator and last state of each variable
+    dic = checkpointer.load(
+        warmstart_iter,
+        [(np.s_[-1], *global_slice_tile)]
+        + 2 * [global_slice_data]
+        + 2 * [(np.s_[:], *global_slice_tile)]
+        + 7 * [np.s_[-1]],
+        rng,
+        "x",
+        "z1",
+        "u1",
+        "z2",
+        "u2",
+        "beta",
+        "rho1",
+        "rho2",
+        "alpha1",
+        "alpha2",
+        "score",
+        "iter",
+    )
+
+    return (
+        dic["x"],
+        dic["z1"],
+        dic["u1"],
+        dic["z2"],
+        dic["u2"],
+        dic["score"],
+        dic["beta"],
+        dic["rho1"],
+        dic["rho2"],
+        dic["alpha1"],
+        dic["alpha2"],
+        dic["iter"],
+    )
+
+
+def loading_per_process(
+    rank,
+    checkpointer: SerialCheckpoint,
+    warmstart_iter: int,
+    rng: np.random.Generator,
+):
+    """Function interface around the :meth:`SerialCheckpoint.load` method to
+    load the content of one file per process.
+
+    Parameters
+    ----------
+    checkpointer : SerialCheckpoint
+        Checkpoint object.
+    warmstart_iter : int
+        Warm-start iteration (indicating the name of the file to be loaded).
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray, int, float
+        Variables required to restart the sampler.
+    """
+    # ! only estimator and last state of each variable has been saved to disk
+    if rank == 0:
+        dic = checkpointer.load(
+            warmstart_iter,
+            [np.s_[:]] + 4 * [np.s_[:]] + 3 * [np.s_[-1]],
+            rng,
+            "x",
+            "z1",
+            "u1",
+            "z2",
+            "u2",
+            "regularization",
+            "score",
+            "iter",
+        )
+    else:
+        dic = checkpointer.load(
+            warmstart_iter,
+            [np.s_[:]] + 4 * [np.s_[:]],
+            rng,
+            "x",
+            "z1",
+            "u1",
+            "z2",
+            "u2",
+        )
+        dic["score"] = 0.0
+        dic["regularization"] = np.zeros((5,), dtype="d")
+        dic["iter"] = 0
+
+    return (
+        dic["x"],
+        dic["z1"],
+        dic["u1"],
+        dic["z2"],
+        dic["u2"],
+        dic["score"],
+        dic["regularization"],
+        dic["iter"],
+    )
+
+
+# ! ISSUE: code hanging forever in MPI, to be revised..
+def saving(
+    checkpointer: DistributedCheckpoint,
+    iter_mc: int,
+    rng: np.random.Generator,
+    local_x,
+    local_u1,
+    local_u2,
+    local_z1,
+    local_z2,
+    beta,
+    rho1,
+    rho2,
+    alpha1,
+    alpha2,
+    score,
+    counter,
+    atime,
+    asqtime,
+    nsamples,
+    global_slice_tile,
+    global_slice_data,
+    image_size,
+    data_size,
+):
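+    """Save the variables of the sampler into a single distributed checkpoint
+    file (one file for all processes); see :func:`saving_per_process` for the
+    per-process variant and the meaning of the parameters.
+    """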
+    root_id = 0
+    it = iter_mc + 1
+
+    # find index of the candidate MAP within the current checkpoint
+    id_map = np.empty(1, dtype="i")
+    if checkpointer.rank == root_id:
+        id_map[0] = np.argmin(score[:nsamples])
+
+    checkpointer.comm.Bcast([id_map, 1, MPI.INT], root=0)
+
+    # save MMSE, MAP (only for x)
+    select_ = (
+        [(np.s_[:], *global_slice_tile)]
+        + 2 * [(*global_slice_tile,)]
+        + 2 * [(*global_slice_data,)]
+        + 2 * [(np.s_[:], *global_slice_tile)]
+    )
+    shape_ = (
+        [(nsamples, *image_size)]
+        + 2 * [(*image_size,)]
+        + 2 * [(*data_size,)]
+        + 2 * [(2, *image_size)]
+    )
+    chunk_sizes_ = (
+        [(1, *local_x.shape[1:])]  # ! see if saving only the last point or more...
+        + 2 * [(*local_x.shape[1:],)]
+        + 2 * [(*local_z1.shape,)]
+        + 2 * [(1, *local_x.shape[1:])]
+    )
+
+    x_mmse = np.mean(local_x[:nsamples, ...], axis=0)
+    x_map = local_x[id_map[0]]
+    checkpointer.save(
+        it,
+        shape_,
+        select_,
+        chunk_sizes_,
+        rng=rng,
+        mode="w",
+        rdcc_nbytes=1024**2 * 200,  # 200 MB cache
+        x=local_x[:nsamples],
+        x_map=x_map,
+        x_mmse=x_mmse,
+        z1=local_z1,
+        u1=local_u1,  # u1 and/or z1 create an issue: check the data selection + size for these 2 variables
+        z2=local_z2,
+        u2=local_u2,
+    )
+
+    # ! z1 and u1 create a problem when code run from MPI (code hanging
+    # ! forever...): why?
+    # z1=local_z1,
+    # u1=local_u1,
+
+    # ! saving from process 0 only (beta, score, iter, time)
+    if checkpointer.rank == root_id:
+        select_ = 10 * [np.s_[:]]
+        chunk_sizes_ = 10 * [None]
+        checkpointer.save_from_process(
+            root_id,
+            it,
+            select_,
+            chunk_sizes_,
+            mode="a",
+            rdcc_nbytes=1024**2 * 200,  # 200 MB cache
+            asqtime=np.array(asqtime),
+            atime=np.array(atime),
+            iter=it,
+            score=score[:nsamples],
+            beta=beta[:nsamples],
+            rho1=rho1[:nsamples],
+            rho2=rho2[:nsamples],
+            alpha1=alpha1[:nsamples],
+            alpha2=alpha2[:nsamples],
+            counter=counter,
+        )
+
+    pass
+
+
+def saving_per_process(
+    rank,
+    comm,
+    checkpointer: SerialCheckpoint,
+    iter_mc: int,
+    rng: np.random.Generator,
+    local_x,
+    local_u1,
+    local_z1,
+    local_u2,
+    local_z2,
+    regularization,
+    score,
+    counter,
+    atime,
+    asqtime,
+    nsamples,
+):
+    """Save useful variables on a per-process basis (i.e., one file per
+    process).
+
+    Parameters
+    ----------
+    rank : int
+        Rank of the current process.
+    comm : mpi4py.MPI.Comm
+        Communicator used.
+    checkpointer : SerialCheckpoint
+        Checkpointer object associated with the current process.
+    iter_mc : int
+        Index of the current sampling iteration (0-based).
+    rng : np.random.Generator
+        Random number generator of the current process.
+    local_x : numpy.ndarray
+        Primal variable.
+    local_u1 : numpy.ndarray
+        Augmentation variable (data fidelity).
+    local_z1 : numpy.ndarray
+        Splitting variable (data fidelity).
+    local_u2 : numpy.ndarray
+        Augmentation variable (prior).
+    local_z2 : numpy.ndarray
+        Splitting variable (prior).
+    regularization : numpy.ndarray
+        Values of the regularization parameters over the checkpointed samples.
+    score : numpy.ndarray
+        Objective function over the iterations.
+    counter : int
+        Number of iterations since the last checkpoint.
+    atime : float
+        Average iteration runtime (on root process).
+    asqtime : float
+        Average of square iteration runtime (on root process).
+    nsamples : int
+        Number of samples considered for the checkpoint.
+    """
+    root_id = 0
+    it = iter_mc + 1
+
+    # find index of the candidate MAP within the current checkpoint
+    id_map = np.empty(1, dtype="i")
+    if rank == root_id:
+        id_map[0] = np.argmin(score[:nsamples])
+    comm.Bcast([id_map, 1, MPI.INT], root=0)
+
+    # estimators to be saved to disk
+    x_mmse = np.mean(local_x[:nsamples, ...], axis=0)
+    x_sq_m = np.mean(local_x[:nsamples, ...] ** 2, axis=0)
+    x_map = local_x[id_map[0]]
+
+    # checkpointer configuration
+    chunk_sizes_ = (
+        # [(1, *local_x.shape[1:])]  # ! see if saving only the last point or more...
+        [local_x.shape[1:]]  # ! see if saving only the last point or more...
+        + 3 * [(*local_x.shape[1:],)]
+        + 2 * [local_z1.shape]
+        + 2 * [(1, *local_x.shape[1:])]
+    )
+
+    checkpointer.save(
+        it,
+        chunk_sizes_,
+        rng=rng,
+        mode="w",
+        rdcc_nbytes=1024**2 * 200,  # 200 MB cache
+        x=local_x[-1],  # [:nsamples]
+        x_map=x_map,
+        x_m=x_mmse,
+        x_sq_m=x_sq_m,
+        z1=local_z1,
+        u1=local_u1,
+        z2=local_z2,
+        u2=local_u2,
+    )
+
+    if rank == 0:
+        chunk_sizes_ = 6 * [None]
+        checkpointer.save(
+            it,
+            chunk_sizes_,
+            mode="a",
+            rdcc_nbytes=1024**2 * 200,  # 200 MB cache
+            asqtime=np.array(asqtime),
+            atime=np.array(atime),
+            iter=it,
+            score=score[:nsamples],
+            regularization=regularization[:nsamples],
+            counter=counter,
+        )
+    pass
+
+
+# ! a separate distributed tv operator is required (forward communication can
+# ! be grouped with the convolution when considering a linear convolution, but
+# ! not the adjoint!)
+def gradient_x(
+    sync_conv_model: SyncConvModel,
+    adjoint_tv_communicator: SyncCartesianCommunicatorTV,
+    isfirst,
+    islast,
+    buffer_Hx,
+    buffer_Gx,
+    z1,
+    u1,
+    z2,
+    u2,
+    rho1,
+    rho2,
+    local_slice_tv,
+    local_slice_tv_adj,
+):
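+    r"""Compute the local tile of the gradient of the smooth part of the
+    negative log-posterior w.r.t. the image, i.e.,
+    :math:`H^T (Hx - z_1 + u_1) / \rho_1 + G^T (Gx - z_2 + u_2) / \rho_2`,
+    assuming ``buffer_Hx`` and ``buffer_Gx`` already contain :math:`Hx` and
+    :math:`Gx` (see the comments below).
+    """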
+    # Hx and Gx already computed during the previous iteration: buffer_Hx and
+    # Gx contain the right info in terms of those terms
+    # local direct operator applied out of this function
+
+    # local update step
+    buffer_Hx[sync_conv_model.local_slice_conv_adj] += u1 - z1
+    buffer_Hx[sync_conv_model.local_slice_conv_adj] /= rho1
+    buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))] += u2 - z2
+    buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))] /= rho2
+
+    # communicate facet borders to neighbours Gx
+    adjoint_tv_communicator.update_borders(buffer_Gx)
+
+    # * local adjoint operator
+    # convolution
+    local_grad_x = sync_conv_model.apply_adjoint_operator(buffer_Hx)
+    # 2d tv
+    chunk_gradient_2d_adjoint(
+        buffer_Gx[0],
+        buffer_Gx[1],
+        local_grad_x,
+        isfirst,
+        islast,
+    )
+
+    return local_grad_x
+
+
+def sample_x(x, gamma_x, grad_x, rng):
+    r"""PSGLA update step for the image :math:`x`.
+
+    Parameters
+    ----------
+    x : numpy.array
+        Input image.
+    gamma_x : float
+        PSGLA step size (gradient and stochastic perturbation).
+    grad_x : numpy.array
+        Gradient of the smooth part involved in
+        :math:`-\log \pi_{\alpha, \beta}(x \mid y, (u_i, z_i)_{1\leq i \leq 2})`.
+    rng : np.random.Generator
+        Random number generator.
+
+    Note
+    ----
+    The variable ``x`` is updated in-place.
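+
+    Example
+    -------
+    A minimal sketch with a zero gradient (illustrative values):
+
+    >>> rng = np.random.default_rng(1234)
+    >>> x = np.ones((4, 4))
+    >>> sample_x(x, 1e-2, np.zeros_like(x), rng)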
+    """
+    # ! in-place update for x
+    x += np.sqrt(2 * gamma_x) * rng.standard_normal(size=x.shape) - gamma_x * grad_x
+    prox_nonegativity(x)
+    return
+
+
+def sample_z1(z1, y, Hx, u1, rho1: float, gamma1: float, rng):
+    r"""PSGLA update step for the splitting variable :math:`z_1`.
+
+    Parameters
+    ----------
+    z1 : numpy.array
+        Current state of the splitting variable :math:`z_1`.
+    y : numpy.array
+        Observed data.
+    Hx : numpy.array
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    u1 : numpy.array
+        Current state of the augmentation variable :math:`u_1`.
+    rho1 : float
+        AXDA parameter.
+    gamma1 : float
+        PSGLA step size (gradient and stochastic perturbation).
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.array
+        New state for the splitting variable :math:`z_1`.
+    """
+    grad_z1 = (z1 - u1 - Hx) / rho1
+    z = z1 + np.sqrt(2 * gamma1) * rng.standard_normal(size=z1.shape) - gamma1 * grad_z1
+    return prox_kullback_leibler(z, y, lam=gamma1)
+
+
+def sample_z2(z2, Gx, u2, rho2: float, gamma2: float, lambda_gamma2: float, rng):
+    r"""PSGLA update step for the splitting variable :math:`z_2`.
+
+    Parameters
+    ----------
+    z2 : numpy.array
+        Current state of the splitting variable :math:`z_2`.
+    Gx : numpy.array
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    u2 : numpy.array
+        Current state of the augmentation variable :math:`u_2`.
+    rho2 : float
+        AXDA splitting parameter.
+    gamma2 : float
+        PSGLA step size (gradient and stochastic perturbation).
+    lambda_gamma2 : float
+        Current value of the regularization parameter :math:`\lambda`
+        multiplied by the PSGLA step size.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.array
+        New state for the splitting variable :math:`z_2`.
+    """
+    grad_z2 = (z2 - u2 - Gx) / rho2
+    z = z2 + np.sqrt(2 * gamma2) * rng.standard_normal(size=z2.shape) - gamma2 * grad_z2
+    return prox_l21norm(z, lam=lambda_gamma2, axis=0)
+
+
+def sample_u(z, Hx, rho: float, alph: float, rng):
+    r"""PSGLA update step for the auxiliary variables
+    :math:`(u_i)_{1 \leq i \leq 2}`.
+
+    Parameters
+    ----------
+    z : numpy.array
+        Current state of the splitting variable :math:`z_i`.
+    Hx : numpy.array
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    rho : float
+        AXDA splitting parameter.
+    alph : float
+        AXDA augmentation parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.array
+        New state for the auxiliary variable :math:`u_i`.
+    """
+    normalization_factor = np.sqrt(rho + alph)
+    mu = alph * (z - Hx) / normalization_factor**2
+    return mu + rng.standard_normal(size=mu.shape) * (
+        np.sqrt(rho * alph) / normalization_factor
+    )
+
+
+def sample_beta(a: float, b: float, N, l21_z2: float, rng) -> float:
+    r"""Sample the regularization parameter :math:`\lambda`.
+
+    Parameters
+    ----------
+    a : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    b : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    N : numpy.array(dtype=int)
+        Size of the image :math:`x`.
+    l21_z2 : float
+        Value of :math:`\|z_2\|_{2, 1}`.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    float
+        New state for the regularization parameter.
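+
+    Example
+    -------
+    Illustrative hyper-parameter values:
+
+    >>> rng = np.random.default_rng(1234)
+    >>> beta = sample_beta(1e-3, 1e-3, np.array([16, 16]), 10.0, rng)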
+    """
+    return rng.gamma(a + 2 * np.prod(N), 1 / (b + l21_z2))
+
+
+def sgd_beta(beta, delta, beta_min, beta_max, d, alpha, gX):
+    """Sampling beta in linear scale, using a single sample for the
+    stochastic gradient."""
+    # project the update onto [beta_min, beta_max] with np.minimum / np.maximum
+    # (np.min / np.max would interpret the bound as an `axis` argument)
+    beta = np.maximum(
+        np.minimum(beta + delta * (d / (alpha * beta) - gX), beta_max), beta_min
+    )
+
+    return beta
+
+
+def sgd_beta_log_scale(log_beta, delta, log_beta_min, log_beta_max, d, alpha, gX):
+    """Sampling beta in logarithmic scale, using a single sample for the
+    stochastic gradient."""
+    beta = np.exp(
+        np.maximum(
+            np.minimum(
+                log_beta + delta * (d / alpha - np.exp(log_beta) * gX),
+                log_beta_max,
+            ),
+            log_beta_min,
+        )
+    )
+
+    return beta
+
+
+def sample_var(a: float, b: float, d, fX: float, rng) -> float:
+    r"""Sample the regularization parameter :math:`\lambda`.
+
+    Parameters
+    ----------
+    a : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    b : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    d : int
+        Dimension in which the parameter lives.
+    fX : float
+        Value of the potential involved in the conditional distribution.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    float
+        New state for the regularization parameter.
+    """
+    return 1 / rng.gamma(a + d, 1 / (b + fX))
+
+
+def sgd_var(beta, delta, beta_min, beta_max, d, alpha, gX):
+    """Sampling beta in linear scale (beta homogeneous to a variance), using a
+    single sample for the stochastic gradient."""
+    beta = np.maximum(
+        np.minimum(beta + delta / beta * (-d / alpha + gX / beta), beta_max),
+        beta_min,
+    )
+
+    return beta
+
+
+def sgd_var_log_scale(log_beta, delta, log_beta_min, log_beta_max, d, alpha, gX):
+    """Sampling beta in logarithmic scale (beta homogenoeous to a variance),
+    using a single sample for the stochastic gradient."""
+    beta = np.exp(
+        np.maximum(
+            np.minimum(
+                log_beta + delta * (-d / alpha + np.exp(-log_beta) * gX),
+                log_beta_max,
+            ),
+            log_beta_min,
+        )
+    )
+
+    return beta
+
+
+@jit(nopython=True, cache=True)
+def score_function(y, Hx, Gx, z1, u1, z2, u2, rho1, rho2, alpha1, alpha2, beta):
+    """Compute the current potential value for the AXDA model.
+
+    Parameters
+    ----------
+    y : numpy.array
+        Observed data.
+    Hx : numpy.array
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    Gx : numpy.array
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    z1 : numpy.array
+        Splitting variable :math:`z_1`.
+    u1 : numpy.array
+        Auxiliary augmentation variable :math:`u_1`.
+    z2 : numpy.array
+        Splitting variable :math:`z_2`.
+    u2 : numpy.array
+        Auxiliary augmentation variable :math:`u_2`.
+    rho1 : float
+        AXDA splitting parameter.
+    rho2 : float
+        AXDA splitting parameter.
+    alpha1 : float
+        AXDA augmentation parameter.
+    alpha2 : float
+        AXDA augmentation parameter.
+    beta : float
+        Value of the regularization parameter.
+
+    Returns
+    -------
+    float
+        Current potential value for the AXDA model.
+    """
+    score = (
+        kullback_leibler(z1, y)
+        + np.sum(np.abs(Hx - z1 + u1) ** 2) / (2 * rho1)
+        + np.sum((Gx - z2 + u2) ** 2) / (2 * rho2)
+        + np.sum(np.abs(u1) ** 2) / (2 * alpha1)
+        + np.sum(u2**2) / (2 * alpha2)
+        + beta * l21_norm(z2)
+    )
+
+    return score
+
+
+@jit(nopython=True, cache=True)
+def scores_function(y, Hx, Gx, z1, u1, z2, u2):
+    """Compute the value of the potentials invovled in the AXDA model.
+
+    Parameters
+    ----------
+    y : numpy.array
+        Observed data.
+    Hx : numpy.array
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    Gx : numpy.array
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    z1 : numpy.array
+        Splitting variable :math:`z_1`.
+    u1 : numpy.array
+        Auxiliary augmentation variable :math:`u_1`.
+    z2 : numpy.array
+        Splitting variable :math:`z_2`.
+    u2 : numpy.array
+        Auxiliary augmentation variable :math:`u_2`.
+
+    Returns
+    -------
+    numpy.ndarray
+        Values of the potentials involved in the AXDA model (data fidelity,
+        splitting terms, augmentation terms and prior, in this order).
+    """
+    data_fidelity = kullback_leibler(z1, y)
+    lrho1 = np.sum(np.abs(Hx - z1 + u1) ** 2) / 2
+    lrho2 = np.sum((Gx - z2 + u2) ** 2) / 2
+    lalpha1 = np.sum(np.abs(u1) ** 2) / 2
+    lalpha2 = np.sum(u2**2) / 2
+    prior = l21_norm(z2)
+
+    return np.array([data_fidelity, lrho1, lrho2, lalpha1, lalpha2, prior], dtype="d")
+
+
+def spa_psgla_mpi(
+    local_y,
+    checkpointname: str,
+    checkpoint_frequency: int,
+    warmstart_iter: int,
+    monitor_frequency: int,
+    checkpointflag: bool,
+    sync_conv_model: SyncConvModel,
+    checkpointer,
+    rho1: float,
+    rho2: float,
+    alpha1: float,
+    alpha2: float,
+    beta: float,
+    a: float,
+    b: float,
+    Nmc: int,
+    Nbi_p: int,  # take Nbi as a factor of checkpoint_frequency to simplify
+    M,
+    seed: int,
+    logger: Logger,
+):
+    r"""Distributed SPA sampler based on PSGLA to address a supervised 2D
+    Poisson deconvolution problem relying on a TV prior.
+
+    Parameters
+    ----------
+    local_y : numpy.ndarray
+        Local noisy observations.
+    checkpointname : str
+        Root string to build the name of the checkpoint files.
+    checkpoint_frequency : int
+        Number of iterations after which a checkpoint file is written to disk.
+    warmstart_iter : int
+        Iteration identifier of the warm-start checkpoint file.
+    monitor_frequency : int
+        Number of iterations after which the potential is computed and
+        written to the log file.
+    checkpointflag : bool
+        Flag to activate warmstart.
+    sync_conv_model : SyncConvModel
+        Distributed linear convolution model object (MPI).
+    checkpointer : DistributedCheckpoint
+        Distributed checkpoint object (MPI).
+    rho1 : float
+        Splitting parameter.
+    rho2 : float
+        Splitting parameter.
+    alpha1 : float
+        Augmentation parameter.
+    alpha2 : float
+        Augmentation parameter.
+    beta : float
+        Initial value of the regularization parameter.
+    a : float
+        Hyperparameters for the regularization parameter.
+    b : float
+        Hyperparameters for the regularization parameter.
+    Nmc : int
+        Total number of Monte-Carlo samples to be generated.
+    Nbi_p : int
+        Number of burn-in samples (taken as a multiple of
+        ``checkpoint_frequency`` to simplify checkpointing).
+    M : float
+        Maximum intensity of the image to be recovered.
+    seed : int
+        Integer seed to instantiate the random number generator.
+    logger : logging.Logger
+        Logger object.
+    """
+    # TODO: introduce distributed checkpointer, remove unnecessary variables
+    # from sampler
+    # add direction to the list of attributes of the communicators
+
+    # ! https://stackoverflow.com/questions/38543506/change-logging-print-function-to-tqdm-write-so-logging-doesnt-interfere-wit
+    # log = logging.getLogger(__name__)
+    # log.setLevel(logging.INFO)
+    # log.addHandler(TqdmLoggingHandler())
+    ncores = sync_conv_model.comm.Get_size()
+    rank = sync_conv_model.comm.Get_rank()
+    ndims = sync_conv_model.ndims
+    grid_size = MPI.Compute_dims(ncores, ndims)
+
+    # * Cartesian topology communicator and nD rank
+    ranknd = sync_conv_model.ranknd
+    grid_size = np.array(grid_size, dtype="i")
+    isfirst = ranknd == 0
+    islast = ranknd == (grid_size - 1)
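+    # isfirst / islast are boolean arrays with one flag per dimension of the
+    # process grid (e.g., a process can be last along one axis but not the other)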
+
+    # * model parameters (AXDA regularization parameter)
+    max_sq_kernel = np.max(np.abs(sync_conv_model.fft_kernel)) ** 2
+
+    # * auxiliary quantities for stochastic gradient updates
+    # dimensions
+    # N = sync_conv_model.image_size
+    # d_N = np.prod(N)
+    # dimension of the proper space (removing 0s)
+    # d_tv = (N[0] - 1) * N[1] + (N[1] - 1) * N[0]
+    # d_M = np.prod(sync_conv_model.data_size)
+
+    # tile
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, sync_conv_model.image_size, ranknd
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data
+    # ! larger number of data points on the border (forward overlap)
+    # local_data_size = tile_size + (ranknd == 0) * overlap_size
+    local_data_size, facet_size, facet_size_adj = calculate_local_data_size(
+        tile_size, ranknd, sync_conv_model.overlap_size, grid_size, backward=False
+    )
+
+    # facet (convolution)
+    offset = facet_size - tile_size
+    offset_adj = facet_size_adj - tile_size
+
+    # facet (tv)
+    offset_tv = offset - (offset > 0).astype("i")
+    offset_tv_adj = np.logical_and(ranknd > 0, grid_size > 1).astype("i")
+    tv_facet_size_adj = tile_size + offset_tv_adj
+
+    # * Useful slices (direct operators)
+    # extract tile from local facet (direct conv. operator)
+    local_slice_tile = ucomm.get_local_slice(ranknd, grid_size, offset, backward=False)
+    # extract values from local conv facet to apply local gradient operator
+    local_slice_tv = ucomm.get_local_slice(ranknd, grid_size, offset_tv, backward=False)
+
+    # * Useful slices (adjoint operators)
+    # set value of local convolution in the adjoint buffer
+    local_slice_conv_adj = ucomm.get_local_slice(
+        ranknd, grid_size, offset_adj, backward=True
+    )
+    # set value of local discrete gradient into the adjoint gradient buffer
+    local_slice_tv_adj = ucomm.get_local_slice(
+        ranknd, grid_size, offset_tv_adj, backward=True
+    )
+
+    # indexing into global arrays
+    # if not save_mode == "process":
+    #     global_slice_tile = tuple(
+    #         [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    #     )
+    #     global_slice_data = create_local_to_global_slice(
+    #         tile_pixels,
+    #         ranknd,
+    #         sync_conv_model.overlap_size,
+    #         local_data_size,
+    #         backward=False,
+    #     )
+
+    # * MC buffers
+    local_x_mc = np.empty((checkpoint_frequency, *tile_size), dtype="d")
+    local_z1_mc = np.empty(local_data_size, dtype="d")
+    local_u1_mc = np.empty(local_data_size, dtype="d")
+    local_z2_mc = np.empty((2, *tile_size), dtype="d")
+    local_u2_mc = np.empty((2, *tile_size), dtype="d")
+
+    local_score = np.empty((1), dtype="d")
+    regularization_ = np.empty((5), dtype="d")
+    start_iter = np.empty((1), dtype="i")
+    if rank == 0:
+        score_array = np.zeros((checkpoint_frequency,), dtype="d")
+        global_score = np.empty((1), dtype="d")
+        global_scores = np.empty((6), dtype="d")
+        regularization_mc = np.empty((checkpoint_frequency, 5), dtype="d")
+    else:
+        score_array = None
+        global_score = None
+        global_scores = None
+        regularization_mc = None
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = SeedSequence(seed)
+        # Spawn off nworkers child SeedSequences to pass to child processes.
+        child_seed = ss.spawn(ncores)
+    local_seed = sync_conv_model.comm.scatter(child_seed, root=0)
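+    # ! SeedSequence.spawn yields statistically independent child sequences,
+    # ! so each MPI process draws from its own reproducible random stream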
+
+    # * load state from last backup
+    local_rng = default_rng(local_seed)
+    local_x = np.empty(facet_size, dtype="d")
+
+    # TODO: to be modified
+    if checkpointflag:
+        # ! insert last element obtained at the end of the buffer
+        # using one checkpointer per process (multiple files)
+        (
+            local_x[local_slice_tile],
+            local_z1_mc,
+            local_u1_mc,
+            local_z2_mc,
+            local_u2_mc,
+            ref_score,
+            regularization_,  # beta, rho1, rho2, alpha1, alpha2
+            start_iter[0],
+        ) = loading_per_process(
+            rank,
+            checkpointer,
+            warmstart_iter,
+            local_rng,
+        )
+        local_x_mc[-1] = local_x[local_slice_tile]
+
+        sync_conv_model.comm.Bcast([regularization_, 5, MPI.DOUBLE], root=0)
+        sync_conv_model.comm.Bcast([start_iter, 1, MPI.INT], root=0)
+    else:
+        # * initialization
+        start_iter[0] = 1
+        local_x[local_slice_tile] = local_rng.integers(
+            0, high=M, size=tile_size, endpoint=True
+        ).astype(float)
+        local_u1_mc[:] = local_rng.integers(
+            0, high=M, size=local_data_size, endpoint=True
+        ).astype(float)
+        local_z1_mc[:] = local_rng.integers(
+            0, high=M, size=local_data_size, endpoint=True
+        ).astype(float)
+        local_u2_mc[:] = local_rng.integers(
+            0, high=M, size=[2, *tile_size], endpoint=True
+        ).astype(float)
+        local_z2_mc[:] = local_rng.integers(
+            0, high=M, size=[2, *tile_size], endpoint=True
+        ).astype(float)
+
+        local_x_mc[0] = local_x[local_slice_tile]
+        if rank == 0:
+            # row layout: [rho1, rho2, alpha1, alpha2, beta]
+            regularization_[:] = [rho1, rho2, alpha1, alpha2, beta]
+            regularization_mc[0] = regularization_
+        sync_conv_model.comm.Bcast([regularization_, 5, MPI.DOUBLE], root=0)
+
+    # * setup communication scheme
+    # ! convolution (direct + adjoint) + direct TV covered by sync_conv_model
+    # adjoint TV communicator
+    # ! need a different object for the moment (because of the size required...)
+    adjoint_tv_communicator = SyncCartesianCommunicatorTV(
+        sync_conv_model.comm,
+        sync_conv_model.cartcomm,
+        sync_conv_model.grid_size,
+        local_x.itemsize,
+        tv_facet_size_adj,
+        direction=True,
+    )  # True
+
+    # * setup auxiliary buffers
+    # ! communicating facet borders to neighbours already done in-place with
+    # ! the direct convolution operator
+    # ! Hx, Gx updated whenever buffer_Hx and buffer_Gx are
+    buffer_Hx = np.empty(facet_size_adj)
+    buffer_Hx[local_slice_conv_adj] = sync_conv_model.apply_direct_operator(local_x)
+
+    buffer_Gx = np.empty((2, *tv_facet_size_adj))
+    (
+        buffer_Gx[tuple((0, *local_slice_tv_adj))],
+        buffer_Gx[tuple((1, *local_slice_tv_adj))],
+    ) = chunk_gradient_2d(local_x[local_slice_tv], islast)
+
+    # communicate facet borders to neighbours (Hx, Gx)
+    # ! Hx updated in place
+    sync_conv_model.adjoint_communicator.update_borders(buffer_Hx)
+    # ! Gx updated in place
+    adjoint_tv_communicator.update_borders(buffer_Gx)
+
+    # * initialize score (checking consistency after warmstart)
+    local_score[0] = score_function(
+        local_y,
+        buffer_Hx[local_slice_conv_adj],
+        buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))],
+        local_z1_mc,
+        local_u1_mc,
+        local_z2_mc,
+        local_u2_mc,
+        regularization_[0],  # rho1
+        regularization_[1],  # rho2
+        regularization_[2],  # alpha1
+        regularization_[3],  # alpha2
+        regularization_[4],  # beta
+    )
+    sync_conv_model.comm.Reduce(
+        [local_score, MPI.DOUBLE],
+        [global_score, MPI.DOUBLE],
+        op=MPI.SUM,
+        root=0,
+    )
+    if rank == 0:
+        logger.info(
+            r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(
+                start_iter[0] - 1,
+                global_score[0],
+            )
+        )
+        if not checkpointflag:
+            score_array[0] = global_score[0]
+        else:
+            score_array[-1] = global_score[0]
+
+        pbar = tqdm(total=Nmc - 1, desc="Sampling", unit="it")
+        pbar.update(start_iter[0])
+        # manager = enlighten.get_manager()
+        # pbar = manager.counter(total=Nmc, desc="Sampling")
+        logger.info("Start AXDA from t={}".format(start_iter[0]))
+
+        # auxiliary variables to measure timing (average + std) per iteration
+        counter = 0
+        time_ = 0.0
+        sqtime_ = 0.0
+    else:
+        # ! not used on the other workers; only serves as a placeholder for
+        # ! the save function
+        atime = 0.0
+        asqtime = 0.0
+        time_ = 0.0
+        sqtime_ = 0.0
+        counter = 1
+
+    for iter_mc in range(start_iter[0], Nmc):
+
+        if rank == 0:
+            counter += 1
+            t_start = perf_counter()
+
+        # * PSGLA step-sizes
+        # ! to be moved elsewhere (only necessary when AXDA params have been
+        # ! updated)
+        gamma_x = 0.99 / (max_sq_kernel / regularization_[0] + 8 / regularization_[1])
+        gamma1 = 0.99 * regularization_[0]
+        gamma2 = 0.99 * regularization_[1]
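+        # (informal) gamma_x is chosen slightly below 1 / L, where
+        # L = max|fft_kernel|^2 / rho1 + 8 / rho2 upper-bounds the Lipschitz
+        # constant of the smooth part of the potential (the squared norm of
+        # the 2D discrete gradient operator is at most 8)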
+
+        # notational shortcuts (for in-place assignments)
+        Hx = buffer_Hx[local_slice_conv_adj]
+        Gx = buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))]
+
+        # sample image x (update local tile)
+        grad_x = gradient_x(
+            sync_conv_model,
+            adjoint_tv_communicator,
+            isfirst,
+            islast,
+            buffer_Hx,
+            buffer_Gx,
+            local_z1_mc,
+            local_u1_mc,
+            local_z2_mc,
+            local_u2_mc,
+            regularization_[0],
+            regularization_[1],
+            local_slice_tv,
+            local_slice_tv_adj,
+        )
+
+        sample_x(local_x[local_slice_tile], gamma_x, grad_x, local_rng)
+        local_x_mc[iter_mc % checkpoint_frequency] = local_x[local_slice_tile]
+
+        # communicate borders of each facet to appropriate neighbours
+        # ! in the synchronous case, need to update Hx and Gx for the next step
+        # ! local_x updated in-place here (border communication involved in
+        # ! direct operator)
+        buffer_Hx[local_slice_conv_adj] = sync_conv_model.apply_direct_operator(local_x)
+        # ! Hx and Gx updated in-place whenever buffer_Hx, buffer_Gx are
+        (
+            buffer_Gx[tuple((0, *local_slice_tv_adj))],
+            buffer_Gx[tuple((1, *local_slice_tv_adj))],
+        ) = chunk_gradient_2d(local_x[local_slice_tv], islast)
+
+        # communicate borders of buffer_Hx, buffer_Gx (adjoint op) for the next
+        # iteration
+        sync_conv_model.adjoint_communicator.update_borders(buffer_Hx)
+        adjoint_tv_communicator.update_borders(buffer_Gx)
+
+        # * sample auxiliary variables (z1, u1)
+        local_z1_mc = sample_z1(
+            local_z1_mc,
+            local_y,
+            Hx,
+            local_u1_mc,
+            regularization_[0],  # rho1
+            gamma1,
+            local_rng,
+        )
+        local_u1_mc = sample_u(
+            local_z1_mc,
+            Hx,
+            regularization_[0],  # rho1
+            regularization_[2],  # alpha1
+            local_rng,
+        )
+
+        # * sample auxiliary variables (z2, u2)
+        local_z2_mc = sample_z2(
+            local_z2_mc,
+            Gx,
+            local_u2_mc,
+            regularization_[1],  # rho2
+            gamma2,
+            regularization_[4] * gamma2,  # beta * gamma2
+            local_rng,
+        )
+
+        local_u2_mc = sample_u(
+            local_z2_mc,
+            Gx,
+            regularization_[1],  # rho2
+            regularization_[3],  # alpha2
+            local_rng,
+        )
+
+        # ! sample TV regularization parameter beta (to be debugged)
+        # local_l21_z2 = np.sum(np.sqrt(np.sum(local_z2_mc ** 2, axis=0)))
+        # data_fidelity, lrho1, lrho2, lalpha1, lalpha2, prior
+        local_scores = scores_function(
+            local_y,
+            buffer_Hx[local_slice_conv_adj],
+            buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))],
+            local_z1_mc,
+            local_u1_mc,
+            local_z2_mc,
+            local_u2_mc,
+        )
+
+        sync_conv_model.comm.Reduce(
+            [local_scores, MPI.DOUBLE],
+            [global_scores, MPI.DOUBLE],
+            op=MPI.SUM,
+            root=0,
+        )
+
+        if rank == 0:
+            if iter_mc + 1 > Nbi_p * checkpoint_frequency:
+                # rho1
+                # regularization_mc[iter_mc % checkpoint_frequency, 0] = sample_var(
+                #     a, b, d_M / 2, global_scores[1], local_rng
+                # )
+                regularization_mc[
+                    iter_mc % checkpoint_frequency, 0
+                ] = regularization_mc[(iter_mc - 1) % checkpoint_frequency, 0]
+
+                # rho2
+                # regularization_mc[iter_mc % checkpoint_frequency, 1] = sample_var(
+                #     a, b, d_tv / 2, global_scores[2], local_rng
+                # )
+                regularization_mc[
+                    iter_mc % checkpoint_frequency, 1
+                ] = regularization_mc[(iter_mc - 1) % checkpoint_frequency, 1]
+
+                # alpha1
+                # regularization_mc[iter_mc % checkpoint_frequency, 2] = sample_var(
+                #     a, b, d_M / 2, global_scores[3], local_rng
+                # )
+                regularization_mc[
+                    iter_mc % checkpoint_frequency, 2
+                ] = regularization_mc[(iter_mc - 1) % checkpoint_frequency, 2]
+
+                # alpha2
+                # regularization_mc[iter_mc % checkpoint_frequency, 3] = sample_var(
+                #     a, b, d_N, global_scores[4], local_rng
+                # )
+                regularization_mc[
+                    iter_mc % checkpoint_frequency, 3
+                ] = regularization_mc[(iter_mc - 1) % checkpoint_frequency, 3]
+
+                # beta
+                # regularization_mc[iter_mc % checkpoint_frequency, 4] = sample_beta(
+                #     a, b, sync_conv_model.image_size, global_scores[5], local_rng
+                # )
+                regularization_mc[
+                    iter_mc % checkpoint_frequency, 4
+                ] = regularization_mc[(iter_mc - 1) % checkpoint_frequency, 4]
+            else:
+                regularization_mc[iter_mc % checkpoint_frequency] = regularization_
+            regularization_ = regularization_mc[iter_mc % checkpoint_frequency]
+        sync_conv_model.comm.Bcast([regularization_, 5, MPI.DOUBLE], root=0)
+
+        if rank == 0:
+            # update timing
+            t_stop = perf_counter()
+            elapsed_time = t_stop - t_start
+            time_ += elapsed_time
+            sqtime_ += elapsed_time**2
+
+        # evolution of the score, :math:`-\log p(x | y)`
+        # if np.mod(iter_mc, monitor_frequency) == 0:
+        # local_score[0] = score_function(
+        #     local_y,
+        #     buffer_Hx[local_slice_conv_adj],
+        #     buffer_Gx[tuple((np.s_[:], *local_slice_tv_adj))],
+        #     local_z1_mc,
+        #     local_u1_mc,
+        #     local_z2_mc,
+        #     local_u2_mc,
+        #     rho1_[0],
+        #     rho2_[0],
+        #     alpha1_[0],
+        #     alpha2_[0],
+        #     beta_[0],
+        # )
+        # sync_conv_model.comm.Reduce(
+        #     [local_score, MPI.DOUBLE],
+        #     [global_score, MPI.DOUBLE],
+        #     op=MPI.SUM,
+        #     root=0,
+        # )
+
+        if rank == 0:
+            global_score[0] = (
+                global_scores[0]
+                + global_scores[1] / regularization_[0]  # rho1
+                + global_scores[2] / regularization_[1]  # rho2
+                + global_scores[3] / regularization_[2]  # alpha1
+                + global_scores[4] / regularization_[3]  # alpha2
+                + regularization_[4] * global_scores[5]  # beta
+            )
+            logger.info(
+                r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(
+                    iter_mc,
+                    global_score[0],
+                )
+            )
+            score_array[iter_mc % checkpoint_frequency] = global_score[0]
+            # display_memory(logger)
+            # breakpoint() # built-in breakpoint in Python (equivalent of MATLAB `keyboard`)
+
+        # * checkpoint
+        # TODO: need to handle the case where algo terminates before this
+        # TODO: instruction is triggered (i.e., saving the data)
+        # cannot be triggered afterwards, otherwise point 0 would be lost
+        if np.mod(iter_mc + 1, checkpoint_frequency) == 0:
+            if rank == 0:
+                logger.info("Writing checkpoint")
+                atime = time_ / counter
+                asqtime = sqtime_ / counter
+            saving_per_process(
+                rank,
+                sync_conv_model.comm,
+                checkpointer,
+                iter_mc,
+                local_rng,
+                local_x_mc,
+                local_u1_mc,
+                local_z1_mc,
+                local_u2_mc,
+                local_z2_mc,
+                regularization_mc,
+                score_array,
+                counter,
+                atime,
+                asqtime,
+                checkpoint_frequency,
+            )
+            if rank == 0:
+                counter = 0
+                time_ = 0.0
+                sqtime_ = 0.0
+
+        if rank == 0:
+            pbar.update()
+
+    # * finalize sampler
+    if rank == 0:
+        pbar.close()
+        # manager.stop()
+        logger.info("Writing checkpoint")
+
+    # ! if Nmc is a multiple of checkpoint_frequency, do not trigger final
+    # ! backup, as it will be redundant with the previous one
+    if np.mod(Nmc, checkpoint_frequency) > 0:
+        atime = time_ / (Nmc % checkpoint_frequency)
+        asqtime = sqtime_ / (Nmc % checkpoint_frequency)
+
+        saving_per_process(
+            rank,
+            sync_conv_model.comm,
+            checkpointer,
+            iter_mc,
+            local_rng,
+            local_x_mc,
+            local_u1_mc,
+            local_z1_mc,
+            local_u2_mc,
+            local_z2_mc,
+            regularization_mc,
+            score_array,
+            counter,
+            atime,
+            asqtime,
+            Nmc % checkpoint_frequency,
+        )
+
+    return
+
+
+if __name__ == "__main__":
+    print("Full MATLAB vs Python validation not implemented yet")
+
+    # pr = cProfile.Profile()
+    # pr.enable()
+    # YOUR MAIN FUNCTION
+    # pr.disable()
+
+    # # Dump results:
+    # # - for binary dump
+    # pr.dump_stats('cpu_%d.prof' %comm.rank)
+    # # - for text dump
+    # with open( 'cpu_%d.txt' %comm.rank, 'w') as output_file:
+    #     sys.stdout = output_file
+    #     pr.print_stats( sort='time' )
+    #     sys.stdout = sys.__stdout__
+
+    # snakeviz output.prof # to visualize the results
+
+    # r = score_function(np.ones((2,2)), np.ones((2,2)), np.ones((2,2)), np.ones((2,2)), np.ones((2,2)), np.ones((2,2)), np.ones((2,2)), 1, 1, 1, 1, 1)
+
+    pass
diff --git a/src/aaxda/samplers/serial/__init__.py b/src/aaxda/samplers/serial/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/samplers/serial/spa_pmyula.py b/src/aaxda/samplers/serial/spa_pmyula.py
new file mode 100644
index 0000000000000000000000000000000000000000..75d5ea55baf6b4e793952281ce0ef29175af257e
--- /dev/null
+++ b/src/aaxda/samplers/serial/spa_pmyula.py
@@ -0,0 +1,1116 @@
+"""Serial Python implementation of the sampler described in
+:cite:p:`Vono2019icassp`.
+
+The use of a linear convolution model leads to a minor modification in the
+sampling of the image compared to :cite:p:`Vono2019icassp`, in that the 0-padded
+image is directly sampled.
+"""
+from logging import Logger
+from time import perf_counter
+
+import numpy as np
+from numba import jit
+from numpy.random import default_rng
+from tqdm import tqdm
+
+from aaxda.models.jtv import tv
+from aaxda.models.models import SerialConvModel
+from aaxda.models.prox import (
+    kullback_leibler,
+    prox_kullback_leibler,
+    prox_nonegativity,
+    prox_tv_chambolle,
+)
+
+# prox_tv_primal_dual,
+from aaxda.utils.checkpoint import SerialCheckpoint
+
+
+def loading(
+    checkpointer: SerialCheckpoint, warmstart_iter: int, rng: np.random.Generator
+):
+    r"""Function interface around the :meth:`SerialCheckpoint.load` method.
+
+    Parameters
+    ----------
+    checkpointer :
+        Serial checkpoint object.
+    warmstart_iter :
+        Warm-start iteration (indicating the name of the file to be loaded).
+    rng :
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray, int, float
+        Variables required to restart the sampler.
+    """
+    # select = 7 * [np.s_[:]] + 3 * [np.s_[-1]]
+    # ! if full history is saved, load only the last element from the checkpoint
+    # select = 10 * [np.s_[-1]]
+    select = [np.s_[-1]] + 6 * [np.s_[:]] + 3 * [np.s_[-1]]
+    dic_var = checkpointer.load(
+        warmstart_iter,
+        select,
+        rng,
+        "x",
+        "u1",
+        "z1",
+        "u2",
+        "z2",
+        "u3",
+        "z3",
+        "beta",
+        "score",
+        "iter",
+    )
+    return (
+        dic_var["x"],
+        dic_var["u1"],
+        dic_var["z1"],
+        dic_var["u2"],
+        dic_var["z2"],
+        dic_var["u3"],
+        dic_var["z3"],
+        dic_var["beta"],
+        dic_var["score"],
+        dic_var["iter"],
+    )
+
+
+def saving(
+    checkpointer: SerialCheckpoint,
+    warmstart_iter: int,
+    rng: np.random.Generator,
+    x,
+    u1,
+    u2,
+    u3,
+    z1,
+    z2,
+    z3,
+    beta,
+    score,
+    iter_mc,
+    counter,
+    atime,
+    asqtime,
+    atime1,
+    asqtime1,
+    atime2,
+    asqtime2,
+    atime3,
+    asqtime3,
+    nsamples,
+):
+    """Function interface around the :meth:`SerialCheckpoint.save` method.
+
+    Parameters
+    ----------
+    checkpointer : SerialCheckpoint
+        Serial checkpoint object.
+    warmstart_iter :
+        Iteration identifier used to build the name of the checkpoint file to
+        be written.
+    rng :
+        Random number generator.
+    x : numpy.ndarray
+        Image to be inferred.
+    u1 : numpy.ndarray
+        Augmentation variable.
+    u2 : numpy.ndarray
+        Augmentation variable.
+    u3 : numpy.ndarray
+        Augmentation variable.
+    z1 : numpy.ndarray
+        Splitting variable (convolution).
+    z2 : numpy.ndarray
+        Splitting variable (TV).
+    z3 : numpy.ndarray
+        Splitting variable (non-negativity constraint).
+    beta : numpy.ndarray
+        Regularization parameter.
+    score : numpy.ndarray
+        Value of the potential (minus log likelihood) across the iterations.
+    iter_mc : int
+        Index of current iteration.
+    counter : int
+        Number of iterations since the last checkpoint (used to normalize the
+        timings).
+    atime : numpy.ndarray of float, (1,)
+        Average time per iteration.
+    asqtime : numpy.ndarray of float, (1,)
+        Average of squared time per iteration (to compute the variance).
+    atime1 : numpy.ndarray of float, (1,)
+        Average time per iteration to update variables resulting from splitting
+        1.
+    asqtime1 : numpy.ndarray of float, (1,)
+        Average of squared time per iteration to handle splitting 1 (to compute
+        the variance).
+    atime2 : numpy.ndarray of float, (1,)
+        Average time per iteration to update variables resulting from splitting
+        2.
+    asqtime2 : numpy.ndarray of float, (1,)
+        Average of squared time per iteration to handle splitting 2 (to compute
+        the variance).
+    atime3 : numpy.ndarray of float, (1,)
+        Average time per iteration to update variables resulting from splitting
+        3.
+    asqtime3 : numpy.ndarray of float, (1,)
+        Average of squared time per iteration to handle splitting 3 (to compute
+        the variance).
+    nsamples : int
+        Number of samples saved to disk.
+    """
+    # order: x, z1, u1, z2, u2, z3, u3, ..., atime, asqtime, counter, beta, score, iter
+    # ! need to specify compression opt from the start
+    chunk_sizes = [(1, *x.shape[1:])] + 37 * [None]
+
+    # mean (for MMSE)
+    x_m = np.mean(x[:nsamples, ...], axis=0)
+    z1_m = np.mean(z1[:nsamples, ...], axis=0)
+    u1_m = np.mean(u1[:nsamples, ...], axis=0)
+    z2_m = np.mean(z2[:nsamples, ...], axis=0)
+    u2_m = np.mean(u2[:nsamples, ...], axis=0)
+    z3_m = np.mean(z3[:nsamples, ...], axis=0)
+    u3_m = np.mean(u3[:nsamples, ...], axis=0)
+
+    # mean of squares (for variance across iterations)
+    x_m_sq = np.mean(x[:nsamples, ...] ** 2, axis=0)
+    z1_m_sq = np.mean(z1[:nsamples, ...] ** 2, axis=0)
+    u1_m_sq = np.mean(u1[:nsamples, ...] ** 2, axis=0)
+    z2_m_sq = np.mean(z2[:nsamples, ...] ** 2, axis=0)
+    u2_m_sq = np.mean(u2[:nsamples, ...] ** 2, axis=0)
+    z3_m_sq = np.mean(z3[:nsamples, ...] ** 2, axis=0)
+    u3_m_sq = np.mean(u3[:nsamples, ...] ** 2, axis=0)
+
+    # MAP estimator (based on local batch)
+    id_map = np.argmin(score[:nsamples])
+
+    # save useful quantities to disk
+    checkpointer.save(
+        iter_mc + 1,
+        chunk_sizes,
+        rng=rng,
+        x=x,
+        z1=z1[nsamples - 1],
+        u1=u1[nsamples - 1],
+        z2=z2[nsamples - 1],
+        u2=u2[nsamples - 1],
+        z3=z3[nsamples - 1],
+        u3=u3[nsamples - 1],
+        x_m=x_m,
+        z1_m=z1_m,
+        u1_m=u1_m,
+        z2_m=z2_m,
+        u2_m=u2_m,
+        z3_m=z3_m,
+        u3_m=u3_m,
+        x_m_sq=x_m_sq,
+        z1_m_sq=z1_m_sq,
+        u1_m_sq=u1_m_sq,
+        z2_m_sq=z2_m_sq,
+        u2_m_sq=u2_m_sq,
+        z3_m_sq=z3_m_sq,
+        u3_m_sq=u3_m_sq,
+        x_map=x[id_map],
+        z1_map=z1[id_map, ...],
+        u1_map=u1[id_map, ...],
+        z2_map=z2[id_map, ...],
+        u2_map=u2[id_map, ...],
+        z3_map=z3[id_map, ...],
+        u3_map=u3[id_map, ...],
+        atime=atime,
+        asqtime=asqtime,
+        atime1=atime1,
+        asqtime1=asqtime1,
+        atime2=atime2,
+        asqtime2=asqtime2,
+        atime3=atime3,
+        asqtime3=asqtime3,
+        beta=beta,
+        score=score,
+        iter=iter_mc + 1,
+        counter=counter,
+    )
+
+    pass
+
+
+def sample_x(
+    cconv_model: SerialConvModel,
+    u1,
+    z1,
+    u2,
+    z2,
+    u3,
+    z3,
+    rho1: float,
+    rho2: float,
+    rho3: float,
+    rng: np.random.Generator,
+):
+    r"""Sample the main parameter of the model (image).
+
+    Parameters
+    ----------
+    cconv_model : SerialConvModel
+        Serial circular convolution model.
+    u1 : numpy.ndarray
+        Augmentation variable.
+    z1 : numpy.ndarray
+        Splitting variable.
+    u2 : numpy.ndarray
+        Augmentation variable.
+    z2 : numpy.ndarray
+        Splitting variable.
+    u3 : numpy.ndarray
+        Augmentation variable.
+    z3 : numpy.ndarray
+        Splitting variable.
+    rho1 : float
+        AXDA splitting parameter.
+    rho2 : float
+        AXDA splitting parameter.
+    rho3 : float
+        AXDA splitting parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    x_ : numpy.ndarray
+        New sample for the main variable.
+    Hx : numpy.ndarray
+        Result of the convolution operator applied to ``x_``.
+
+    Note
+    ----
+    The 0-padded image is sampled directly (the whole problem is formulated
+    with an image of the size of the full convolution).
+    """
+    cov = 1 / (np.abs(cconv_model.fft_kernel) ** 2 / rho1 + 1 / rho2 + 1 / rho3)
+    Fx = (
+        np.fft.rfftn(z1 - u1, cconv_model.data_size)
+        * np.conj(cconv_model.fft_kernel)
+        / rho1
+    )
+    Fx += np.fft.rfftn((z2 - u2) / rho2 + (z3 - u3) / rho3, cconv_model.data_size)
+    Fx *= cov
+    Fx += np.sqrt(0.5 * cov) * (
+        rng.standard_normal(size=cconv_model.fft_kernel.shape)
+        + 1j * rng.standard_normal(size=cconv_model.fft_kernel.shape)
+    )
+    x_ = np.fft.irfftn(Fx, cconv_model.data_size)
+    Hx = cconv_model.apply_direct_operator(x_)
+    return x_, Hx
+
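+# Note (informal): with a circulant convolution operator, the conditional of
+# x is Gaussian with a covariance that is diagonal in the Fourier domain,
+# cov = 1 / (|fft_kernel|^2 / rho1 + 1 / rho2 + 1 / rho3) per frequency;
+# sample_x thus perturbs the Fourier-domain mean with complex white noise
+# scaled by sqrt(cov / 2) and maps the result back with irfftn.
+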
+
+def sample_z1(
+    z1, y, Hx, u1, rho1: float, gamma1: float, lambda1: float, rng: np.random.Generator
+):
+    r"""Sample first splitting variable ``z1``.
+
+    Parameters
+    ----------
+    z1 : numpy.ndarray
+        Splitting variable.
+    y : numpy.ndarray
+        Input observations.
+    Hx : numpy.ndarray
+        Result of the convolution operator applied to current ``x``.
+    u1 : numpy.ndarray
+        Augmentation variable.
+    rho1 : float
+        Splitting parameter.
+    gamma1 : float
+        P-MYULA parameter.
+    lambda1 : float
+        P-MYULA parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for ``z1``.
+    """
+    g = gamma1 / lambda1
+    gradH1 = (z1 - u1 - Hx) / rho1
+    proxH1 = prox_kullback_leibler(z1, y, lam=lambda1)
+    z = (
+        (1 - g) * z1
+        - gamma1 * gradH1
+        + g * proxH1
+        + np.sqrt(2 * gamma1) * rng.standard_normal(size=z1.shape)
+    )
+    return z
+
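+# (informal) A P-MYULA step combines an explicit gradient step on the smooth
+# splitting term with a step towards the proximity operator of the
+# Moreau-Yosida smoothed non-smooth term, plus Gaussian noise:
+#
+#     z <- (1 - gamma/lambda) * z - gamma * grad + (gamma/lambda) * prox(z)
+#          + sqrt(2 * gamma) * xi,   xi ~ N(0, I)
+#
+# the same template is reused for z2 and z3 below.
+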
+
+def sample_z2(
+    z2,
+    x,
+    u2,
+    rho2: float,
+    gamma2: float,
+    lambda2: float,
+    beta_: float,
+    rng: np.random.Generator,
+):
+    r"""Sample second splitting variable ``z2``.
+
+    Parameters
+    ----------
+    z2 : numpy.ndarray
+        Splitting variable.
+    x : numpy.ndarray
+        Current image sample.
+    u2 : numpy.ndarray
+        Augmentation variable.
+    rho2 : float
+        Splitting parameter.
+    gamma2 : float
+        P-MYULA parameter.
+    lambda2 : float
+        P-MYULA parameter.
+    beta_ : float
+        Current value of the regularization parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for ``z2``.
+    """
+    g = gamma2 / lambda2
+    gradH2 = (z2 - u2 - x) / rho2
+    # ! Condat with 1e4 iterations and Chambolle with 10 lead roughly to the
+    # ! same order of magnitude for the reg. parameter later on
+    # proxH2 = prox_tv_primal_dual(
+    #     z2, 0.01, lam=beta_ * lambda2, tol=1e-5, max_iter=100, verbose=False, rho=1.99
+    # )[0]  # ! change prox operator?
+    proxH2 = prox_tv_chambolle(z2, lam=beta_ * lambda2, tol=1e-5, max_iter=10)[0]
+    z = (
+        (1 - g) * z2
+        - gamma2 * gradH2
+        + g * proxH2
+        + np.sqrt(2 * gamma2) * rng.standard_normal(size=z2.shape)
+    )
+    return z
+
+
+def sample_z3(
+    z3, x, u3, rho3: float, gamma3: float, lambda3: float, rng: np.random.Generator
+):
+    r"""Sample third splitting variable ``z3``.
+
+    Parameters
+    ----------
+    z3 : numpy.ndarray
+        Splitting variable
+    x : numpy.ndarray
+        Current image sample.
+    u3 : numpy.ndarray
+        Augmentation variable.
+    rho3 : float
+        Splitting parameter.
+    gamma3 : float
+        P-MYULA parameter.
+    lambda3 : float
+        P-MYULA parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for ``z3``.
+    """
+    g = gamma3 / lambda3
+    gradH3 = (z3 - x - u3) / rho3
+    proxH3 = z3.copy()
+    prox_nonegativity(proxH3)
+    z = (
+        (1 - g) * z3
+        - gamma3 * gradH3
+        + g * proxH3
+        + np.sqrt(2 * gamma3) * rng.standard_normal(size=u3.shape)
+    )
+    return z
+
+
+def sample_u(z, Hx, rho: float, alph: float, rng: np.random.Generator):
+    r"""Sample any augmentation variable :math:`(u_i)_{(1 \leq i \leq 3)}`.
+
+    Parameters
+    ----------
+    z : numpy.ndarray
+        Splitting variable
+    Hx : numpy.ndarray
+        Result of a splitting linear operator applied to the current image
+        ``x``.
+    rho : float
+        Splitting parameter.
+    alph : float
+        Augmentation parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the augmentation :math:`u_i` considered.
+    """
+    normalization_factor = np.sqrt(rho + alph)
+    mu = alph * (z - Hx) / normalization_factor**2
+    return mu + rng.standard_normal(size=mu.shape) * (
+        np.sqrt(rho * alph) / normalization_factor
+    )
+
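+# Sanity-check sketch (illustrative values only): the conditional of u_i is
+# Gaussian with mean alph * (z - Hx) / (rho + alph) and standard deviation
+# sqrt(rho * alph / (rho + alph)):
+#
+#     rng = default_rng(0)
+#     u = sample_u(np.ones((4, 4)), np.zeros((4, 4)), 1.0, 2.0, rng)
+#     # E[u] = 2 / 3, std[u] = sqrt(2 / 3)
+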
+
+def sample_beta(a: float, b: float, N, tv_z2: float, rng: np.random.Generator) -> float:
+    r"""Sample the regularization parameter :math:`\beta`.
+
+    Parameters
+    ----------
+    a : float
+        Hyperparameter.
+    b : float
+        Hyperparameter.
+    N : numpy.ndarray of int
+        Image size.
+    tv_z2 : float
+        Value of :math:`\text{TV}(z_2)`.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    float
+        New state for the regularization parameter.
+    """
+    return rng.gamma(a + np.prod(N), 1 / (b + tv_z2))
+
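+# (informal) By conjugacy of the Gamma prior with the TV potential, beta has
+# shape a + prod(N) and rate b + TV(z2); numpy's Generator.gamma takes a
+# *scale* parameter, hence the 1 / (b + tv_z2) above.
+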
+
+# TODO: check if this needs to be adapted to be consistent with Matakos2013
+@jit(nopython=True, cache=True)
+def score_function(
+    y,
+    Hx,
+    x,
+    z1,
+    u1,
+    z2,
+    u2,
+    z3,
+    u3,
+    rho1: float,
+    rho2: float,
+    rho3: float,
+    alpha1: float,
+    alpha2: float,
+    alpha3: float,
+    beta: float,
+):
+    """Compute the current value of the potential (minus log-likelihood).
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input observations.
+    Hx : numpy.ndarray
+        Result of the convolution operator applied on ``x``.
+    x : numpy.ndarray
+        Current value of the image ``x``.
+    z1 : numpy.ndarray
+        Splitting variable 1 (convolution).
+    u1 : numpy.ndarray
+        Augmentation variable 1.
+    z2 : numpy.ndarray
+        Splitting variable 2 (TV).
+    u2 : numpy.ndarray
+        Augmentation variable 2.
+    z3 : numpy.ndarray
+        Splitting variable 3 (non-negativity).
+    u3 : numpy.ndarray
+        Augmentation variable 3.
+    rho1 : float
+        Splitting parameter.
+    rho2 : float
+        Splitting parameter.
+    rho3 : float
+        Splitting parameter.
+    alpha1 : float
+        Augmentation parameter.
+    alpha2 : float
+        Augmentation parameter.
+    alpha3 : float
+        Augmentation parameter.
+    beta : float
+        Current regularization parameter.
+
+    Returns
+    -------
+    float
+        Current value of the potential.
+    """
+    score = (
+        kullback_leibler(z1, y)
+        + np.sum(np.abs(Hx - z1 + u1) ** 2) / (2 * rho1)
+        + np.sum((x - z2 + u2) ** 2) / (2 * rho2)
+        + np.sum((x - z3 + u3) ** 2) / (2 * rho3)
+        + np.sum(np.abs(u1) ** 2) / (2 * alpha1)
+        + np.sum(u2**2) / (2 * alpha2)
+        + np.sum(u3**2) / (2 * alpha3)
+        + beta * tv(z2)
+    )
+
+    return score
+
+
+def spa_pmyula(
+    y,
+    checkpointname: str,
+    checkpoint_frequency: int,
+    warmstart_iter: int,
+    monitor_frequency: int,
+    checkpointflag: bool,
+    cconv_model: SerialConvModel,
+    checkpointer: SerialCheckpoint,
+    rho1: float,
+    rho2: float,
+    rho3: float,
+    alpha1: float,
+    alpha2: float,
+    alpha3: float,
+    beta: float,
+    a: float,
+    b: float,
+    Nmc: int,
+    M,
+    seed: int,
+    logger: Logger,
+):
+    """SPA sampler based on P-MYULA to address a supervised 2D Poisson
+    deconvolution problem relying on a TV prior.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input noisy observations.
+    checkpointname : str
+        Root string to build the name of the checkpoint files.
+    checkpoint_frequency : int
+        Number of iterations after which a checkpoint file is written to disk.
+    warmstart_iter : int
+        Iteration identifier of the warm-start checkpoint file.
+    monitor_frequency : int
+        Number of iterations after which the potential is computed and
+        written to the log file.
+    checkpointflag : bool
+        Flag to activate warm-start.
+    cconv_model : SerialConvModel
+        Serial circular convolution model object.
+    checkpointer : SerialCheckpoint
+        Checkpoint object.
+    rho1 : float
+        Splitting parameter.
+    rho2 : float
+        Splitting parameter.
+    rho3 : float
+        Splitting parameter.
+    alpha1 : float
+        Augmentation parameter.
+    alpha2 : float
+        Augmentation parameter.
+    alpha3 : float
+        Augmentation parameter.
+    beta : float
+        Initial value of the regularization parameter.
+    a : float
+        Hyperparameters for the regularization parameter.
+    b : float
+        Hyperparameters for the regularization parameter.
+    Nmc : int
+        Total number of Monte-Carlo samples to be generated.
+    M : float
+        Maximum intensity of the image to be recovered.
+    seed : int
+        Integer seed to instantiate the random number generator.
+    logger : logging.Logger
+        Logger object.
+    """
+    # ! https://stackoverflow.com/questions/38543506/change-logging-print-function-to-tqdm-write-so-logging-doesnt-interfere-wit
+    # log = logging.getLogger(__name__)
+    # log.setLevel(logging.INFO)
+    # log.addHandler(TqdmLoggingHandler())
+
+    # * P-MYULA parameters
+    # ! see if this is the best way to choose these parameters
+    lambda1 = rho1
+    lambda2 = rho2
+    lambda3 = rho3
+    gamma1 = rho1 / 4
+    gamma2 = rho2 / 4
+    gamma3 = rho3 / 4
+    data_size = cconv_model.data_size
+    N = cconv_model.data_size
+
+    # * initialization
+    x_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+    z1_mc = np.empty((checkpoint_frequency, *data_size), dtype="d")
+    u1_mc = np.empty((checkpoint_frequency, *data_size), dtype="d")
+    z2_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+    u2_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+    z3_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+    u3_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+
+    # save score array along iterations
+    score_array = np.zeros((checkpoint_frequency,), dtype="d")
+    beta_mc = np.empty((checkpoint_frequency,), dtype="d")
+
+    # * rng
+    rng = default_rng(seed)
+
+    if checkpointflag:
+        # ! insert last element obtained at the end of the buffer
+        (
+            x_mc[-1],
+            u1_mc[-1],
+            z1_mc[-1],
+            u2_mc[-1],
+            z2_mc[-1],
+            u3_mc[-1],
+            z3_mc[-1],
+            beta_mc[-1],
+            score_array[-1],
+            start_iter,
+        ) = loading(checkpointer, warmstart_iter, rng)
+        # ! should not be required
+        x_mc[0] = x_mc[-1]
+    else:
+        # * initialization
+        start_iter = 1
+        x_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+        u1_mc[0] = rng.integers(0, high=M, size=data_size, endpoint=True).astype(float)
+        z1_mc[0] = rng.integers(0, high=M, size=data_size, endpoint=True).astype(float)
+        u2_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+        z2_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+        u3_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+        z3_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+
+        # ! should not be required
+        beta_mc[0] = beta
+
+    # ! Hx is recomputed whenever x is updated
+    Hx = cconv_model.apply_direct_operator(x_mc[0])
+
+    # * initialize score (checking consistency after warmstart)
+    if not checkpointflag:
+        score_array[0] = score_function(
+            y,
+            Hx,
+            x_mc[0],
+            z1_mc[0],
+            u1_mc[0],
+            z2_mc[0],
+            u2_mc[0],
+            z3_mc[0],
+            u3_mc[0],
+            rho1,
+            rho2,
+            rho3,
+            alpha1,
+            alpha2,
+            alpha3,
+            beta_mc[0],
+        )
+        score = score_array[0]
+    else:
+        score_array[-1] = score_function(
+            y,
+            Hx,
+            x_mc[-1],
+            z1_mc[-1],
+            u1_mc[-1],
+            z2_mc[-1],
+            u2_mc[-1],
+            z3_mc[-1],
+            u3_mc[-1],
+            rho1,
+            rho2,
+            rho3,
+            alpha1,
+            alpha2,
+            alpha3,
+            beta_mc[-1],
+        )
+        score = score_array[-1]
+
+    logger.info(
+        r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(
+            start_iter - 1,
+            score,
+        )
+    )
+
+    pbar = tqdm(total=Nmc - 1, desc="Sampling", unit="it")
+    pbar.update(start_iter)
+    logger.info("Start SPA sampler (pmyula) from t={}".format(start_iter))
+
+    # Auxiliary variables to measure timing (average + std) per iteration
+    counter = 0
+    atime = np.zeros((1,), dtype="d")
+    asqtime = np.zeros((1,), dtype="d")
+    time_ = 0.0
+    sqtime_ = 0.0
+
+    # timing for update steps related to splitting 1
+    atime1 = np.zeros((1,), dtype="d")
+    asqtime1 = np.zeros((1,), dtype="d")
+    time1_ = 0.0
+    sqtime1_ = 0.0
+
+    # timing for update steps related to splitting 2
+    atime2 = np.zeros((1,), dtype="d")
+    asqtime2 = np.zeros((1,), dtype="d")
+    time2_ = 0.0
+    sqtime2_ = 0.0
+
+    # timing for update steps related to splitting 3
+    atime3 = np.zeros((1,), dtype="d")
+    asqtime3 = np.zeros((1,), dtype="d")
+    time3_ = 0.0
+    sqtime3_ = 0.0
+
+    for iter_mc in range(start_iter, Nmc):
+
+        counter += 1
+        t_start = perf_counter()
+
+        # notational shortcuts (for in-place assignments)
+        past_iter = (iter_mc - 1) % checkpoint_frequency
+        current_iter = iter_mc % checkpoint_frequency
+        # next_iter = (iter_mc + 1) % checkpoint_frequency
+
+        # sample image x
+        x_mc[current_iter], Hx = sample_x(
+            cconv_model,
+            u1_mc[past_iter],
+            z1_mc[past_iter],
+            u2_mc[past_iter],
+            z2_mc[past_iter],
+            u3_mc[past_iter],
+            z3_mc[past_iter],
+            rho1,
+            rho2,
+            rho3,
+            rng,
+        )
+        t_stop0 = perf_counter()
+
+        # * sample auxiliary variables (z1, u1)
+        z1_mc[current_iter] = sample_z1(
+            z1_mc[past_iter],
+            y,
+            Hx,
+            u1_mc[past_iter],
+            rho1,
+            gamma1,
+            lambda1,
+            rng,
+        )
+        u1_mc[current_iter] = sample_u(
+            z1_mc[current_iter],
+            Hx,
+            rho1,
+            alpha1,
+            rng,
+        )
+
+        # update timing 1
+        t_stop1 = perf_counter()
+        elapsed_time = t_stop1 - t_stop0
+        time1_ += elapsed_time
+        sqtime1_ += elapsed_time**2
+
+        # * sample auxiliary variables (z2, u2)
+        z2_mc[current_iter] = sample_z2(
+            z2_mc[past_iter],
+            x_mc[current_iter],
+            u2_mc[past_iter],
+            rho2,
+            gamma2,
+            lambda2,
+            beta_mc[past_iter],
+            rng,
+        )
+
+        u2_mc[current_iter] = sample_u(
+            z2_mc[current_iter],
+            x_mc[current_iter],
+            rho2,
+            alpha2,
+            rng,
+        )
+
+        # update timing 2
+        t_stop2 = perf_counter()
+        elapsed_time = t_stop2 - t_stop1
+        time2_ += elapsed_time
+        sqtime2_ += elapsed_time**2
+
+        # * sample auxiliary variables (z3, u3)
+        z3_mc[current_iter] = sample_z3(
+            z3_mc[past_iter],
+            x_mc[current_iter],
+            u3_mc[past_iter],
+            rho3,
+            gamma3,
+            lambda3,
+            rng,
+        )
+
+        u3_mc[current_iter] = sample_u(
+            z3_mc[current_iter],
+            x_mc[current_iter],
+            rho3,
+            alpha3,
+            rng,
+        )
+
+        # update timing 3
+        t_stop3 = perf_counter()
+        elapsed_time = t_stop3 - t_stop2
+        time3_ += elapsed_time
+        sqtime3_ += elapsed_time**2
+
+        # sample TV regularization parameter beta (to be debugged)
+        # tv_z2 = tv(z2_mc[current_iter])
+
+        # beta_mc[current_iter] = sample_beta(a, b, N, tv_z2, rng)
+        beta_mc[current_iter] = beta
+
+        # update timing
+        t_stop = perf_counter()
+        elapsed_time = t_stop - t_start
+        time_ += elapsed_time
+        sqtime_ += elapsed_time**2
+
+        # evolution of the score, :math:`-\log p(x | y)`
+        if np.mod(iter_mc, monitor_frequency) == 0:
+            score = score_function(
+                y,
+                Hx,
+                x_mc[current_iter],
+                z1_mc[current_iter],
+                u1_mc[current_iter],
+                z2_mc[current_iter],
+                u2_mc[current_iter],
+                z3_mc[current_iter],
+                u3_mc[current_iter],
+                rho1,
+                rho2,
+                rho3,
+                alpha1,
+                alpha2,
+                alpha3,
+                beta_mc[current_iter],
+            )
+            logger.info(r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(iter_mc, score))
+            score_array[current_iter] = score
+
+        # * checkpoint
+        if np.mod(iter_mc + 1, checkpoint_frequency) == 0:
+            logger.info("Writing checkpoint")
+            atime[0] = time_ / counter
+            asqtime[0] = sqtime_ / counter
+            atime1[0] = time1_ / counter
+            asqtime1[0] = sqtime1_ / counter
+            atime2[0] = time2_ / counter
+            asqtime2[0] = sqtime2_ / counter
+            atime3[0] = time3_ / counter
+            asqtime3[0] = sqtime3_ / counter
+            # backupname = "{}{}.{}".format(checkpointname, iter_mc + 1, "h5")
+            # checkpoint.save_mean_to_h5(
+            #     backupname,
+            #     rng,
+            #     iter_mc,
+            #     x_mc,
+            #     u1_mc,
+            #     z1_mc,
+            #     u2_mc,
+            #     z2_mc,
+            #     u3_mc,
+            #     z3_mc,
+            #     beta_mc,
+            #     score_array,
+            #     data_size,
+            #     N,
+            #     checkpoint_frequency,
+            # )
+            # checkpoint.save_to_h5(
+            #     backupname,
+            #     rng,
+            #     iter_mc,
+            #     x_mc,
+            #     u1_mc,
+            #     z1_mc,
+            #     u2_mc,
+            #     z2_mc,
+            #     u3_mc,
+            #     z3_mc,
+            #     beta_mc,
+            #     score_array,
+            #     data_size,
+            #     N,
+            #     checkpoint_frequency,
+            #     atime,
+            #     asqtime,
+            #     counter,
+            # )
+            saving(
+                checkpointer,
+                iter_mc,
+                rng,
+                x_mc,
+                u1_mc,
+                u2_mc,
+                u3_mc,
+                z1_mc,
+                z2_mc,
+                z3_mc,
+                beta_mc,
+                score_array,
+                iter_mc,
+                counter,
+                atime,
+                asqtime,
+                atime1,
+                asqtime1,
+                atime2,
+                asqtime2,
+                atime3,
+                asqtime3,
+                checkpoint_frequency,
+            )
+            time_ = 0.0
+            sqtime_ = 0.0
+            time1_ = 0.0
+            sqtime1_ = 0.0
+            time2_ = 0.0
+            sqtime2_ = 0.0
+            time3_ = 0.0
+            sqtime3_ = 0.0
+            counter = 0
+        pbar.update()
+
+    # * finalize sampler
+    pbar.close()
+    logger.info("Writing checkpoint")
+
+    # ! if Nmc is a multiple of checkpoint_frequency, do not trigger final
+    # backup, as it will be redundant with the previous one
+    if np.mod(Nmc, checkpoint_frequency) > 0:
+        atime[0] = time_ / counter
+        asqtime[0] = sqtime_ / counter
+        atime1[0] = time1_ / counter
+        asqtime1[0] = sqtime1_ / counter
+        atime2[0] = time2_ / counter
+        asqtime2[0] = sqtime2_ / counter
+        atime3[0] = time3_ / counter
+        asqtime3[0] = sqtime3_ / counter
+        # backupname = "{}{}.{}".format(checkpointname, Nmc, "h5")
+        # checkpoint.save_mean_to_h5(
+        #     backupname,
+        #     rng,
+        #     iter_mc,
+        #     x_mc,
+        #     u1_mc,
+        #     z1_mc,
+        #     u2_mc,
+        #     z2_mc,
+        #     u3_mc,
+        #     z3_mc,
+        #     beta_mc,
+        #     score_array,
+        #     data_size,
+        #     N,
+        #     Nmc % checkpoint_frequency,
+        # )
+        # checkpoint.save_to_h5(
+        #     backupname,
+        #     rng,
+        #     iter_mc,
+        #     x_mc,
+        #     u1_mc,
+        #     z1_mc,
+        #     u2_mc,
+        #     z2_mc,
+        #     u3_mc,
+        #     z3_mc,
+        #     beta_mc,
+        #     score_array,
+        #     data_size,
+        #     N,
+        #     Nmc % checkpoint_frequency,
+        #     atime,
+        #     asqtime,
+        #     counter,
+        # )
+        saving(
+            checkpointer,
+            iter_mc,
+            rng,
+            x_mc,
+            u1_mc,
+            u2_mc,
+            u3_mc,
+            z1_mc,
+            z2_mc,
+            z3_mc,
+            beta_mc,
+            score_array,
+            iter_mc,
+            counter,
+            atime,
+            asqtime,
+            atime1,
+            asqtime1,
+            atime2,
+            asqtime2,
+            atime3,
+            asqtime3,
+            Nmc % checkpoint_frequency,
+        )
+        time_ = 0.0
+        sqtime_ = 0.0
+        time1_ = 0.0
+        sqtime1_ = 0.0
+        time2_ = 0.0
+        sqtime2_ = 0.0
+        time3_ = 0.0
+        sqtime3_ = 0.0
+        counter = 0
+
+    pass
+
+
+if __name__ == "__main__":
+
+    # pr = cProfile.Profile()
+    # pr.enable()
+    # YOUR MAIN FUNCTION
+    # pr.disable()
+
+    # # Dump results:
+    # # - for binary dump
+    # pr.dump_stats('cpu_%d.prof' %comm.rank)
+    # # - for text dump
+    # with open( 'cpu_%d.txt' %comm.rank, 'w') as output_file:
+    #     sys.stdout = output_file
+    #     pr.print_stats( sort='time' )
+    #     sys.stdout = sys.__stdout__
+
+    # snakeviz output.prof # to visualize the results
+
+    pass
diff --git a/src/aaxda/samplers/serial/spa_psgla.py b/src/aaxda/samplers/serial/spa_psgla.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b6776000709bd499a6282bcfad968a7cf142e75
--- /dev/null
+++ b/src/aaxda/samplers/serial/spa_psgla.py
@@ -0,0 +1,833 @@
+"""Serial Python implementation of the proposed PSGLA within Gibbs sampler for
+supervised Poisson deconvolution under a TV prior.
+"""
+from logging import Logger
+from time import perf_counter
+
+import numpy as np
+from numba import jit
+from numpy.random import default_rng
+from tqdm import tqdm
+
+# import aaxda.utils.checkpoint_serial as checkpoint
+from aaxda.models.jtv import gradient_2d, gradient_2d_adjoint
+from aaxda.models.models import SerialConvModel
+from aaxda.models.prox import (
+    kullback_leibler,
+    l21_norm,
+    prox_kullback_leibler,
+    prox_l21norm,
+    prox_nonegativity,
+)
+from aaxda.utils.checkpoint import SerialCheckpoint
+
+
+def loading(
+    checkpointer: SerialCheckpoint, warmstart_iter: int, rng: np.random.Generator
+):
+    r"""Function interface around the :meth:`SerialCheckpoint.load` method.
+
+    Parameters
+    ----------
+    checkpointer :
+        Serial checkpoint object.
+    warmstart_iter :
+        Warm-start iteration (indicating the name of the file to be loaded).
+    rng :
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray, int, float
+        Variables required to restart the sampler.
+    """
+    # select = 5 * [np.s_[:]] + 3 * [np.s_[-1]]
+    # ! if full history is saved, load only the last element from the checkpoint
+    # select = 8 * [np.s_[-1]]
+    select = [np.s_[-1]] + 4 * [np.s_[:]] + 3 * [np.s_[-1]]
+    dic_var = checkpointer.load(
+        warmstart_iter,
+        select,
+        rng,
+        "x",
+        "u1",
+        "z1",
+        "u2",
+        "z2",
+        "beta",
+        "score",
+        "iter",
+    )
+    return (
+        dic_var["x"],
+        dic_var["u1"],
+        dic_var["z1"],
+        dic_var["u2"],
+        dic_var["z2"],
+        dic_var["beta"],
+        dic_var["score"],
+        dic_var["iter"],
+    )
+
+
+def saving(
+    checkpointer: SerialCheckpoint,
+    warmstart_iter: int,
+    rng: np.random.Generator,
+    x,
+    u1,
+    u2,
+    z1,
+    z2,
+    beta,
+    score,
+    iter_mc,
+    counter,
+    atime,
+    asqtime,
+    nsamples,
+):
+    """Function interface around the :meth:`SerialCheckpoint.save` method.
+
+    Parameters
+    ----------
+    checkpointer : SerialCheckpoint
+        Serial checkpoint object.
+    warmstart_iter :
+        Iteration identifier used to build the name of the checkpoint file to
+        be written.
+    rng :
+        Random number generator.
+    x : numpy.ndarray
+        Image to be inferred.
+    u1 : numpy.ndarray
+        Augmentation variable.
+    u2 : numpy.ndarray
+        Augmentation variable.
+    z1 : numpy.ndarray
+        Splitting variable (convolution).
+    z2 : numpy.ndarray
+        Splitting variable (discrete gradient).
+    beta : numpy.ndarray
+        Regularization parameter.
+    score : numpy.ndarray
+        Value of the potential (minus log likelihood) across the iterations.
+    iter_mc : int
+        Index of current iteration.
+    counter : int
+        Number of iterations since the last checkpoint (used to normalize the
+        timings).
+    atime : numpy.ndarray of float, (1,)
+        Average time per iteration.
+    asqtime : numpy.ndarray of float, (1,)
+        Average of squared time per iteration (to compute the variance).
+    nsamples : int
+        Number of samples saved to disk.
+    """
+    # order: x, z1, u1, z2, u2, ..., atime, asqtime, counter, beta, score, iter
+    # ! need to specify compression opt from the start
+    chunk_sizes = [(1, *x.shape[1:])] + 31 * [None]
+
+    # mean (for MMSE)
+    x_m = np.mean(x[:nsamples, ...], axis=0)
+    z1_m = np.mean(z1[:nsamples, ...], axis=0)
+    u1_m = np.mean(u1[:nsamples, ...], axis=0)
+    z2_m = np.mean(z2[:nsamples, ...], axis=0)
+    u2_m = np.mean(u2[:nsamples, ...], axis=0)
+
+    # mean of squares (for variance across iterations)
+    x_m_sq = np.mean(x[:nsamples, ...] ** 2, axis=0)
+    z1_m_sq = np.mean(z1[:nsamples, ...] ** 2, axis=0)
+    u1_m_sq = np.mean(u1[:nsamples, ...] ** 2, axis=0)
+    z2_m_sq = np.mean(z2[:nsamples, ...] ** 2, axis=0)
+    u2_m_sq = np.mean(u2[:nsamples, ...] ** 2, axis=0)
+
+    # MAP estimator (based on local batch)
+    id_map = np.argmin(score[:nsamples])
+
+    # save useful quantities to disk
+    checkpointer.save(
+        iter_mc + 1,
+        chunk_sizes,
+        rng=rng,
+        x=x,
+        z1=z1[nsamples - 1],
+        u1=u1[nsamples - 1],
+        z2=z2[nsamples - 1],
+        u2=u2[nsamples - 1],
+        x_m=x_m,
+        z1_m=z1_m,
+        u1_m=u1_m,
+        z2_m=z2_m,
+        u2_m=u2_m,
+        x_m_sq=x_m_sq,
+        z1_m_sq=z1_m_sq,
+        u1_m_sq=u1_m_sq,
+        z2_m_sq=z2_m_sq,
+        u2_m_sq=u2_m_sq,
+        x_map=x[id_map],
+        z1_map=z1[id_map, ...],
+        u1_map=u1[id_map, ...],
+        z2_map=z2[id_map, ...],
+        u2_map=u2[id_map, ...],
+        atime=atime,
+        asqtime=asqtime,
+        beta=beta,
+        score=score,
+        iter=iter_mc + 1,
+        counter=counter,
+    )
+
+
+def gradient_x(
+    Hx,
+    Gx,
+    serial_conv_model: SerialConvModel,
+    z1,
+    u1,
+    z2,
+    u2,
+    rho1: float,
+    rho2: float,
+):
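+    r"""Gradient of the smooth part of the AXDA potential with respect to
+    the image :math:`x`, i.e.,
+    :math:`H^T (Hx + u_1 - z_1) / \rho_1 + G^T (Gx + u_2 - z_2) / \rho_2`,
+    with :math:`H` the convolution operator and :math:`G` the discrete
+    gradient operator.
+
+    Parameters
+    ----------
+    Hx : numpy.ndarray
+        Pre-computed result of the convolution operator applied to the
+        current image :math:`x`.
+    Gx : numpy.ndarray
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    serial_conv_model : SerialConvModel
+        Serial linear convolution model object.
+    z1, u1 : numpy.ndarray
+        Splitting and augmentation variables attached to the convolution.
+    z2, u2 : numpy.ndarray
+        Splitting and augmentation variables attached to the discrete
+        gradient.
+    rho1 : float
+        AXDA splitting parameter.
+    rho2 : float
+        AXDA splitting parameter.
+
+    Returns
+    -------
+    numpy.ndarray
+        Gradient of the smooth part of the potential with respect to
+        :math:`x`.
+    """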
+    # ! Hx and Gx are reused as is: they were computed when x was last
+    # ! updated, so they already contain the current values; the direct
+    # ! operators are applied outside this function
+
+    # local update step
+    Hx1 = Hx + u1 - z1
+    Hx1 /= rho1
+    Gx1 = Gx + u2 - z2
+    Gx1 /= rho2
+
+    # local adjoint operator
+    grad_x = serial_conv_model.apply_adjoint_operator(Hx1)
+    grad_x += gradient_2d_adjoint(Gx1[0], Gx1[1])
+
+    return grad_x
+
+
+def sample_x(x, gamma_x: float, grad_x, rng: np.random.Generator):
+    r"""PSGLA update step for the image :math:`x`.
+
+    Parameters
+    ----------
+    x : numpy.ndarray
+        Input image.
+    gamma_x : float
+        PSGLA step size (gradient and stochastic perturbation).
+    grad_x : numpy.ndarray
+        Gradient of the smooth part of the potential (minus log-likelihood).
+    rng : numpy.random.Generator
+        Random number generator.
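+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the image :math:`x`.
+
+    Notes
+    -----
+    The update below is a single PSGLA step,
+
+    .. math::
+        x^{t+1} = \operatorname{prox}_{\iota_{\geq 0}} \bigl( x^{t}
+        - \gamma_x \nabla f(x^{t}) + \sqrt{2 \gamma_x} \, \xi \bigr),
+        \quad \xi \sim \mathcal{N}(0, \mathrm{I}),
+
+    where the proximal operator reduces to the projection onto the
+    non-negative orthant (:func:`prox_nonegativity`).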
+    """
+    x1 = x + np.sqrt(2 * gamma_x) * rng.standard_normal(size=x.shape) - gamma_x * grad_x
+    prox_nonegativity(x1)
+    return x1
+
+
+def sample_z1(z1, y, Hx, u1, rho1: float, gamma1: float, rng: np.random.Generator):
+    r"""PSGLA update step for the splitting variable :math:`z_1`.
+
+    Parameters
+    ----------
+    z1 : numpy.ndarray
+        Current state of the splitting variable :math:`z_1`.
+    y : numpy.ndarray
+        Observed data.
+    Hx : numpy.ndarray
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    u1 : numpy.ndarray
+        Current state of the augmentation variable :math:`u_1`.
+    rho1 : float
+        AXDA parameter.
+    gamma1 : float
+        PSGLA step size (gradient and stochastic perturbation).
+    rng : numpy.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the splitting variable :math:`z_1`.
+    """
+    grad_z1 = (z1 - u1 - Hx) / rho1
+    z = z1 + np.sqrt(2 * gamma1) * rng.standard_normal(size=z1.shape) - gamma1 * grad_z1
+    return prox_kullback_leibler(z, y, lam=gamma1)
+
+
+def sample_z2(
+    z2, Gx, u2, rho2: float, gamma2: float, lambda_: float, rng: np.random.Generator
+):
+    r"""PSGLA update step for the splitting variable :math:`z_2`.
+
+    Parameters
+    ----------
+    z2 : numpy.ndarray
+        Current state of the splitting variable :math:`z_2`.
+    Gx : numpy.ndarray
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    u2 : numpy.ndarray
+        Current state of the augmentation variable :math:`u_2`.
+    rho2 : float
+        AXDA splitting parameter.
+    gamma2 : float
+        PSGLA step size (gradient and stochastic perturbation).
+    lambda_ : float
+        Current value of the regularization parameter :math:`\lambda`.
+    rng : numpy.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the splitting variable :math:`z_2`.
+    """
+    grad_z2 = (z2 - u2 - Gx) / rho2
+    z = z2 + np.sqrt(2 * gamma2) * rng.standard_normal(size=z2.shape) - gamma2 * grad_z2
+    return prox_l21norm(z, lam=lambda_ * gamma2, axis=0)
+
+
+def sample_u(z, Hx, rho: float, alph: float, rng: np.random.Generator):
+    r"""Sample any augmentation variable :math:`(u_i)_{1 \leq i \leq 2}`
+    from its conditional (Gaussian) distribution.
+
+    Parameters
+    ----------
+    z : numpy.ndarray
+        Current state of the splitting variable :math:`z_i`.
+    Hx : numpy.ndarray
+        Pre-computed result of the corresponding linear operator applied to
+        the current image :math:`x`.
+    rho : float
+        AXDA splitting parameter.
+    alph : float
+        AXDA augmentation parameter.
+    rng : numpy.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the auxiliary variable :math:`u_i` considered.
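+
+    Notes
+    -----
+    No PSGLA step is needed here: the conditional distribution of
+    :math:`u_i` is Gaussian, and is sampled in closed form with mean
+    :math:`\alpha (z - Hx) / (\rho + \alpha)` and standard deviation
+    :math:`\sqrt{\rho \alpha / (\rho + \alpha)}`.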
+    """
+    normalization_factor = np.sqrt(rho + alph)
+    mu = alph * (z - Hx) / normalization_factor**2
+    return mu + rng.standard_normal(size=mu.shape) * (
+        np.sqrt(rho * alph) / normalization_factor
+    )
+
+
+def sample_beta(
+    a: float, b: float, N, l21_z2: float, rng: np.random.Generator
+) -> float:
+    r"""Sample the regularization parameter :math:`\lambda`.
+
+    Parameters
+    ----------
+    a : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    b : float
+        Hyper-parameter of the Gamma prior for the regularization parameter
+        :math:`\lambda`.
+    N : numpy.ndarray(dtype=int)
+        Size of the image :math:`x`.
+    l21_z2 : float
+        Value of :math:`\|z_2\|_{2, 1}`.
+    rng : numpy.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    float
+        New state for the regularization parameter.
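+
+    Notes
+    -----
+    Standard conjugate Gamma update: assuming a :math:`\mathrm{Gamma}(a, b)`
+    prior and a TV-like prior density whose normalization constant is
+    proportional to :math:`\lambda^{2N}` (with :math:`N` the number of
+    pixels), the conditional distribution of :math:`\lambda` is
+    :math:`\mathrm{Gamma}(a + 2N, b + \|z_2\|_{2, 1})`. Note that
+    :func:`numpy.random.Generator.gamma` is parametrized by shape and
+    *scale*, hence the ``1 / (b + l21_z2)`` below.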
+    """
+    return rng.gamma(a + 2 * np.prod(N), 1 / (b + l21_z2))
+
+
+@jit(nopython=True, cache=True)
+def score_function(
+    y,
+    Hx,
+    Gx,
+    z1,
+    u1,
+    z2,
+    u2,
+    rho1: float,
+    rho2: float,
+    alpha1: float,
+    alpha2: float,
+    beta: float,
+):
+    r"""Compute the current value of the potential (minus log-likelihood) for
+    the AXDA model.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Observed data.
+    Hx : numpy.ndarray
+        Pre-computed result of the convolution operator applied to the current
+        image :math:`x`.
+    Gx : numpy.ndarray
+        Pre-computed result of the discrete gradient operator applied to the
+        current image :math:`x`.
+    z1 : numpy.ndarray
+        Splitting variable :math:`z_1`.
+    u1 : numpy.ndarray
+        Auxiliary augmentation variable :math:`u_1`.
+    z2 : numpy.ndarray
+        Splitting variable :math:`z_2`.
+    u2 : numpy.ndarray
+        Auxiliary augmentation variable :math:`u_2`.
+    rho1 : float
+        AXDA splitting parameter.
+    rho2 : float
+        AXDA splitting parameter.
+    alpha1 : float
+        AXDA augmentation parameter.
+    alpha2 : float
+        AXDA augmentation parameter.
+    beta : float
+        Value of the regularization parameter.
+
+    Returns
+    -------
+    float
+        Current potential value for the AXDA model.
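+
+    Notes
+    -----
+    The potential evaluated below is
+
+    .. math::
+        \mathrm{KL}(z_1, y) + \frac{\| Hx - z_1 + u_1 \|^2}{2 \rho_1}
+        + \frac{\| Gx - z_2 + u_2 \|^2}{2 \rho_2}
+        + \frac{\| u_1 \|^2}{2 \alpha_1}
+        + \frac{\| u_2 \|^2}{2 \alpha_2}
+        + \beta \| z_2 \|_{2, 1}.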
+    """
+    score = (
+        kullback_leibler(z1, y)
+        + np.sum(np.abs(Hx - z1 + u1) ** 2) / (2 * rho1)
+        + np.sum((Gx - z2 + u2) ** 2) / (2 * rho2)
+        + np.sum(np.abs(u1) ** 2) / (2 * alpha1)
+        + np.sum(u2**2) / (2 * alpha2)
+        + beta * l21_norm(z2)
+    )
+
+    return score
+
+
+def spa_psgla(
+    y,
+    checkpointname: str,
+    checkpoint_frequency: int,
+    warmstart_iter: int,
+    monitor_frequency: int,
+    checkpointflag: bool,
+    conv_model: SerialConvModel,
+    checkpointer: SerialCheckpoint,
+    rho1: float,
+    rho2: float,
+    alpha1: float,
+    alpha2: float,
+    beta: float,
+    a: float,
+    b: float,
+    Nmc: int,
+    M,
+    seed: int,
+    logger: Logger,
+):
+    r"""SPA sampler based on PSGLA to address a supervised 2D Poisson
+    deconvolution problem relying on a TV prior.
+
+    Parameters
+    ----------
+    y : numpy.ndarray
+        Input noisy observations.
+    checkpointname : str
+        Root string to build the name of the checkpoint files.
+    checkpoint_frequency : int
+        Number of iterations after which a checkpoint file is written to disk.
+    warmstart_iter : int
+        Iteration identifier of the warm-start checkpoint file.
+    monitor_frequency : int
+        Number of iterations after which the potential is computed and
+        written to the log file.
+    checkpointflag : bool
+        Flag to activate warmstart.
+    conv_model : SerialConvModel
+        Serial linear convolution model object.
+    checkpointer : SerialCheckpoint
+        Checkpoint object.
+    rho1 : float
+        Splitting parameter.
+    rho2 : float
+        Splitting parameter.
+    alpha1 : float
+        Augmentation parameter.
+    alpha2 : float
+        Augmentation parameter.
+    beta : float
+        Initial value of the regularization parameter.
+    a : float
+        Hyperparameter of the Gamma prior for the regularization parameter.
+    b : float
+        Hyperparameter of the Gamma prior for the regularization parameter.
+    Nmc : int
+        Total number of Monte-Carlo samples to be generated.
+    M : int
+        Maximum intensity of the image to be recovered.
+    seed : int
+        Integer seed to instantiate the random number generator.
+    logger : logging.Logger
+        Logger object.
+    """
+    # ! https://stackoverflow.com/questions/38543506/change-logging-print-function-to-tqdm-write-so-logging-doesnt-interfere-wit
+    # log = logging.getLogger(__name__)
+    # log.setLevel(logging.INFO)
+    # log.addHandler(TqdmLoggingHandler())
+
+    # * AXDA parameters
+    data_size = conv_model.data_size
+    N = conv_model.image_size
+
+    # * PSGLA step-sizes
+    gamma_x = 0.99 / (np.max(np.abs(conv_model.fft_kernel)) ** 2 / rho1 + 8 / rho2)
+    gamma1 = 0.99 * rho1
+    gamma2 = 0.99 * rho2
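+    # The step sizes are set just below the inverse Lipschitz constants of
+    # the corresponding gradients: for x, max|fft_kernel|^2 bounds the
+    # squared spectral norm of the convolution H, and 8 bounds that of the
+    # 2D discrete gradient G; for z1 and z2, the gradients are 1/rho1- and
+    # 1/rho2-Lipschitz, respectively.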
+
+    x_mc = np.empty((checkpoint_frequency, *N), dtype="d")
+    z1_mc = np.empty((checkpoint_frequency, *data_size), dtype="d")
+    u1_mc = np.empty((checkpoint_frequency, *data_size), dtype="d")
+    z2_mc = np.empty((checkpoint_frequency, 2, *N), dtype="d")
+    u2_mc = np.empty((checkpoint_frequency, 2, *N), dtype="d")
+
+    # save score array along iterations
+    score_array = np.zeros((checkpoint_frequency,), dtype="d")
+    # l21_z2 = 0.0
+    beta_mc = np.empty((checkpoint_frequency,), dtype="d")
+
+    # * rng
+    rng = default_rng(seed)
+
+    if checkpointflag:
+        (
+            x_mc[-1],
+            u1_mc[-1],
+            z1_mc[-1],
+            u2_mc[-1],
+            z2_mc[-1],
+            beta_mc[-1],
+            score_array[-1],
+            start_iter,
+        ) = loading(checkpointer, warmstart_iter, rng)
+        # ! should not be required
+        x_mc[0] = x_mc[-1]
+    else:
+        # * initialization
+        start_iter = 1
+        x_mc[0] = rng.integers(0, high=M, size=N, endpoint=True).astype(float)
+        u1_mc[0] = rng.integers(0, high=M, size=data_size, endpoint=True).astype(float)
+        z1_mc[0] = rng.integers(0, high=M, size=data_size, endpoint=True).astype(float)
+        u2_mc[0] = rng.integers(0, high=M, size=[2, *N], endpoint=True).astype(float)
+        z2_mc[0] = rng.integers(0, high=M, size=[2, *N], endpoint=True).astype(float)
+        # ! should not be required
+        beta_mc[0] = beta
+
+    # ! Hx and Gx must be recomputed whenever x is updated
+    Hx = conv_model.apply_direct_operator(x_mc[0])
+    Gx = np.empty((2, *N))
+    (
+        Gx[0],
+        Gx[1],
+    ) = gradient_2d(x_mc[0])
+
+    # * initialize score (checking consistency after warmstart)
+    if not checkpointflag:
+        score_array[0] = score_function(
+            y,
+            Hx,
+            Gx,
+            z1_mc[0],
+            u1_mc[0],
+            z2_mc[0],
+            u2_mc[0],
+            rho1,
+            rho2,
+            alpha1,
+            alpha2,
+            beta_mc[0],
+        )
+        score = score_array[0]
+    else:
+        score_array[-1] = score_function(
+            y,
+            Hx,
+            Gx,
+            z1_mc[-1],
+            u1_mc[-1],
+            z2_mc[-1],
+            u2_mc[-1],
+            rho1,
+            rho2,
+            alpha1,
+            alpha2,
+            beta_mc[-1],
+        )
+        score = score_array[-1]
+
+    logger.info(
+        r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(
+            start_iter - 1,
+            score,
+        )
+    )
+
+    pbar = tqdm(total=Nmc - 1, desc="Sampling", unit="it")
+    pbar.update(start_iter)
+    logger.info("Start SPA sampler (psgla) from t={}".format(start_iter))
+
+    # Auxiliary variables to measure timing (average + std) per iteration
+    counter = 0
+    atime = np.zeros((1,), dtype="d")
+    asqtime = np.zeros((1,), dtype="d")
+    time_ = 0.0
+    sqtime_ = 0.0
+
+    for iter_mc in range(start_iter, Nmc):
+
+        counter += 1
+        t_start = perf_counter()
+
+        # notational shortcuts (for in-place assignments)
+        past_iter = (iter_mc - 1) % checkpoint_frequency
+        current_iter = iter_mc % checkpoint_frequency
+        # next_iter = (iter_mc + 1) % checkpoint_frequency
+
+        # sample image x (update local tile)
+        grad_x = gradient_x(
+            Hx,
+            Gx,
+            conv_model,
+            z1_mc[past_iter],
+            u1_mc[past_iter],
+            z2_mc[past_iter],
+            u2_mc[past_iter],
+            rho1,
+            rho2,
+        )
+
+        # ! beware in-place assignment: x_mc[current_iter] first properly
+        # ! assigned to x_mc[past_iter] (in the previous iteration)
+        x_mc[current_iter] = sample_x(x_mc[past_iter], gamma_x, grad_x, rng)
+
+        # * sample auxiliary variables (z1, u1)
+        Hx = conv_model.apply_direct_operator(x_mc[current_iter])
+        z1_mc[current_iter] = sample_z1(
+            z1_mc[past_iter],
+            y,
+            Hx,
+            u1_mc[past_iter],
+            rho1,
+            gamma1,
+            rng,
+        )
+        u1_mc[current_iter] = sample_u(
+            z1_mc[current_iter],
+            Hx,
+            rho1,
+            alpha1,
+            rng,
+        )
+
+        # * sample auxiliary variables (z2, u2)
+        (Gx[0], Gx[1]) = gradient_2d(x_mc[current_iter])
+        z2_mc[current_iter] = sample_z2(
+            z2_mc[past_iter, ...],
+            Gx,
+            u2_mc[past_iter, ...],
+            rho2,
+            gamma2,
+            beta_mc[past_iter],
+            rng,
+        )
+
+        u2_mc[current_iter, ...] = sample_u(
+            z2_mc[current_iter, ...],
+            Gx,
+            rho2,
+            alpha2,
+            rng,
+        )
+
+        # sample TV regularization parameter beta (to be debugged)
+        # l21_z2 = np.sum(np.sqrt(np.sum(z2_mc[current_iter, ...] ** 2, axis=0)))
+
+        # beta_mc[current_iter] = sample_beta(a, b, N, l21_z2, rng)
+        beta_mc[current_iter] = beta
+
+        # update timing
+        t_stop = perf_counter()
+        elapsed_time = t_stop - t_start
+        time_ += elapsed_time
+        sqtime_ += elapsed_time**2
+
+        # evolution of the score, :math:`-\log p(x | y)`
+        if np.mod(iter_mc, monitor_frequency) == 0:
+            score = score_function(
+                y,
+                Hx,
+                Gx,
+                z1_mc[current_iter],
+                u1_mc[current_iter],
+                z2_mc[current_iter],
+                u2_mc[current_iter],
+                rho1,
+                rho2,
+                alpha1,
+                alpha2,
+                beta_mc[current_iter],
+            )
+            logger.info(r"t: {0:1.3e} | obj[t]: {1:1.3e}".format(iter_mc, score))
+            score_array[current_iter] = score
+
+        # * checkpoint
+        if np.mod(iter_mc + 1, checkpoint_frequency) == 0:
+            logger.info("Writing checkpoint")
+            atime[0] = time_ / counter
+            asqtime[0] = sqtime_ / counter
+            # backupname = "{}{}.{}".format(checkpointname, iter_mc + 1, "h5")
+            # checkpoint.save_mean_to_h5(
+            #     backupname,
+            #     rng,
+            #     iter_mc,
+            #     x_mc,
+            #     u1_mc,
+            #     z1_mc,
+            #     u2_mc,
+            #     z2_mc,
+            #     beta_mc,
+            #     score_array,
+            #     data_size,
+            #     N,
+            #     checkpoint_frequency,
+            # )
+            # checkpoint.save_to_h5(
+            #     backupname,
+            #     rng,
+            #     iter_mc,
+            #     x_mc,
+            #     u1_mc,
+            #     z1_mc,
+            #     u2_mc,
+            #     z2_mc,
+            #     beta_mc,
+            #     score_array,
+            #     data_size,
+            #     N,
+            #     checkpoint_frequency,
+            #     atime,
+            #     asqtime,
+            #     counter,
+            # )
+            saving(
+                checkpointer,
+                iter_mc,
+                rng,
+                x_mc,
+                u1_mc,
+                u2_mc,
+                z1_mc,
+                z2_mc,
+                beta_mc,
+                score_array,
+                iter_mc,
+                counter,
+                atime,
+                asqtime,
+                checkpoint_frequency,
+            )
+            time_ = 0.0
+            sqtime_ = 0.0
+            counter = 0
+        pbar.update()
+
+    # * finalize sampler
+    pbar.close()
+    logger.info("Writing checkpoint")
+
+    # ! if Nmc is a multiple of checkpoint_frequency, do not trigger final
+    # backup, as it will be redundant with the previous one
+    if np.mod(Nmc, checkpoint_frequency) > 0:
+        atime[0] = time_ / counter
+        asqtime[0] = sqtime_ / counter
+        # backupname = "{}{}.{}".format(checkpointname, Nmc, "h5")
+        # checkpoint.save_mean_to_h5(
+        #     backupname,
+        #     rng,
+        #     iter_mc,
+        #     x_mc,
+        #     u1_mc,
+        #     z1_mc,
+        #     u2_mc,
+        #     z2_mc,
+        #     beta_mc,
+        #     score_array,
+        #     data_size,
+        #     N,
+        #     Nmc % checkpoint_frequency,
+        # )
+        # checkpoint.save_to_h5(
+        #     backupname,
+        #     rng,
+        #     iter_mc,
+        #     x_mc,
+        #     u1_mc,
+        #     z1_mc,
+        #     u2_mc,
+        #     z2_mc,
+        #     beta_mc,
+        #     score_array,
+        #     data_size,
+        #     N,
+        #     Nmc % checkpoint_frequency,
+        #     atime,
+        #     asqtime,
+        #     counter,
+        # )
+        saving(
+            checkpointer,
+            iter_mc,
+            rng,
+            x_mc,
+            u1_mc,
+            u2_mc,
+            z1_mc,
+            z2_mc,
+            beta_mc,
+            score_array,
+            iter_mc,
+            counter,
+            atime,
+            asqtime,
+            Nmc % checkpoint_frequency,
+        )
+        time_ = 0.0
+        sqtime_ = 0.0
+        counter = 0
+
+    return
+
+
+if __name__ == "__main__":
+
+    # pr = cProfile.Profile()
+    # pr.enable()
+    # YOUR MAIN FUNCTION
+    # pr.disable()
+
+    # # Dump results:
+    # # - for binary dump
+    # pr.dump_stats('cpu_%d.prof' %comm.rank)
+    # # - for text dump
+    # with open( 'cpu_%d.txt' %comm.rank, 'w') as output_file:
+    #     sys.stdout = output_file
+    #     pr.print_stats( sort='time' )
+    #     sys.stdout = sys.__stdout__
+
+    # snakeviz output.prof # to visualize the results
+
+    pass
diff --git a/src/aaxda/samplers/utils.py b/src/aaxda/samplers/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c29e27b6f8da629adf1ece56865213860c1f558
--- /dev/null
+++ b/src/aaxda/samplers/utils.py
@@ -0,0 +1,30 @@
+import numpy as np
+
+
+def sample_u(z, Hx, rho: float, alph: float, rng: np.random.Generator):
+    r"""Sample augmentation variable :math:`u`.
+
+    Parameters
+    ----------
+    z : numpy.ndarray
+        Splitting variable
+    Hx : numpy.ndarray
+        Result of a splitting linear operator applied to the current image
+        ``x``.
+    rho : float
+        AXDA splitting parameter.
+    alph : float
+        Augmentation parameter.
+    rng : np.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    numpy.ndarray
+        New state for the augmentation :math:`u` considered.
+    """
+    normalization_factor = np.sqrt(rho + alph)
+    mu = alph * (z - Hx) / normalization_factor**2
+    return mu + rng.standard_normal(size=mu.shape) * (
+        np.sqrt(rho * alph) / normalization_factor
+    )
diff --git a/src/aaxda/utils/__init__.py b/src/aaxda/utils/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/aaxda/utils/args.py b/src/aaxda/utils/args.py
new file mode 100644
index 0000000000000000000000000000000000000000..10bb6da3666a49105559dd1773270b5a00d4e239
--- /dev/null
+++ b/src/aaxda/utils/args.py
@@ -0,0 +1,164 @@
+"""List of parameters (with default values) to be passed to the main synthetic
+data scripts (serial and distributed). Some parameters will only be taken into
+account for one configuration or the other (i.e., serial or distributed).
+"""
+import argparse
+
+
+def parse_args():
+    """Parse the main scripts' input arguments.
+
+    Returns
+    -------
+    argparse.Namespace
+        Parsed arguments, exposed as attributes.
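+
+    Examples
+    --------
+    Illustrative invocation (the script name below is hypothetical)::
+
+        python main_serial.py --rho 1.0 --alpha 1.0 --Nmc 100 --verbose
+
+    The parsed values are then available as attributes, e.g., ``args.rho``
+    or ``args.Nmc``.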
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--rho",
+        help="AAXDA splitting parameter",
+        default=1.0,
+        type=float,
+    )
+    parser.add_argument(
+        "--alpha",
+        help="AAXDA augmentation parameter",
+        default=1.0,
+        type=float,
+    )
+    parser.add_argument(
+        "--beta",
+        help="TV regularization parameter",
+        default=1.0,
+        type=float,
+    )
+    parser.add_argument(
+        "-cfreq",
+        "--checkpoint_frequency",
+        help="frequency at which checkpoint .h5 files are saved",
+        default=20,
+        type=int,
+    )
+    parser.add_argument(
+        "-cf",
+        "--checkpointfile",
+        help="name of the checkpoint .h5 file to be loaded",
+        default="checkpoint.h5",
+        type=str,
+    )
+    parser.add_argument(
+        "-cn",
+        "--checkpointname",
+        help="template name for any checkpoint file",
+        default="checkpoint",
+        type=str,
+    )
+    parser.add_argument(
+        "-d", "--data", help="activate data generation", action="store_true"
+    )
+    parser.add_argument(
+        "-df",
+        "--datafilename",
+        help="name for the data file (without file extension)",
+        default="data",
+        type=str,
+    )
+    parser.add_argument(
+        "--dpath",
+        help="path to data folder (created if it does not exist)",
+        default="data",
+        type=str,
+    )
+    parser.add_argument(
+        "--downsampling",
+        help="Image downsampling factor (data generation)",
+        default=1,
+        type=int,
+    )
+    parser.add_argument(
+        "-ext",
+        "--extension",
+        help="file extension for the checkpoint file; only used for the serial version of the algorithm (no longer active)",
+        default="h5",
+        type=str,
+        # choices={"h5", "zarr"},
+    )
+    parser.add_argument(
+        "--imfile",
+        help="full path to the ground truth image",
+        type=str,
+        default="img/boat.png",
+    )
+    parser.add_argument(
+        "-ksize",
+        "--kernel_size",
+        help="size of the convolution kernel",
+        type=int,
+        default=8,
+    )
+    parser.add_argument(
+        "--load", help="option to load checkpoint file", action="store_true"
+    )
+    parser.add_argument(
+        "--logfile",
+        help="full path and name of the log file, with extension",
+        default="std.log",
+        type=str,
+    )
+    parser.add_argument(
+        "--M",
+        help="max value of the image used to generate the synthetic data",
+        default=30,
+        type=int,
+    )
+    parser.add_argument(
+        "--Nmc",
+        help="number of Monte-Carlo iterations",
+        default=50,
+        type=int,
+    )
+    parser.add_argument(
+        "--restart",
+        help="restart iteration",
+        default=0,
+        type=int,
+    )
+    parser.add_argument("--prof", help="activate code profiling", action="store_true")
+    parser.add_argument(
+        "--rpath",
+        help="path to results folder (created if it does not exist)",
+        default="results",
+        type=str,
+    )
+    parser.add_argument(
+        "--sampler",
+        help="name of the selected sampler (choice only active for the serial versions)",
+        type=str,
+        default="psgla",
+        choices={"psgla", "pmyula"},
+    )
+    parser.add_argument(
+        "--save",
+        help="mode to save file: write samples to a single file or one file per process",
+        default="process",
+        choices={"process", "single"},
+    )
+    parser.add_argument(
+        "--seed",
+        help="random seed used to initialize the sampler; overridden when loading a checkpoint file",
+        type=int,
+        default=1234,
+    )
+    parser.add_argument(
+        "--dataseed",
+        help="random seed used to generate the synthetic data; overridden when loading a checkpoint file",
+        type=int,
+        default=1234,
+    )
+    parser.add_argument(
+        "-v", "--verbose", help="increase output verbosity", action="store_true"
+    )
+
+    args = parser.parse_args()
+
+    return args
diff --git a/src/aaxda/utils/args_metrics.py b/src/aaxda/utils/args_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d4775996aeed78c5b29570e0d16d386bd3b5c97
--- /dev/null
+++ b/src/aaxda/utils/args_metrics.py
@@ -0,0 +1,81 @@
+"""List of parameters (with default values) to be passed to the main script
+aimed at evaluating the reconstruction metrics.
+"""
+import argparse
+
+
+def parse_args():
+    """Parse the main script's input arguments.
+
+    Returns
+    -------
+    argparse.Namespace
+        Parsed arguments, exposed as attributes.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-cfreq",
+        "--checkpoint_frequency",
+        help="frequency at which checkpoint .h5 files are saved",
+        default=20,
+        type=int,
+    )
+    parser.add_argument(
+        "-cn",
+        "--checkpointname",
+        help="template name for any checkpoint file",
+        default="checkpoint",
+        type=str,
+    )
+    parser.add_argument(
+        "-df",
+        "--datafilename",
+        help="name of the data file from which to load the ground truth",
+        default="checkpoint.h5",
+        type=str,
+    )
+    parser.add_argument(
+        "--dpath",
+        help="path to data folder (created if it does not exist)",
+        default="data",
+        type=str,
+    )
+    parser.add_argument(
+        "--downsampling",
+        help="Image downsampling factor (data generation)",
+        default=1,
+        type=int,
+    )
+    parser.add_argument(
+        "--logfile",
+        help="full path and name of the log file, with extension",
+        default="std.log",
+        type=str,
+    )
+    parser.add_argument(
+        "--Nmc",
+        help="number of Monte-Carlo iterations",
+        default=50,
+        type=int,
+    )
+    parser.add_argument(
+        "--Nbi",
+        help="number of burn-in iterations",
+        default=0,
+        type=int,
+    )
+    parser.add_argument(
+        "--rfile",
+        help="name of results file (w/o file extension)",
+        default="final_results",
+        type=str,
+    )
+    parser.add_argument(
+        "--rpath",
+        help="path to results folder (created if it does not exist)",
+        default="results",
+        type=str,
+    )
+    args = parser.parse_args()
+
+    return args
diff --git a/src/aaxda/utils/checkpoint.py b/src/aaxda/utils/checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b97902de03eb68a4c59f701bb606c5cfe34b808
--- /dev/null
+++ b/src/aaxda/utils/checkpoint.py
@@ -0,0 +1,743 @@
+"""Generic checkpointing objects (serial and distributed version), relying on
+``h5py``. Handles any number of variables to be saved, but only expects
+``numpy.ndarray``, ``int`` or variables describing the state of a
+``numpy.random.Generator`` object.
+"""
+import sys
+from abc import ABC, abstractmethod
+from os.path import join
+
+import h5py
+import numpy as np
+
+# import hdf5plugin
+# from mpi4py import MPI
+
+
+def extract_rng_state(rng):
+    r"""Extract the state of a random number generator in the form of two
+    ``numpy.ndarray`` objects.
+
+    Parameters
+    ----------
+    rng : numpy.random.Generator
+        Random number generator.
+
+    Returns
+    -------
+    state_array : numpy.ndarray
+        State parameter of the input random generator.
+    inc_array : numpy.ndarray
+        Increment parameter of the input random generator.
+
+    Note
+    ----
+    The ``state`` and ``inc`` fields of a ``numpy.random.Generator`` object
+    are very large integers, and thus need to be converted to 32-byte
+    arrays (via ``int.to_bytes``) before being saved into a ``.h5`` file.
+    """
+    # * state and inc are very large integers, and thus need to be
+    # converted to hex format (later to an array of ints) to be saved in an
+    # .h5 file
+    # https://docs.python.org/3/library/stdtypes.html#int.to_bytes
+    # ! need 32 bytes in length: otherwise, the inverse operation
+    # ! int.from_bytes(state_array,sys.byteorder) does not coincide with the
+    # ! original int value
+    state_array = np.array(
+        bytearray(rng.__getstate__()["state"]["state"].to_bytes(32, sys.byteorder))
+    )
+    inc_array = np.array(
+        bytearray(rng.__getstate__()["state"]["inc"].to_bytes(32, sys.byteorder))
+    )
+    return state_array, inc_array
+
+
+def restore_rng_state(rng, loaded_inc_array, loaded_state_array):
+    """Set the state of a random number generator using the 32 bytes
+    increment and state arrays stored in ``loaded_inc_array`` and
+    ``loaded_state_array``, respectively.
+
+    Parameters
+    ----------
+    rng : numpy.random.Generator
+        Random number generator object.
+    loaded_inc_array : np.ndarray of numpy.uint8, of size 32.
+        Increment variable to restore the state of the generator.
+    loaded_state_array : np.ndarray of numpy.uint8, of size 32.
+        State variable to restore the state of the generator.
+
+    Note
+    ----
+    Input generator updated in-place.
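+
+    Example
+    -------
+    Minimal round-trip sketch with :func:`extract_rng_state`:
+
+    >>> rng = np.random.default_rng(1234)
+    >>> state_array, inc_array = extract_rng_state(rng)
+    >>> a = rng.standard_normal()
+    >>> restore_rng_state(rng, inc_array, state_array)
+    >>> bool(np.allclose(a, rng.standard_normal()))
+    True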
+    """
+    loaded_state = int.from_bytes(loaded_state_array, sys.byteorder)
+    loaded_inc = int.from_bytes(loaded_inc_array, sys.byteorder)
+    current_state = rng.__getstate__()
+    current_state["state"]["state"] = loaded_state
+    current_state["state"]["inc"] = loaded_inc
+    rng.__setstate__(current_state)
+
+    return
+
+
+class BaseCheckpoint(ABC):
+    r"""Base checkpoint object gathering the parameters common to the checkpoint
+    schemes used in this library.
+
+    .. _hdf5plugin: http://www.silx.org/doc/hdf5plugin/latest/usage.html#hdf5plugin.Blosc
+
+    Attributes
+    ----------
+    root_filename : str
+        Root of the filename (containing path to the appropriate directory)
+        where the checkpoint file is / will be stored.
+    cname : str
+        Name of the hdf5 compression filter (aka compressor). Defaults to
+        "gzip".
+    clevel : int
+        Compression level. Defaults to 5 (default for Blosc).
+    shuffle : int
+        Byte shuffle option (see `hdf5plugin`_ documentation). Defaults to
+        1. Not used for the moment.
+    """
+
+    def __init__(
+        self,
+        root_filename,
+        cname="gzip",
+        clevel=5,
+        shuffle=1,
+    ):
+        """
+        Parameters
+        ----------
+        root_filename : str
+            Root of the filename (containing path to the appropriate directory)
+            where the checkpoint file is / will be stored.
+        cname : str
+            Name of the hdf5 compression filter (aka compressor). Defaults
+            to "gzip".
+        clevel : int
+            Compression level. Defaults to 5 (default for Blosc).
+        shuffle : int
+            Byte shuffle option (see hdf5plugin_ documentation). Defaults to
+            1. Not used for the moment.
+        """
+        self.root_filename = root_filename
+        self.cname = cname
+        self.clevel = clevel
+        self.shuffle = shuffle
+
+    def filename(self, file_id):
+        """Get name of target file.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+
+        Returns
+        -------
+        str
+            Target filename.
+        """
+        return "{}{}.h5".format(self.root_filename, file_id)
+
+    @abstractmethod
+    def save(
+        self, file_id, chunk_sizes, rng=None, mode="w", rdcc_nbytes=None, **kwargs
+    ):  # pragma: no cover
+        r"""Save the content of the input variables gathered in the keyword
+        dictionary ``kwargs`` to disk.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        chunk_sizes : list of tuples, of length ``n``
+            List of tuples representing the chunk size for each input variable.
+            For scalar input, the corresponding chunk size needs to be
+            ``None``.
+        rng : numpy.random.Generator or None, optional
+            Current state of a random number generator to be saved to disk,
+            if any. By default None.
+        mode : str, optional
+            Mode to open the h5 file ("a", or "w"). By default "w".
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+        kwargs : list of numpy.ndarray, of length ``n``
+            List of keyword arguments representing Python variables to be
+            saved. Only allows numpy.ndarray or integers.
+        """
+        pass
+
+    @abstractmethod
+    def load(self, file_id, *args):  # pragma: no cover
+        r"""Loading some variables from a checkpoint file.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        args : list of str
+            Name of variables to be loaded from disk.
+
+        Returns
+        -------
+        NotImplemented
+        """
+        return NotImplemented
+
+
+class SerialCheckpoint(BaseCheckpoint):
+    r"""Checkpoint in serial environments, using ``h5py``.
+
+    Attributes
+    ----------
+    root_filename : str
+        Root of the filename (containing path to the appropriate directory)
+        where the checkpoint file is / will be stored.
+    cname : str
+        Name of the hdf5 compression filter (aka compressor). Defaults to
+        "gzip".
+    clevel : int
+        Compression level. Defaults to 5 (default for Blosc).
+    shuffle : int
+        Byte shuffle option (see hdf5plugin_ documentation). Defaults to 1.
+        Not used for the moment.
+
+    .. _hdf5plugin: http://www.silx.org/doc/hdf5plugin/latest/usage.html#hdf5plugin.Blosc
+    """
+
+    def __init__(
+        self,
+        root_filename,
+        cname="gzip",
+        clevel=5,
+        shuffle=1,
+    ):
+        """
+        Parameters
+        ----------
+        root_filename : str
+            Root of the filename (containing path to the appropriate directory)
+            where the checkpoint file is / will be stored.
+        cname : str
+            Name of the hdf5 compression filter (aka compressor). Defaults
+            to "gzip".
+        clevel : int
+            Compression level. Defaults to 5 (default for Blosc).
+        shuffle : int
+            Byte shuffle option (see hdf5plugin_ documentation). Defaults to
+            1. Not used for the moment.
+
+        .. _hdf5plugin: http://www.silx.org/doc/hdf5plugin/latest/usage.html#hdf5plugin.Blosc
+        """
+        super(SerialCheckpoint, self).__init__(root_filename, cname, clevel, shuffle)
+
+    def save(
+        self, file_id, chunk_sizes, rng=None, mode="w", rdcc_nbytes=None, **kwargs
+    ):
+        r"""Save the content of the input variables gathered in the keyword
+        dictionary ``kwargs`` to disk.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        chunk_sizes : list of tuples, of length ``n``
+            List of tuples representing the chunk size for each input variable.
+            For scalar input, the corresponding chunk size needs to be
+            ``None``.
+        rng : numpy.random.Generator or None, optional
+            Save to disk the current state of a random number generator, if
+            any. By default None.
+        mode : str, optional
+            Mode to open the h5 file ("a", or "w"). By default "w".
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+        kwargs : list of numpy.ndarray, of length ``n`` or int
+            List of keyword arguments representing Python variables to be
+            saved. Only allows numpy.ndarray or integers.
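+
+        Example
+        -------
+        Minimal sketch, mirroring the debug snippet at the bottom of this
+        module (creates ``test_1.h5`` in the current directory):
+
+        >>> chkpt = SerialCheckpoint("test_")
+        >>> c = np.ones((2, 2))
+        >>> chkpt.save(1, [None, None], a=3, c=c)
+        >>> dic_var = chkpt.load(1, 2 * [np.s_[:]], None, "a", "c")
+        >>> bool(np.allclose(dic_var["a"], 3))
+        True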
+        """
+        filename_ = self.filename(file_id)
+
+        # ! chunk-cache size controlled through rdcc_nbytes (e.g., 200 MB)
+        with h5py.File(filename_, mode, rdcc_nbytes=rdcc_nbytes) as f:
+            # * backup state random number generator
+            if rng is not None:
+                # convert rng to numpy array
+                state_array, inc_array = extract_rng_state(rng)
+                # backup rng state
+                dset = f.create_dataset("state", (32,), dtype=np.uint8)
+                dset[:] = state_array
+                dset = f.create_dataset("inc", (32,), dtype=np.uint8)
+                dset[:] = inc_array
+
+            # * backup other variables
+            for count, (var_name, var) in enumerate(kwargs.items()):
+                # print("%s, %s = %s" % (count, var_name, var))
+                # ! only allow numpy.ndarray or scalar integers
+                if isinstance(var, np.ndarray):
+                    if var.size > 1:
+                        dset = f.create_dataset(
+                            var_name,
+                            var.shape,
+                            dtype=var.dtype,
+                            compression=self.cname,
+                            compression_opts=self.clevel,
+                            chunks=chunk_sizes[count],
+                        )
+                    else:
+                        dset = f.create_dataset(var_name, (1,), dtype=var.dtype)
+                else:
+                    # ! when input is a scalar, only allow integer type
+                    dset = f.create_dataset(var_name, (1,), dtype="i")
+                dset[()] = var
+
+    def load(self, file_id, select, rng, *args, rdcc_nbytes=None):
+        r"""Loading some variables from a checkpoint file.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        select : list of slices, of size ``n``
+            List of slices used to select, for each variable, the part to
+            be loaded from disk.
+        rng : numpy.random.Generator or None
+            Random number generator to be restored using specific state stored
+            on disk.
+        args : list of str, of size ``n``
+            Variable list of strings corresponding to the name of the variables
+            to be loaded.
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+
+        Returns
+        -------
+        dic_var : dict
+            Dictionary containing all the variables loaded from the ``.h5``
+            file.
+        """
+        # https://stackoverflow.com/questions/10223427/python-function-that-returns-a-variable-number-of-outputs
+        filename_ = self.filename(file_id)
+        dic_var = {}
+
+        with h5py.File(filename_, "r", rdcc_nbytes=rdcc_nbytes) as f:
+            if rng is not None:
+                restore_rng_state(rng, f["inc"][:], f["state"][:])
+
+            for count, var in enumerate(args):
+                dic_var[var] = f[var][select[count]]
+            # {var: value for var in args}
+
+        return dic_var
+
+
+class DistributedCheckpoint(BaseCheckpoint):
+    r"""Checkpoint in distributed environments, using ``h5py``.
+
+    Attributes
+    ----------
+    comm : mpi4py.MPI.Comm
+        MPI communicator.
+    root_filename : str
+        Root of the filename (containing path to the appropriate directory)
+        where the checkpoint file is / will be stored.
+    cname : str
+        Name of the hdf5 compression filter (aka compressor). Defaults to
+        "gzip".
+    clevel : int
+        Compression level. Defaults to 5 (default for Blosc).
+    shuffle : int
+        Byte shuffle option (see hdf5plugin_ documentation). Defaults to 1.
+        Not used for the moment.
+    rank : int
+        Rank of the current process.
+    ncores : int
+        Total number of processes involved in ``comm``.
+
+    .. _hdf5plugin: http://www.silx.org/doc/hdf5plugin/latest/usage.html#hdf5plugin.Blosc
+    """
+
+    def __init__(
+        self,
+        comm,
+        root_filename,
+        cname="gzip",
+        clevel=5,
+        shuffle=1,
+    ):
+        """
+        Parameters
+        ----------
+        comm : mpi4py.MPI.Comm
+            MPI communicator.
+        root_filename : str
+            Root of the filename (containing path to the appropriate directory)
+            where the checkpoint file is / will be stored.
+        cname : str, optional
+            Name of the hdf5 compression filter (aka compressor). Defaults
+            to "gzip".
+        clevel : int, optional
+            Compression level. Defaults to 5 (default for Blosc).
+        shuffle : int
+            Byte shuffle option (see hdf5plugin_ documentation). Defaults to
+            1. Not used for the moment.
+
+        .. _hdf5plugin: http://www.silx.org/doc/hdf5plugin/latest/usage.html#hdf5plugin.Blosc
+        """
+        super(DistributedCheckpoint, self).__init__(
+            root_filename, cname, clevel, shuffle
+        )
+        self.comm = comm
+        self.rank = comm.Get_rank()
+        self.ncores = comm.Get_size()
+
+    def save(
+        self,
+        file_id,
+        shape,
+        select,
+        chunk_sizes,
+        rng=None,
+        mode="w",
+        rdcc_nbytes=None,
+        **kwargs,
+    ):
+        r"""Save the content of the input variables gathered in the keyword
+        dictionary ``kwargs`` to disk.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        shape : list of tuples, of size ``n``
+            List of tuples corresponding to the shape of each larger arrays
+            in which the variables will be saved to disk.
+        select : list of slices, of size ``n``
+            List of slices specifying where each variable is written within
+            the larger on-disk arrays.
+        chunk_sizes : list of tuples, of length ``n``
+            List of tuples representing the chunk size for each input variable.
+            For scalar input, the corresponding chunk size needs to be
+            ``None``.
+        rng : numpy.random.Generator or None, optional
+            Save to disk the current state of a random number generator, if
+            any. By default None.
+        mode : str, optional
+            Mode to open the h5 file ("a", or "w"). By default "w".
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+        kwargs : list of numpy.ndarray, of length ``n`` or int
+            List of keyword arguments representing Python variables to be
+            saved. Only allows numpy.ndarray or integers.
+        """
+        # TODO: add case of variables to be saved from a single core
+        # (typically the root)
+        filename_ = self.filename(file_id)
+
+        with h5py.File(
+            filename_, mode, driver="mpio", comm=self.comm, rdcc_nbytes=rdcc_nbytes
+        ) as f:
+            # * backup state random number generator
+            if rng is not None:
+                # convert local rng to numpy array
+                state_array, inc_array = extract_rng_state(rng)
+                # backup rng state: dataset creation must be done collectively
+                dset_state = []
+                dset_inc = []
+                for r in range(self.ncores):
+                    dset_state.append(
+                        f.create_dataset(join(str(r), "state"), (32,), dtype=np.uint8)
+                    )
+                    dset_inc.append(
+                        f.create_dataset(join(str(r), "inc"), (32,), dtype=np.uint8)
+                    )
+                dset_state[self.rank][:] = state_array
+                dset_inc[self.rank][:] = inc_array
+
+            # * backup other variables
+            for count, (var_name, var) in enumerate(kwargs.items()):
+                # ! only allow numpy.ndarray or scalar integers
+                if isinstance(var, np.ndarray):
+                    dset = f.create_dataset(
+                        var_name,
+                        shape[count],
+                        dtype=var.dtype,
+                        compression=self.cname,
+                        compression_opts=self.clevel,
+                        chunks=chunk_sizes[count],
+                    )
+                else:
+                    # ! when input is a scalar, only allow integer type
+                    dset = f.create_dataset(var_name, (1,), dtype="i")
+
+                with dset.collective:
+                    dset[select[count]] = var
+
+    def save_from_process(
+        self,
+        rank,
+        file_id,
+        select,
+        chunk_sizes,
+        rng=None,
+        mode="a",
+        rdcc_nbytes=None,
+        **kwargs,
+    ):
+        r"""Saving content of some input variables from a single process.
+
+        Parameters
+        ----------
+        rank : int
+            Rank of the process in the communicator.
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        select : list of slices
+            List of slices specifying where each variable is written within
+            the larger on-disk arrays.
+        chunk_sizes : list of tuples, of length ``n``
+            List of tuples representing the chunk size for each input variable.
+            For scalar input, the corresponding chunk size needs to be
+            ``None``.
+        rng : numpy.random.Generator or None, optional
+            Save to disk the current state of a random number generator, if
+            any. By default None.
+        mode : str, optional
+            Mode to open the h5 file ("a", or "w"). By default "a".
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+        kwargs : list of numpy.ndarray, of length ``n`` or int
+            List of keyword arguments representing Python variables to be
+            saved. Only allows numpy.ndarray or integers.
+        """
+        if self.rank == rank:
+            filename_ = self.filename(file_id)
+
+            with h5py.File(filename_, mode, rdcc_nbytes=rdcc_nbytes) as f:
+                # * backup state random number generator
+                if rng is not None:
+                    # convert local rng to numpy array
+                    state_array, inc_array = extract_rng_state(rng)
+                    # backup some rng state
+                    dset = f.create_dataset(
+                        join(str(rank), "state"), (32,), dtype=np.uint8
+                    )
+                    dset[:] = state_array
+                    dset = f.create_dataset(
+                        join(str(rank), "inc"), (32,), dtype=np.uint8
+                    )
+                    dset[:] = inc_array
+
+                # * backup other variables
+                for count, (var_name, var) in enumerate(kwargs.items()):
+                    # ! only allow numpy.ndarray or scalar integers
+                    # ! compression only allowed for non-scalar datasets
+                    if isinstance(var, np.ndarray):
+                        if var.size > 1:
+                            dset = f.create_dataset(
+                                var_name,
+                                var.shape,
+                                dtype=var.dtype,
+                                compression=self.cname,
+                                compression_opts=self.clevel,
+                                chunks=chunk_sizes[count],
+                            )
+                        else:
+                            dset = f.create_dataset(var_name, (1,), dtype=var.dtype)
+                    else:
+                        # ! when input is a scalar, only allow integer type
+                        dset = f.create_dataset(var_name, (1,), dtype="i")
+
+                    dset[select[count]] = var
+
+    def load(self, file_id, select, rng, *args, rdcc_nbytes=None):
+        r"""Loading some variables from a checkpoint file.
+
+        Parameters
+        ----------
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        select : list of slices
+            List of slices to retrieve parts of each variable to be loaded from
+            disk.
+        rng : numpy.random.Generator or None
+            Random number generator to be restored using specific state stored
+            on disk.
+        args : list of str
+            Variable list of strings corresponding to the name of the variables
+            to be loaded.
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+
+        Returns
+        -------
+        dic_var : dict
+            Dictionary containing all the variables loaded from the ``.h5``
+            file.
+        """
+        # https://stackoverflow.com/questions/10223427/python-function-that-returns-a-variable-number-of-outputs
+        filename_ = self.filename(file_id)
+        dic_var = {}
+
+        # rdcc_nbytes = 1024 ** 2 * 200
+        with h5py.File(
+            filename_, "r", driver="mpio", comm=self.comm, rdcc_nbytes=rdcc_nbytes
+        ) as f:
+            if rng is not None:
+                restore_rng_state(
+                    rng,
+                    f[join(str(self.rank), "inc")][:],
+                    f[join(str(self.rank), "state")][:],
+                )
+
+            for count, var in enumerate(args):
+                dic_var[var] = f[var][select[count]]  # (np.s_[-1], *select[count])
+
+        return dic_var
+
+    def load_from_process(self, rank, file_id, select, rng, *args, rdcc_nbytes=None):
+        r"""Loading some variables from a single process using a checkpoint
+        file.
+
+        Parameters
+        ----------
+        rank : int
+            Rank of the process in the communicator.
+        file_id : str or int
+            String or integer describing the id of the target ``.h5`` file.
+        select : list of slices
+            List of slices to retrieve parts of each variable to be loaded from
+            disk.
+        rng : numpy.random.Generator or None
+            Random number generator to be restored using specific state stored
+            on disk.
+        args : list of str
+            Variable list of strings corresponding to the name of the variables
+            to be loaded.
+        rdcc_nbytes : float, optional
+            Sets the total size (measured in bytes) of the raw data chunk cache
+            for each dataset. The default size is 1 MB. This should be set to
+            the size of each chunk times the number of chunks that are likely
+            to be needed in cache. By default None (see
+            `h5py documentation <https://docs.h5py.org/en/stable/high/file.html>`_).
+
+        Returns
+        -------
+        dic_var : dict
+            Dictionary containing all the variables loaded from the ``.h5``
+            file.
+        """
+        filename_ = self.filename(file_id)
+        dic_var = {}
+
+        if self.rank == rank:
+            with h5py.File(filename_, "r", rdcc_nbytes=rdcc_nbytes) as f:
+                if rng is not None:
+                    restore_rng_state(
+                        rng,
+                        f[join(str(self.rank), "inc")][:],
+                        f[join(str(self.rank), "state")][:],
+                    )
+
+                for count, var in enumerate(args):
+                    dic_var[var] = f[var][select[count]]  # np.s_[-1], *select[count]
+                    # ! fixing error with h5py: load data in the native byteorder
+                    # https://numpy.org/doc/stable/reference/generated/numpy.dtype.newbyteorder.html
+                    # dic_var[var].dtype = dic_var[var].dtype.newbyteorder("=")
+
+        return dic_var
+
+
+if __name__ == "__main__":
+
+    from mpi4py import MPI
+
+    # * crude debug serial checkpoint
+    # chkpt = SerialCheckpoint("test")
+    # c = np.ones((2, 2))
+    # chkpt.save(1, [None, None, None], rng=None, a=3, b=4, c=c)
+    # dic_var = chkpt.load(1, 3 * [np.s_[:]], None, "a", "b", "c")
+    # consistency = np.allclose(3, dic_var["a"])
+    # test syntax h5py in absence of chunking (chunks=None)
+    # syntax is fine
+    # f = h5py.File("test_chunk.h5", "w")
+    # dset = f.create_dataset("chunked", (1000, 1000), chunks=None)
+    # dset[:] = np.ones((1000, 1000))
+    # f.close()
+    # f = h5py.File("test_chunk.h5", "r")
+    # a = f["chunked"][:]
+    # f.close()
+    # print("Done")
+    # * crude debug parallel checkpoint
+    # TODO: test in realistic conditions, e.g., with convolution operator
+    # comm = MPI.COMM_WORLD
+    # size = comm.Get_size()
+    # rank = comm.Get_rank()  # The process ID (integer 0-3 for 4-process run)
+    # print("Process {}: world size={}".format(rank, size))
+    # if rank == 0:
+    #     ss = np.random.SeedSequence(1234)
+    #     # Spawn off nworkers child SeedSequences to pass to child processes.
+    #     child_seed = np.array(ss.spawn(size))
+    # else:
+    #     child_seed = None
+    # local_seed = comm.scatter(child_seed, root=0)
+    # local_rng = np.random.default_rng(local_seed)
+    # local_dic0 = local_rng.__getstate__()
+    # print(
+    #     "Process {}: child_seed[{}], entropy={}, spawn_key={}".format(
+    #         rank, rank, local_seed.entropy, local_seed.spawn_key
+    #     )
+    # )
+    # dchkpt = DistributedCheckpoint(comm, "distributed_test")
+    # # * backup for rng state
+    # dchkpt.save(1, None, None, rng=local_rng)
+    # a = local_rng.normal()
+    # print("Process {}: a={}".format(rank, a))
+    # # * load (warm-start for rng state)
+    # dchkpt.load(1, None, local_rng)
+    # b = local_rng.normal()
+    # print("Process {}: a={}, b={}, b=a? {}".format(rank, a, b, np.allclose(b, a)))
+    # # Check a=b on all processes
+    # local_consistency_check = np.array([np.allclose(b, a)])
+    # global_consistency_check = np.array([False])
+    # # Reduce "local_consistency_check" on the root
+    # comm.Reduce(
+    #     [local_consistency_check, MPI.C_BOOL],
+    #     [global_consistency_check, MPI.C_BOOL],
+    #     op=MPI.LAND,
+    #     root=0,
+    # )
+    # if rank == 0:
+    #     print("Communication correctness: {}".format(global_consistency_check))
+    # MPI.Finalize()
+
+
+# mpiexec -n 2 python -m mpi4py aaxda/utils/checkpoint.py
diff --git a/src/aaxda/utils/communications.py b/src/aaxda/utils/communications.py
new file mode 100644
index 0000000000000000000000000000000000000000..028ba5d678a0eb736f634ffed72d39c719d68c62
--- /dev/null
+++ b/src/aaxda/utils/communications.py
@@ -0,0 +1,1714 @@
+"""Core set of functions implementing the communications leveraged from the
+communicators defined in :mod:`aaxda.utils.communicators`.
+"""
+from collections import deque
+
+import numpy as np
+from mpi4py import MPI
+
+# TODO: investigate
+# [persistent communications](https://mpi4py.readthedocs.io/en/stable/overview.
+# html#persistent-communications)
+
+
+def split_range(nchunks, N, overlap=0, backward=True):
+    r"""Tessellates :math:`\{ 0, \dotsc , N-1 \}` into multiple subsets.
+
+    Tessellates :math:`\{ 0, \dotsc , N-1 \}` into (non-)overlapping
+    subsets, each containing approximately the same number of indices.
+
+    Parameters
+    ----------
+    nchunks : int
+        Number of segments.
+    N : int
+        Total number of indices.
+    overlap : int, optional
+        Defines overlap size between segments (if any). Defaults to 0.
+    backward : bool, optional
+        Direction of the overlap, if any (backward or forward). Defaults to
+        True.
+
+    Returns
+    -------
+    numpy.ndarray[int]
+        Start and end index of each segment. Shape: ``(nchunks, 2)``.
+
+    Raises
+    ------
+    ValueError
+        Error if the overlap is greater than the size of a segment.
+    """
+
+    splits = np.linspace(-1, N - 1, num=nchunks + 1, dtype="i")
+
+    if overlap > np.floor(N / nchunks):
+        raise ValueError(r"More than 100% overlap between two consecutive segments")
+
+    if overlap <= 0:
+        # w/o overlap
+        rg = np.concatenate((splits[:-1][:, None] + 1, splits[1:][:, None]), axis=1)
+    else:
+        # with overlap
+        if backward:
+            # overlap towards the left (backward)
+            rg = np.concatenate(
+                (
+                    np.array((splits[0] + 1, *(splits[1:-1] + 1 - overlap)))[:, None],
+                    splits[1:][:, None],
+                ),
+                axis=1,
+            )
+        else:
+            # overlap towards the right (forward)
+            rg = np.concatenate(
+                (
+                    splits[:-1][:, None] + 1,
+                    np.array((*(splits[1:-1] + overlap), splits[-1]))[:, None],
+                ),
+                axis=1,
+            )
+    return rg
+
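+# A small, hand-checked example of the tiling produced by `split_range`:
+# split_range(4, 12)            -> rows [0, 2], [3, 5], [6, 8], [9, 11]
+# split_range(4, 12, overlap=1) -> rows [0, 2], [2, 5], [5, 8], [8, 11]
+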
+
+def local_split_range(nchunks, N, index, overlap=0, backward=True):
+    r"""Return the portion of :math:`\{ 0, \dotsc , N-1 \}` handled by a
+    process.
+
+    Return the portion of :math:`\{ 0, \dotsc , N-1 \}`, tessellated into
+    (non-)overlapping subsets, owned by a process, with nchunks processes in
+    total.
+
+    Parameters
+    ----------
+    nchunks : int
+        Total number of segments.
+    N : int
+        Total number of indices.
+    index : int
+        Rank of the current process.
+    overlap : int, optional
+        Overlap size between consecutive segments (if any), by default 0.
+    backward : bool, optional
+        Direction of the overlap, if any (backward or forward), by default
+        True.
+
+    Returns
+    -------
+    numpy.ndarray[int]
+        Start and end index of the segment: shape ``(2,)``.
+
+    Raises
+    ------
+    ValueError
+        Error if ``index`` is greater than ``nchunks-1``.
+    ValueError
+        Error if the overlap is greater than the size of a segment.
+    """
+
+    if nchunks <= index:
+        raise ValueError(
+            r"Index should be taken in [0, ..., nchunks-1], with nchunks={0}".format(
+                nchunks
+            )
+        )
+    step = N / nchunks
+    if overlap > np.floor(step):
+        raise ValueError(r"More than 100% overlap between two consecutive segments")
+
+    start = -1 + index * step
+    stop = np.rint(start + step)
+    start = np.rint(start)
+    rg = np.array([start + 1, stop], dtype="i")
+    # if the facet overlaps with a neighbour
+    if overlap > 0:
+        if backward:
+            # overlap towards the left (backward)
+            if index > 0:
+                rg[0] -= overlap
+        else:
+            # overlap towards the right (forward)
+            if index < nchunks - 1:
+                rg[-1] += overlap
+    return rg
+
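+# Hand-checked example, consistent with the rows of `split_range` above:
+# local_split_range(4, 12, 2)            -> [6, 8]
+# local_split_range(4, 12, 2, overlap=1) -> [5, 8]
+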
+
+def local_split_range_nd(nchunks, N, index, overlap=None, backward=True):
+    r"""Return the portion of :math:`\{ 0, \dotsc , N-1 \}` (nD range
+    of indices) handled by a process.
+
+    Return the portion of :math:`\{ 0, \dotsc , N-1 \}`, tessellated
+    into (non-)overlapping subsets along each dimension, owned by a process.
+
+    Parameters
+    ----------
+    nchunks : numpy.ndarray[int]
+        Total number of segments along each dimension.
+    N : numpy.ndarray[int]
+        Total number of indices along each dimension.
+    index : numpy.ndarray[int]
+        Rank of the current process along each dimension.
+    overlap : numpy.ndarray[int], optional
+        Overlap size between consecutive segments along each dimension, by
+        default None.
+    backward : bool, optional
+        Direction of the overlap (forward or backward), by default True.
+
+    Returns
+    -------
+    numpy.ndarray[int]
+        Start and end index of the nD segment along each dimension:
+        shape ``(ndims, 2)``.
+
+    Raises
+    ------
+    ValueError
+        Error if any index is greater than ``nchunks-1``.
+    ValueError
+        Error if any overlap size is greater than the size of the corresponding
+        segment.
+    """
+
+    # TODO: see how to make it compatible with a 1D implementation
+    if np.any(nchunks <= index):
+        raise ValueError(
+            r"Index should be taken in [0, ..., nchunks-1], with nchunks={0}".format(
+                nchunks
+            )
+        )
+    step = N / nchunks
+    if overlap is not None:
+        if np.any(overlap > np.floor(step)):
+            raise ValueError(r"More than 100% overlap between two consecutive segments")
+    start = -1 + index * step
+    stop = (start + step).astype(np.int64)
+    start = start.astype(np.int64)
+    rg = np.concatenate(((start + 1)[:, None], stop[:, None]), axis=1)
+
+    if overlap is not None:
+        if backward:
+            rg[np.logical_and(index > 0, overlap > 0), 0] = (
+                rg[np.logical_and(index > 0, overlap > 0), 0]
+                - overlap[np.logical_and(index > 0, overlap > 0)]
+            )
+        else:
+            rg[np.logical_and(index < nchunks - 1, overlap > 0), 1] = (
+                rg[np.logical_and(index < nchunks - 1, overlap > 0), 1]
+                + overlap[np.logical_and(index < nchunks - 1, overlap > 0)]
+            )
+    return np.squeeze(rg)
+
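+# Hand-checked example: 2 x 2 grid over an 8 x 8 array, backward overlap 1;
+# process (1, 0) owns rows 3-7 (overlap included) and columns 0-3:
+# local_split_range_nd(
+#     np.array([2, 2]), np.array([8, 8]), np.array([1, 0]),
+#     overlap=np.array([1, 1]),
+# ) -> [[3, 7], [0, 3]]
+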
+
+# ! WARNING: may result in empty tiles, thus changing the total number of
+# ! facets
+def rebalance_tile_nd(tile_indices, ranknd, N, grid_size, overlap_size):
+    r""" "Resize the local tiles to rebalance data sizes across the grid.
+
+    Resize the local tiles across the grid to compensate for the increase in
+    the data size for border facets.
+
+    Parameters
+    ----------
+    tile_indices : numpy.ndarray[int]
+        Indices of the pixels from the full array composing the tile (first
+        point, last point). Shape: ``(ndims, 2)``.
+    ranknd : numpy.ndarray[int]
+        Rank of the current process (multiple dimensions).
+    N : numpy.ndarray[int]
+        Total number of indices along each dimension.
+    grid_size : numpy.ndarray[int]
+        Size of the process grid.
+    overlap_size : numpy.ndarray[int]
+        Overlap size between consecutive facets across the grid along each
+        dimension.
+
+    Returns
+    -------
+    numpy.ndarray[int]
+        Updated indices defining the local tile: shape ``(ndims, 2)``.
+    """
+
+    # safeguard: trigger rebalance only if the data chunk associated with the
+    # last facet is expected to be twice as large as a tile
+    ndims = ranknd.size
+    ideal_size = np.floor(N / grid_size + overlap_size)
+    tile_size = tile_indices[:, 1] - tile_indices[:, 0] + 1
+
+    for d in range(ndims):
+
+        if (
+            overlap_size[d] > 0
+            and grid_size[d] > 1
+            and ideal_size[d] > 2 * tile_size[d]
+        ):
+            if overlap_size[d] < grid_size[d]:
+                offset = 1
+                # increase size of the first overlap_size[d] tiles along dim. d
+                if ranknd[d] < overlap_size[d] - 1:
+                    tile_indices[d, 0] += ranknd[d] * offset
+                    tile_indices[d, 1] += (ranknd[d] + 1) * offset
+            else:
+                # increase size of the first grid_size[d]-1 along dim. d
+                offset = np.floor(overlap_size[d] / grid_size[d])
+                if ranknd[d] < grid_size[d] - 1:
+                    tile_indices[d, 0] += ranknd[d] * offset
+                    tile_indices[d, 1] += (ranknd[d] + 1) * offset
+                else:
+                    tile_indices[d, 0] += (
+                        ranknd[d] * offset
+                    )  # ! only modify starting point (last facet)
+
+    return tile_indices
+
+
+def local_split_range_symmetric_nd(
+    nchunks, N, index, overlap_pre=np.array([0]), overlap_post=np.array([0])
+):
+    r"""Return the portion of :math:`\{ 0, \dotsc , N-1 \}` (nD range of
+    indices) handled by a process (symmetric version).
+
+    Return the portion of :math:`\{ 0, \dotsc , N-1 \}`, tessellated into
+    (non-)overlapping subsets along each dimension, owned by a process.
+
+    Parameters
+    ----------
+    nchunks : numpy.ndarray[int]
+        Number of nd-segments along each dimension.
+    N : numpy.ndarray[int]
+        Total number of indices along each dimension.
+    index : numpy.ndarray[int]
+        Index of the process.
+    overlap_pre : numpy.ndarray[int], optional
+        Defines overlap with preceding segment, by default ``np.array([0])``.
+    overlap_post : numpy.ndarray[int], optional
+        Defines overlap with following segment, by default ``np.array([0])``.
+
+    Returns
+    -------
+    numpy.ndarray[int]
+        Start and end index of the nD segment along each dimension. Shape:
+        ``(ndims, 2)``.
+
+    Raises
+    ------
+    ValueError
+        Error if any index is greater than ``nchunks-1``.
+    ValueError
+        Error if any overlap is greater than the size of a segment.
+    """
+
+    # TODO: see how to make it compatible with a 1D implementation
+    if np.any(nchunks <= index):
+        raise ValueError(
+            r"Index should be taken in [0, ..., nchunks-1], with nchunks={0}".format(
+                nchunks
+            )
+        )
+    step = N / nchunks
+    if np.any(overlap_pre > np.floor(step)) or np.any(overlap_post > np.floor(step)):
+        raise ValueError(r"More than 100% overlap between two consecutive segments")
+    start = -1 + index * step
+    stop = (start + step).astype(np.int64)
+    start = start.astype(np.int64)
+    rg = np.concatenate(((start + 1)[:, None], stop[:, None]), axis=1)
+    rg[np.logical_and(index > 0, overlap_pre > 0), 0] = (
+        rg[np.logical_and(index > 0, overlap_pre > 0), 0]
+        - overlap_pre[np.logical_and(index > 0, overlap_pre > 0)]
+    )
+
+    rg[np.logical_and(index < nchunks - 1, overlap_post > 0), 1] = (
+        rg[np.logical_and(index < nchunks - 1, overlap_post > 0), 1]
+        + overlap_post[np.logical_and(index < nchunks - 1, overlap_post > 0)]
+    )
+    return np.squeeze(rg)
+
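+# Hand-checked example: middle segment of 3 over 12 indices, overlap 1
+# towards the previous segment and 2 towards the next one:
+# local_split_range_symmetric_nd(
+#     np.array([3]), np.array([12]), np.array([1]),
+#     overlap_pre=np.array([1]), overlap_post=np.array([2]),
+# ) -> [3, 9]
+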
+
+def split_range_interleaved(nchunks, N):
+    r"""Tessellates :math:`\{ 0, \dotsc , N-1 \}` into interleaved subsets.
+
+    Tessellates :math:`\{ 0, \dotsc , N-1 \}` into subsets of interleaved
+    indices, each containing approximately the same number of indices
+    (downsampling of :math:`\{ 0, \dotsc , N-1 \}`).
+
+    Parameters
+    ----------
+    nchunks : int
+        Total number of segments.
+    N : int
+        Total number of indices.
+
+    Returns
+    -------
+    list[slice]
+        List of slices to extract the indices corresponding to each set.
+
+    Raises
+    ------
+    ValueError
+        Error if the number of segments ``nchunks`` is greater than ``N``.
+    """
+
+    if nchunks > N:
+        raise ValueError(
+            r"Number of segments nchunks={0} greater than the dimension N={1}".format(
+                nchunks, N
+            )
+        )
+
+    return [np.s_[k:N:nchunks] for k in range(nchunks)]
+
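+# Hand-checked example: 3 interleaved subsets of {0, ..., 9}:
+# split_range_interleaved(3, 10)
+#   -> [np.s_[0:10:3], np.s_[1:10:3], np.s_[2:10:3]]
+# i.e., indices {0, 3, 6, 9}, {1, 4, 7} and {2, 5, 8}
+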
+
+def local_split_range_interleaved(nchunks, N, index):
+    r"""Tessellates :math:`\{ 0, \dotsc , N-1 \}` into interleaved
+    subsets.
+
+    Tessellates :math:`\{ 0, \dotsc , N-1 \}` into subsets of
+    interleaved indices, each containing approximately the same number of
+    indices (downsampling of :math:`\{ 0, \dotsc , N-1 \}`).
+
+    Parameters
+    ----------
+    nchunks : int
+        Total number of segments.
+    N : int
+        Total number of indices.
+    index : int
+        Index identifying the chunk considered.
+
+    Returns
+    -------
+    slice
+        Slice to extract the indices of the segment considered.
+
+    Raises
+    ------
+    ValueError
+        Error if the index is greater than ``nchunks-1``.
+    ValueError
+        Error if the number of segments ``nchunks`` is greater than ``N``.
+    """
+
+    if nchunks <= index:
+        raise ValueError(
+            r"Index should be taken in [0, ..., nchunks-1], with nchunks={0}".format(
+                nchunks
+            )
+        )
+    if nchunks > N:
+        raise ValueError(
+            r"Number of segments nchunks={0} greater than the dimension N={1}".format(
+                nchunks, N
+            )
+        )
+
+    return np.s_[index:N:nchunks]
+
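+# Hand-checked example: slice owned by process 1 out of 3:
+# local_split_range_interleaved(3, 10, 1) -> np.s_[1:10:3], i.e., {1, 4, 7}
+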
+
+def get_neighbour(ranknd, grid_size, disp):
+    """1D rank of a neighbour of the current MPI process.
+
+    Returns the 1D rank of the neighbour of the current MPI process,
+    corresponding to a pre-defined displacement vector `disp` in the nD
+    Cartesian grid.
+
+    Parameters
+    ----------
+    ranknd : numpy.ndarray[int]
+        nD rank of the current process
+    grid_size : numpy.ndarray[int]
+        Size of the Cartesian process grid (number of processes along each
+        dimension)
+    disp : numpy.ndarray[int]
+        Displacement vector to obtain the rank of a neighbour process.
+
+    Returns
+    -------
+    int
+        1D rank of the neighbour process
+    """
+
+    return np.ravel_multi_index(ranknd + disp, grid_size)
+
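+# Hand-checked example: on a 2 x 3 grid, the neighbour of process (1, 1)
+# along the second axis is (1, 2), i.e., 1D rank 1 * 3 + 2 = 5:
+# get_neighbour(np.array([1, 1]), np.array([2, 3]), np.array([0, 1])) -> 5
+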
+
+# TODO: check changes for circular convolutions
+def slice_valid_coefficients(ranknd, grid_size, overlap_size):
+    r"""Helper elements to extract the valid local convolution coefficients.
+
+    Returns slice to select the valid local convolution coefficients, with the
+    necessary padding parameters to implement the adjoint (zero-padding)
+    operator.
+
+    Parameters
+    ----------
+    ranknd : numpy.ndarray[int]
+        Rank of the process in a Cartesian nD grid of MPI processes.
+    grid_size : numpy.ndarray[int]
+        Size of the MPI process grid.
+    overlap_size : numpy.ndarray[int]
+        Overlap between contiguous facets along each dimension.
+
+    Returns
+    -------
+    valid_coefficients : tuple[slice]
+        Slice to extract valid coefficients from the local convolutions.
+
+    Raises
+    ------
+    AssertionError
+        `ranknd`, `grid_size` and `overlap_size` must all have the same shape.
+    """
+
+    ndims = ranknd.size
+
+    if not (grid_size.size == ndims and overlap_size.size == ndims):
+        raise AssertionError(
+            "`ranknd`, `grid_size` and `overlap_size` must have the same shape"
+        )
+
+    L = ndims * [None]
+    R = ndims * [None]
+
+    for d in range(ndims):
+        if grid_size[d] > 1 and overlap_size[d] > 0:
+            if ranknd[d] > 0 and ranknd[d] < grid_size[d] - 1:
+                L[d] = overlap_size[d]
+                R[d] = -overlap_size[d]
+            elif ranknd[d] == grid_size[d] - 1:
+                L[d] = overlap_size[d]
+                R[d] = None
+            else:
+                L[d] = 0
+                R[d] = -overlap_size[d]
+        else:
+            L[d] = 0
+            R[d] = None
+
+    valid_coefficients = tuple([np.s_[L[d] : R[d]] for d in range(ndims)])
+
+    return valid_coefficients
+
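+# Hand-checked example: 1D grid of 3 processes, overlap of 2:
+# slice_valid_coefficients(np.array([0]), np.array([3]), np.array([2]))
+#   -> (np.s_[0:-2],)    first process
+# slice_valid_coefficients(np.array([1]), np.array([3]), np.array([2]))
+#   -> (np.s_[2:-2],)    middle process
+# slice_valid_coefficients(np.array([2]), np.array([3]), np.array([2]))
+#   -> (np.s_[2:],)      last process
+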
+
+def get_local_slice(ranknd, grid_size, overlap_size, backward=True):
+    r"""Slice to extract the pixels specific to a given worker.
+
+    Get the slice corresponding to the elements exclusively handled by the
+    current process (i.e., remove the overlap from overlapping facets).
+
+    Parameters
+    ----------
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD Cartesian grid of MPI processes.
+    grid_size : numpy.ndarray[int]
+        Size of the process grid
+    overlap_size : numpy.ndarray[int]
+        Size of the overlap between contiguous facets.
+    backward : bool, optional
+        Orientation of the overlap along the dimensions, by default True.
+
+    Returns
+    -------
+    tuple[slice]
+        Slice to extract the coefficients specifically handled by the current
+        process.
+
+    Raises
+    ------
+    AssertionError
+        `ranknd`, `grid_size` and `overlap_size` must all have the same shape.
+    """
+
+    ndims = ranknd.size
+
+    if not (grid_size.size == ndims and overlap_size.size == ndims):
+        raise AssertionError(
+            "`ranknd`, `grid_size` and `overlap_size` must have the same shape"
+        )
+
+    local_slice = ndims * [np.s_[:]]
+    isvalid_splitting = np.logical_and(grid_size > 1, overlap_size > 0)
+
+    if backward:
+        for d in range(ndims):
+            if ranknd[d] > 0 and isvalid_splitting[d]:
+                local_slice[d] = np.s_[overlap_size[d] :]
+    else:
+        for d in range(ndims):
+            if ranknd[d] < grid_size[d] - 1 and isvalid_splitting[d]:
+                local_slice[d] = np.s_[: -overlap_size[d]]
+
+    return tuple(local_slice)
+
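+# Hand-checked example: on a 2 x 2 grid with backward overlap of 1, process
+# (1, 0) owns all of its facet but the first row:
+# get_local_slice(np.array([1, 0]), np.array([2, 2]), np.array([1, 1]))
+#   -> (np.s_[1:], np.s_[:])
+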
+
+def get_local_slice_border(ranknd, grid_size, overlap_size, border_size, backward=True):
+    r"""Slice to extract the pixels specific to a given worker.
+
+    Get the slice corresponding to the elements exclusively handled by the
+    current process (i.e., remove the overlap from overlapping facets).
+
+    Parameters
+    ----------
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD Cartesian grid of MPI processes.
+    grid_size : numpy.ndarray[int]
+        Size of the process grid
+    overlap_size : numpy.ndarray[int]
+        Size of the overlap between contiguous facets.
+    border_size : numpy.ndarray[int]
+        Size of the border added around the overall image (boundary extension).
+    backward : bool, optional
+        Orientation of the overlap along the dimensions, by default True.
+
+    Returns
+    -------
+    tuple[slice]
+        Slice to extract the coefficients specifically handled by the current
+        process.
+
+    Raises
+    ------
+    AssertionError
+        `ranknd`, `grid_size` and `overlap_size` must all have the same shape.
+    """
+
+    ndims = ranknd.size
+
+    if not (grid_size.size == ndims and overlap_size.size == ndims):
+        raise AssertionError(
+            "`ranknd`, `grid_size` and `overlap_size` must have the same shape"
+        )
+
+    local_slice = ndims * [np.s_[:]]
+    isvalid_splitting = np.logical_and(grid_size > 1, border_size > 0)
+
+    # ! need to adjust offest due to boundary extension
+    if backward:
+        for d in range(ndims):
+            if isvalid_splitting[d]:
+                if ranknd[d] == 0:
+                    local_slice[d] = np.s_[border_size[d] :]
+                elif ranknd[d] < grid_size[d] - 1:
+                    local_slice[d] = np.s_[overlap_size[d] :]
+                else:  # ranknd[d] == grid_size[d] - 1
+                    local_slice[d] = np.s_[overlap_size[d] : -border_size[d]]
+    else:
+        for d in range(ndims):
+            if isvalid_splitting[d]:
+                if ranknd[d] == grid_size[d] - 1:
+                    local_slice[d] = np.s_[: -border_size[d]]
+                elif ranknd[d] > 0:
+                    local_slice[d] = np.s_[: -overlap_size[d]]
+                else:  # ranknd[d] == 0
+                    local_slice[d] = np.s_[border_size[d] : -overlap_size[d]]
+
+    return tuple(local_slice)
+
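+# Hand-checked example: 1D grid of 2 processes, overlap 1, border 2
+# (backward orientation):
+# get_local_slice_border(
+#     np.array([0]), np.array([2]), np.array([1]), np.array([2])
+# ) -> (np.s_[2:],)     first process: drop the image border only
+# get_local_slice_border(
+#     np.array([1]), np.array([2]), np.array([1]), np.array([2])
+# ) -> (np.s_[1:-2],)   last process: drop the overlap and the border
+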
+
+# def get_border_slice(ranknd, grid_size, overlap_size, backward=True):
+#     r"""Slice to extract the border pixels on a given worker.
+
+#     Get the slice corresponding to the elements on the borders of the
+#     current process (i.e., retain only the overlap area from the facets).
+
+#     Parameters
+#     ----------
+#     ranknd : numpy.ndarray[int]
+#         Rank of the current process in the nD Cartesian grid of MPI processes.
+#     grid_size : numpy.ndarray[int]
+#         Size of the process grid
+#     overlap_size : numpy.ndarray[int]
+#         Size of the overlap between contiguous facets.
+#     backward : bool, optional
+#         Orientation of the overlap along the dimensions, by default True.
+
+#     Returns
+#     -------
+#     tuple[slice]
+#         Slice to extract the coefficients overlapping with neighbour processes.
+
+#     Raises
+#     ------
+#     AssertionError
+#         `ranknd`, `grid_size` and `overlap_size` must all have the same shape.
+#     """
+
+#     ndims = ranknd.size
+
+#     if not (grid_size.size == ndims and overlap_size.size == ndims):
+#         raise AssertionError(
+#             "`ranknd`, `grid_size` and `overlap_size` must have the same shape"
+#         )
+
+#     border_slice = ndims * [np.s_[:]]
+#     isvalid_splitting = np.logical_and(grid_size > 1, overlap_size > 0)
+
+#     if backward:
+#         for d in range(ndims):
+#             if ranknd[d] > 0 and isvalid_splitting[d]:
+#                 border_slice[d] = np.s_[: overlap_size[d]]
+#     else:
+#         for d in range(ndims):
+#             if ranknd[d] < grid_size[d] - 1 and isvalid_splitting[d]:
+#                 border_slice[d] = np.s_[-overlap_size[d] :]
+
+#     return tuple(border_slice)
+
+
+def isvalid_communication(ranknd, grid_size, overlap_size, N, backward=True):
+    r"""Check which of the possible communications, along each axis or
+    combination of axes ("diagonal"), are valid.
+
+    This function checks which communications (including "diagonal"
+    communications, i.e., along a combination of multiple axes) are valid, and
+    returns the rank of the source (`src`) and destination (`dest`)
+    processes, and the number of pixels involved in each communication.
+
+    Parameters
+    ----------
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD Cartesian grid of MPI processes.
+    grid_size : numpy.ndarray[int]
+        Size of the process grid.
+    overlap_size : numpy.ndarray[int]
+        Size of the overlap between contiguous facets.
+    N : numpy.ndarray[int]
+        Total number of pixels along each direction.
+    backward : bool, optional
+        Orientation of the overlap along the dimensions, by default True.
+
+    Returns
+    -------
+    dest : list[int]
+        Rank of the destination process (``MPI.PROC_NULL`` if not valid).
+    src : list[int]
+        Rank of the source process (``MPI.PROC_NULL`` if not valid).
+    isvalid_dest : numpy.ndarray[bool]
+        List of valid communications (send).
+    isvalid_src : numpy.ndarray[bool]
+        List of valid communications (receive).
+    sizes_dest : numpy.ndarray[int]
+        Number of pixels (along each axis) to be sent for each communication.
+        Shape: ``(ndims * (ndims - 1) + 1, ndims)``.
+    sizes_src : numpy.ndarray[int]
+        Number of pixels (along each axis) to be received for each
+        communication.
+        Shape: ``(ndims * (ndims - 1) + 1, ndims)``.
+    start_src : numpy.ndarray[int]
+        Coordinates of the starting point needed to extract pixels to be
+        communicated. Shape: ``(ndims * (ndims - 1) + 1, ndims)``.
+    """
+
+    # Example: order of communications in 3D, [x, y, z]
+    # [x, y, z, xy, yz, zx, xyz]
+
+    # TODO: add definition of the subtypes to send / receive the necessary data
+    # start, size, types, ...
+    # TODO: add more comments
+
+    ndims = ranknd.size
+    isvalid_splitting = np.logical_and(overlap_size > 0, grid_size > 1)
+
+    if backward:
+        isvalid_rank_dest = ranknd < grid_size - 1
+        isvalid_rank_src = ranknd > 0
+        dest_value = 1
+    else:
+        isvalid_rank_dest = ranknd > 0
+        isvalid_rank_src = ranknd < grid_size - 1
+        dest_value = -1
+
+    isvalid_dest = np.full(ndims * (ndims - 1) + 1, False, dtype="bool")
+    isvalid_src = np.full(ndims * (ndims - 1) + 1, False, dtype="bool")
+    src = (ndims * (ndims - 1) + 1) * [MPI.PROC_NULL]
+    dest = (ndims * (ndims - 1) + 1) * [MPI.PROC_NULL]
+    sizes_dest = np.zeros((ndims * (ndims - 1) + 1, ndims), dtype="i")
+    sizes_src = np.zeros((ndims * (ndims - 1) + 1, ndims), dtype="i")
+    start_src = np.zeros((ndims * (ndims - 1) + 1, ndims), dtype="i")
+    disp = np.zeros(ndims, dtype="i")
+
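+    # zero the overlap along dimensions where the process has no source
+    # neighbour (note: overlap_size is modified in place)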
+    overlap_size[np.logical_not(isvalid_rank_src)] = 0
+    Ns = N - overlap_size
+
+    for k in range(1, ndims):
+        sel_id = deque(k * [True] + (ndims - k) * [False])
+        nsel_id = deque(k * [False] + (ndims - k) * [True])
+        for d in range(ndims):
+            c0 = np.all(isvalid_splitting[sel_id])
+            s = (k - 1) * ndims + d
+
+            if c0 and np.all(isvalid_rank_dest[sel_id]):
+                isvalid_dest[s] = True
+                sizes_dest[s, sel_id] = overlap_size[sel_id]
+                sizes_dest[s, nsel_id] = Ns[nsel_id]
+                disp[sel_id] = dest_value
+                dest[s] = get_neighbour(ranknd, grid_size, disp)
+            if c0 and np.all(isvalid_rank_src[sel_id]):
+                isvalid_src[s] = True
+                sizes_src[s, sel_id] = overlap_size[sel_id]
+                sizes_src[s, nsel_id] = Ns[nsel_id]
+                disp[sel_id] = -dest_value
+                src[s] = get_neighbour(ranknd, grid_size, disp)
+                start_src[s, np.logical_not(sel_id)] = overlap_size[
+                    np.logical_not(sel_id)
+                ]
+
+            disp[sel_id] = 0
+            sel_id.rotate(1)
+            nsel_id.rotate(1)
+
+    # all dimensions active
+    isvalid_src[-1] = np.all(isvalid_splitting) and np.all(isvalid_rank_src)
+    isvalid_dest[-1] = np.all(isvalid_splitting) and np.all(isvalid_rank_dest)
+    if isvalid_src[-1]:
+        src[-1] = get_neighbour(
+            ranknd, grid_size, np.full(ndims, -dest_value, dtype="i")
+        )
+        sizes_src[-1, :] = overlap_size
+
+    if isvalid_dest[-1]:
+        dest[-1] = get_neighbour(
+            ranknd, grid_size, np.full(ndims, dest_value, dtype="i")
+        )
+        sizes_dest[-1, :] = overlap_size
+
+    return (
+        dest,
+        src,
+        isvalid_dest,
+        isvalid_src,
+        sizes_dest,
+        sizes_src,
+        start_src,
+    )
+
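+# Hand-checked example (1D, middle process of 3, overlap 1): the send and
+# the receive along the single axis are both valid:
+# dest, src, isvalid_dest, isvalid_src, *_ = isvalid_communication(
+#     np.array([1]), np.array([3]), np.array([1]), np.array([4])
+# )
+# dest -> [2], src -> [0], isvalid_dest -> [True], isvalid_src -> [True]
+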
+
+def setup_border_update(
+    cartcomm, ndims, itemsize, facet_size, overlap_size, backward=True
+):
+    r"""Source, destination types and ranks to update facet borders (for
+    `double` format data).
+
+    Set-up destination and source data types and process ranks to communicate
+    facet borders within an nD Cartesian communicator. Diagonal communications
+    (involving more than a single dimension) are not separated from the other
+    communications.
+
+    Parameters
+    ----------
+    cartcomm : mpi4py.MPI.Cartcomm
+        Cartesian topology intracommunicator.
+    ndims : int
+        Number of dimensions of the Cartesian grid.
+    itemsize : int
+        Size in bytes of an item from the array to be sent.
+    facet_size : numpy.ndarray[int]
+        Size of the overlapping facets.
+    overlap_size : numpy.ndarray[int]
+        Overlap size between contiguous facets.
+    backward : bool, optional
+        Direction of the overlap along each axis, by default True.
+
+    Returns
+    -------
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+
+    Note
+    ----
+    Function appropriate only for subarrays of type float (``numpy.float64``).
+    Will trigger a segfault error otherwise.
+    """
+
+    # * defining custom types to communicate non-contiguous arrays in the
+    # directions considered
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    sizes = facet_size  # size of local array
+    sM = sizes - overlap_size
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    # * comm. along each dimension
+    if backward:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, 1)
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.r_[
+                    np.zeros(k, dtype="d"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="d"),
+                ]
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedsendsubarray[-1].Commit()
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.zeros(ndims, dtype="d")
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedrecvsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+    else:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, -1)
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.r_[
+                    np.zeros(k, dtype="d"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="d"),
+                ]
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedrecvsubarray[-1].Commit()
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.zeros(ndims, dtype="d")
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedsendsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+
+    return dest, src, resizedsendsubarray, resizedrecvsubarray
+
+
+def setup_border_circular_update(
+    cartcomm, ndims, itemsize, facet_size, overlap_size, ranknd, backward=True
+):
+    r"""Source, destination types and ranks to update facet borders (for
+    `double` format data).
+
+    Set-up destination and source data types and process ranks to communicate
+    facet borders within an nD Cartesian communicator. Diagonal communications
+    (involving more than a single dimension) are not separated from the other
+    communications.
+
+    Parameters
+    ----------
+    cartcomm : mpi4py.MPI.Cartcomm
+        Cartesian topology intracommunicator.
+    ndims : int
+        Number of dimensions of the Cartesian grid.
+    itemsize : int
+        Size in bytes of an item from the array to be sent.
+    facet_size : numpy.ndarray[int]
+        Size of the overlapping facets.
+    overlap_size : numpy.ndarray[int]
+        Overlap size between contiguous facets.
+    ranknd : numpy.ndarray[int]
+        Rank of the current process in the nD Cartesian grid of MPI processes.
+    backward : bool, optional
+        Direction of the overlap along each axis, by default True.
+
+    Returns
+    -------
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+
+    Note
+    ----
+    Function appropriate only for subarrays of type float (``numpy.float64``).
+    Will trigger a segfault error otherwise.
+    """
+
+    # * defining custom types to communicate non-contiguous arrays in the
+    # directions considered
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    border_sendsubarray = []
+    border_recvsubarray = []
+    border_resizedsendsubarray = []
+    border_resizedrecvsubarray = []
+
+    sizes = facet_size  # size of local array
+    sM = sizes - overlap_size
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    # * comm. along each dimension
+    # TODO: backward option to be adapted to the setting considered
+    if backward:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, 1)
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                # ! change send area for the last worker along the dimension
+                # ! (it communicates with the first worker for the circular
+                # ! border exchange)
+                if ranknd[k] == cartcomm.dims[k] - 1:
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        sM[k] - overlap_size[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                else:
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        sM[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedsendsubarray[-1].Commit()
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.zeros(ndims, dtype="d")
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedrecvsubarray[-1].Commit()
+
+                # add one more communication between first and last worker
+                # (along each dimension) for border exchange (the last worker
+                # receives from the first)
+                if ranknd[k] == cartcomm.dims[k] - 1:
+                    # recv
+                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        sM[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                    border_recvsubarray.append(
+                        MPI.DOUBLE.Create_subarray(
+                            sizes, subsizes, starts, order=MPI.ORDER_C
+                        )
+                    )
+                    border_resizedrecvsubarray.append(
+                        border_recvsubarray[-1].Create_resized(
+                            0, overlap_size[k] * itemsize
+                        )
+                    )
+                    border_resizedrecvsubarray[-1].Commit()
+
+                    # send
+                    border_resizedsendsubarray.append(None)
+
+                elif ranknd[k] == 0:
+
+                    # recv
+                    border_resizedrecvsubarray.append(None)
+
+                    # send
+                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        overlap_size[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                    border_sendsubarray.append(
+                        MPI.DOUBLE.Create_subarray(
+                            sizes, subsizes, starts, order=MPI.ORDER_C
+                        )
+                    )
+                    border_resizedsendsubarray.append(
+                        border_sendsubarray[-1].Create_resized(
+                            0, overlap_size[k] * itemsize
+                        )
+                    )
+                    border_resizedsendsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+    else:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, -1)
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.r_[
+                    np.zeros(k, dtype="d"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="d"),
+                ]
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedrecvsubarray[-1].Commit()
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                # ! change send area for first worker along the dimension (will
+                # ! communicate with the last worker for border exchange)
+                if ranknd[k] == 0:
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        overlap_size[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                else:
+                    starts = np.zeros(ndims, dtype="d")
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedsendsubarray[-1].Commit()
+
+                # add one more communication between first and last worker
+                # (along each dimension) for border exchange (first receives
+                # from last)
+                if ranknd[k] == 0:
+                    # recv
+                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                    starts = np.zeros(ndims, dtype="d")
+                    border_recvsubarray.append(
+                        MPI.DOUBLE.Create_subarray(
+                            sizes, subsizes, starts, order=MPI.ORDER_C
+                        )
+                    )
+                    border_resizedrecvsubarray.append(
+                        border_recvsubarray[-1].Create_resized(
+                            0, overlap_size[k] * itemsize
+                        )
+                    )
+                    border_resizedrecvsubarray[-1].Commit()
+
+                    # send
+                    border_resizedsendsubarray.append(None)
+
+                elif ranknd[k] == cartcomm.dims[k] - 1:
+
+                    # recv
+                    border_resizedrecvsubarray.append(None)
+
+                    # send
+                    subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                    starts = np.r_[
+                        np.zeros(k, dtype="d"),
+                        sM[k] - overlap_size[k],
+                        np.zeros(ndims - k - 1, dtype="d"),
+                    ]
+                    border_sendsubarray.append(
+                        MPI.DOUBLE.Create_subarray(
+                            sizes, subsizes, starts, order=MPI.ORDER_C
+                        )
+                    )
+                    border_resizedsendsubarray.append(
+                        border_sendsubarray[-1].Create_resized(
+                            0, overlap_size[k] * itemsize
+                        )
+                    )
+                    border_resizedsendsubarray[-1].Commit()
+                else:
+                    # TODO: find a way to avoid setting up a dummy receive here
+                    border_resizedsendsubarray.append(None)
+                    border_resizedrecvsubarray.append(None)
+
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+                border_resizedsendsubarray.append(None)
+                border_resizedrecvsubarray.append(None)
+
+    return (
+        dest,
+        src,
+        resizedsendsubarray,
+        resizedrecvsubarray,
+        border_resizedsendsubarray,
+        border_resizedrecvsubarray,
+    )
+
+
+def setup_border_update_int(
+    cartcomm, ndims, itemsize, facet_size, overlap_size, backward=True
+):
+    r"""Source and destination types and ranks to update facet borders (for
+    `int` format data).
+
+    Set up destination and source data types and process ranks to communicate
+    facet borders within an nD Cartesian communicator. Diagonal communications
+    (involving more than a single dimension) are not separated from the other
+    communications.
+
+    Parameters
+    ----------
+    cartcomm : mpi4py.MPI.Cartcomm
+        Cartesian topology intracommunicator.
+    ndims : int
+        Number of dimensions of the Cartesian grid.
+    itemsize : int
+        Size in bytes of an item from the array to be sent.
+    facet_size : numpy.ndarray[int]
+        Size of the overlapping facets.
+    overlap_size : numpy.ndarray[int]
+        Overlap size between contiguous facets.
+    backward : bool, optional
+        Direction of the overlap along each axis, by default True.
+
+    Returns
+    -------
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+
+    Note
+    ----
+    Function appropriate only for subarrays of type int (``numpy.int32``).
+    Will trigger a segfault error otherwise.
+    """
+
+    # * defining custom types to communicate non-contiguous arrays
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    sizes = facet_size  # size of local array
+    sM = sizes - overlap_size
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    # * comm. along each dimension
+    if backward:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there is no
+                # ! communication
+                [src[k], dest[k]] = cartcomm.Shift(k, 1)
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.r_[
+                    np.zeros(k, dtype="i"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="i"),
+                ]
+                sendsubarray.append(
+                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedsendsubarray[-1].Commit()
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.zeros(ndims, dtype="i")
+                recvsubarray.append(
+                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedrecvsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+    else:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, -1)
+
+                # recv buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.r_[
+                    np.zeros(k, dtype="d"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="d"),
+                ]
+                recvsubarray.append(
+                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedrecvsubarray[-1].Commit()
+
+                # send buffer
+                subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+                starts = np.zeros(ndims, dtype="d")
+                sendsubarray.append(
+                    MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedsendsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+
+    return dest, src, resizedsendsubarray, resizedrecvsubarray
+
+
+def setup_border_update_tv(
+    cartcomm, ndims, itemsize, facet_size, overlap_size, backward=True
+):
+    r"""Source, destination types and ranks to update facet borders for the
+    distributed gradient operator (for `double` format data).
+
+    Set up destination and source data types and process ranks to communicate
+    facet borders within an nD Cartesian communicator. Diagonal communications
+    (involving more than a single dimension) are not separated from the other
+    communications.
+
+    Parameters
+    ----------
+    cartcomm : mpi4py.MPI.Cartcomm
+        Cartesian topology intracommunicator.
+    ndims : int
+        Number of dimensions of the Cartesian grid.
+    itemsize : int
+        Size in bytes of an item from the array to be sent.
+    facet_size : numpy.ndarray[int]
+        Size of the overlapping facets.
+    overlap_size : numpy.ndarray[int]
+        Overlap size between contiguous facets.
+    backward : bool, optional
+        Direction of the overlap along each axis, by default True.
+
+    Returns
+    -------
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : list[MPI subarray]
+        Custom MPI subarray type describing the data array received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    """
+    # TODO
+    # ! try to generalize to merge with setup_border_update
+    # ! idea: some dimensions are not affected by the distribution, need to
+    # ! specify which are those
+    # ! some elements are hard-coded for now
+
+    # * defining custom types to communicate non-contiguous arrays in the
+    # directions considered
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    sizes = np.empty(ndims + 1, dtype="i")
+    sizes[0] = 2
+    sizes[1:] = facet_size  # size of local array
+    sM = facet_size - overlap_size
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    # * comm. along each dimension
+    if backward:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, 1)
+
+                # send buffer
+                subsizes = np.r_[
+                    2, facet_size[:k], overlap_size[k], facet_size[k + 1 :]
+                ]
+                starts = np.r_[
+                    0,
+                    np.zeros(k, dtype="i"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="i"),
+                ]
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedsendsubarray[-1].Commit()
+
+                # recv buffer
+                subsizes = np.r_[
+                    2, facet_size[:k], overlap_size[k], facet_size[k + 1 :]
+                ]
+                starts = np.zeros(ndims + 1, dtype="i")
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedrecvsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+    else:
+        for k in range(ndims):
+            if overlap_size[k] > 0:
+
+                # ! if there is no overlap along a dimension, make sure there
+                # ! is no communication
+                [src[k], dest[k]] = cartcomm.Shift(k, -1)
+
+                # recv buffer
+                subsizes = np.r_[
+                    2, facet_size[:k], overlap_size[k], facet_size[k + 1 :]
+                ]
+                starts = np.r_[
+                    0,
+                    np.zeros(k, dtype="i"),
+                    sM[k],
+                    np.zeros(ndims - k - 1, dtype="i"),
+                ]
+                recvsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedrecvsubarray.append(
+                    recvsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )  # ! see if extent is still fine for more than 2 dimensions
+                resizedrecvsubarray[-1].Commit()
+
+                # send buffer
+                subsizes = np.r_[
+                    2, facet_size[:k], overlap_size[k], facet_size[k + 1 :]
+                ]
+                starts = np.zeros(ndims + 1, dtype="i")
+                sendsubarray.append(
+                    MPI.DOUBLE.Create_subarray(
+                        sizes, subsizes, starts, order=MPI.ORDER_C
+                    )
+                )
+                resizedsendsubarray.append(
+                    sendsubarray[-1].Create_resized(0, overlap_size[k] * itemsize)
+                )
+                resizedsendsubarray[-1].Commit()
+            else:
+                resizedsendsubarray.append(None)
+                resizedrecvsubarray.append(None)
+
+    return dest, src, resizedsendsubarray, resizedrecvsubarray
+
+
+def mpi_update_borders(
+    comm, local_array, dest, src, resizedsendsubarray, resizedrecvsubarray
+):
+    r"""Update borders of the overlapping facets.
+
+    Parameters
+    ----------
+    comm : mpi4py.MPI.Comm
+        Communicator object.
+    local_array : array
+        Local buffer, from which data is sent to (resp. received from) the
+        workers whose ranks are given in the list `dest` (resp. `src`).
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : MPI subarray
+        Custom MPI subarray type describing the data sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : MPI subarray
+        Custom MPI subarray type describing the data received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+
+    Note
+    ----
+    The input array ``local_array`` is updated in-place.
+    """
+
+    ndims = len(dest)
+
+    for d in range(ndims):
+        comm.Sendrecv(
+            [local_array, 1, resizedsendsubarray[d]],
+            dest[d],
+            recvbuf=[local_array, 1, resizedrecvsubarray[d]],
+            source=src[d],
+        )
+
+    return
+
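+# Minimal usage sketch for `mpi_update_borders` (illustrative only: it assumes
+# a 2x2 process grid, double-precision facets, and the `setup_border_update`
+# helper defined earlier in this module; `facet_size`, `overlap_size` and
+# `local_array` are placeholders supplied by the caller):
+#
+#     cartcomm = MPI.COMM_WORLD.Create_cart(dims=[2, 2])
+#     dest, src, sendtypes, recvtypes = setup_border_update(
+#         cartcomm, 2, MPI.DOUBLE.Get_size(), facet_size, overlap_size
+#     )
+#     mpi_update_borders(MPI.COMM_WORLD, local_array, dest, src, sendtypes, recvtypes)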
+
+def mpi_circular_update_borders(
+    comm,
+    local_array,
+    dest,
+    src,
+    resizedsendsubarray,
+    resizedrecvsubarray,
+    border_resizedsendsubarray,
+    border_resizedrecvsubarray,
+    ranknd,
+    grid_size,
+    backward=True,
+):
+    r"""Update borders of the overlapping facets.
+
+    Parameters
+    ----------
+    comm : mpi4py.MPI.Comm
+        Communicator object.
+    local_array : array
+        Local buffer, from which data is sent to (resp. received from) the
+        workers whose ranks are given in the list `dest` (resp. `src`).
+    dest : list[int]
+        List of process ranks to which the current process sends data.
+    src : list[int]
+        List of process ranks from which the current process receives data.
+    resizedsendsubarray : MPI subarray
+        Custom MPI subarray type describing the data sent by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    resizedrecvsubarray : MPI subarray
+        Custom MPI subarray type describing the data received by the current
+        process (see `mpi4py.MPI.Datatype.Create_subarray <https://mpi4py.github.io/usrman/reference/mpi4py.MPI.Datatype.html?highlight=create%20subarray#mpi4py.MPI.Datatype.Create_subarray>`_).
+    border_resizedsendsubarray : MPI subarray
+        Custom MPI subarray type describing the border data sent between the
+        first and last workers along each axis (border exchange).
+    border_resizedrecvsubarray : MPI subarray
+        Custom MPI subarray type describing the border data received between
+        the first and last workers along each axis (border exchange).
+    ranknd : numpy.ndarray of int
+        nD rank of the current process in the Cartesian process grid.
+    grid_size : numpy.ndarray of int
+        Dimensions of the MPI process grid (Cartesian communicator).
+    backward : bool, optional
+        Direction of the wrap-around border exchange along each axis (True
+        for backward overlap, False for forward overlap). By default True.
+
+    Note
+    ----
+    The input array ``local_array`` is updated in-place.
+    """
+
+    ndims = len(dest)
+    requests = []
+
+    for d in range(ndims):
+        comm.Sendrecv(
+            [local_array, 1, resizedsendsubarray[d]],
+            dest[d],
+            recvbuf=[local_array, 1, resizedrecvsubarray[d]],
+            source=src[d],
+        )
+
+        # ! additional communication (between first and last worker along each
+        # ! axis)
+        if backward:
+            if ranknd[d] == 0:
+                requests.append(
+                    comm.Isend(
+                        [local_array, 1, border_resizedsendsubarray[d]],
+                        dest[d],
+                    )
+                )
+
+            if ranknd[d] == grid_size[d] - 1:
+                requests.append(
+                    comm.Irecv(
+                        [local_array, 1, border_resizedrecvsubarray[d]],
+                        src[d],
+                    )
+                )
+        else:
+            if ranknd[d] == 0:
+                requests.append(
+                    comm.Irecv(
+                        [local_array, 1, border_resizedrecvsubarray[d]],
+                        src[d],
+                    )
+                )
+
+            if ranknd[d] == grid_size[d] - 1:
+                requests.append(
+                    comm.Isend(
+                        [local_array, 1, border_resizedsendsubarray[d]],
+                        dest[d],
+                    )
+                )
+
+    MPI.Request.Waitall(requests)
+
+    return
+
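+# In `mpi_circular_update_borders`, the blocking Sendrecv handles the interior
+# halo exchange as in `mpi_update_borders`, while the extra non-blocking
+# Isend/Irecv pair closes the torus: along each axis, the first and last
+# workers exchange the wrap-around border, and Waitall guarantees completion
+# before the function returns.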
+
+if __name__ == "__main__":
+    N = 10
+    nchunks = 3
+    overlap = 1  # number of pixels in the overlap
+    index = 0
+
+    # no overlap
+    rg0 = split_range(nchunks, N)
+    # check that consecutive chunks are contiguous (next start = previous stop + 1)
+    test1 = np.all(rg0[1:, 0] - rg0[:-1, 1] == 1)
+    # check size of each chunk (uniform in this case)
+    test2 = np.all(np.diff(rg0, n=1, axis=1) + 1 == 3)
+
+    # # overlap
+    # rg_overlap = split_range(nchunks, N, overlap=overlap)
+    # test1 = np.all(np.abs(rg_overlap[:-1, 1] - rg_overlap[1:, 0] + 1) == overlap)
+
+    # rgo_r = split_range(nchunks, N, overlap=overlap, backward=False)
+    # test2 = np.all(np.abs(rg_overlap[:-1, 1] - rg_overlap[1:, 0] + 1) == overlap)
+    # rgo_r2 = local_split_range_nd(np.array(2*[nchunks], dtype="i"), np.array(2*[N], dtype="i"), np.array(2*[0], dtype="i"), overlap=np.array(2*[overlap], dtype="i"), backward=False)
+
+    # # interleaved
+    # rg_interleaved = split_range_interleaved(nchunks, N)
+    # x = np.arange(N)
+
+    # # check that slice k starts at index k
+    # test_start = np.all(
+    #     [rg_interleaved[k].start == k for k in range(len(rg_interleaved))]
+    # )
+    # test_stop = np.all(
+    #     [rg_interleaved[k].stop == N for k in range(len(rg_interleaved))]
+    # )
+    # test_step = np.all(
+    #     [rg_interleaved[k].step == nchunks for k in range(len(rg_interleaved))]
+    # )
+
+    # # checking indices for a single process
+    # rg_local = local_split_range(nchunks, N, index, overlap=overlap)
+    # print(rg_local)
+    # rg_local1 = local_split_range_nd(
+    #     np.array([nchunks]),
+    #     np.array([N]),
+    #     np.array([index]),
+    #     overlap=np.array([overlap]),
+    # )
+    # print(np.allclose(rg_local, rg_local1))
+
+    # index2 = np.array(np.unravel_index(index, 2 * [nchunks]))
+    # rg_local2 = local_split_range_nd(
+    #     np.array(2 * [nchunks]),
+    #     np.array(2 * [N]),
+    #     index2,
+    #     overlap=np.array(2 * [overlap]),
+    # )
+
+    # rg_local2_sym = local_split_range_symmetric_nd(
+    #     np.array(2 * [nchunks]),
+    #     np.array(2 * [N]),
+    #     index2,
+    #     overlap_pre=np.array(2 * [overlap]),
+    #     overlap_post=np.array(2 * [overlap]),
+    # )
+
+    # # check consistency between split_range and local version
+    # global_rg = np.concatenate(
+    #     [
+    #         local_split_range(nchunks, N, k, overlap=overlap)[None, :]
+    #         for k in range(nchunks)
+    #     ],
+    #     axis=0,
+    # )
+    # print(global_rg)
+    # err = np.allclose(global_rg, rg_overlap)
+    # print(err)
+
+    pass
diff --git a/src/aaxda/utils/communicators.py b/src/aaxda/utils/communicators.py
new file mode 100644
index 0000000000000000000000000000000000000000..85ead34b6d2515897530e14bb0cf288395c5f80b
--- /dev/null
+++ b/src/aaxda/utils/communicators.py
@@ -0,0 +1,445 @@
+"""Set of helper communicator objects to define a common communication
+interface across the different samplers considered.
+"""
+import weakref
+from abc import ABC, abstractmethod
+
+import numpy as np
+
+import aaxda.utils.communications as libcomm
+
+# Documentation on Abstract Base Classes (ABC): needed since Python has no
+# virtual classes by default
+# https://docs.python.org/3/library/abc.html
+# https://stackoverflow.com/questions/4613000/difference-between-cls-and-self-in-python-classes
+# https://www.geeksforgeeks.org/class-method-vs-static-method-python/
+# Always use self for the first argument to instance methods.
+# Always use cls for the first argument to class methods.
+# class methods used as utility function, no access to the state of the object
+
+# a nice post about finalizers
+# https://docs.python.org/3.6/library/weakref.html#finalizer-objects
+
+
+# TODO: prepare AsyncCartesianCommunicator (need to communicate separately
+# along the diagonals)
+
+
+class BaseCommunicator(ABC):
+    """Base communicator, including the minimal data and methods to define the
+    communication process.
+
+    Attributes
+    ----------
+    comm : mpi4py.MPI.Comm
+        Underlying MPI communicator.
+    grid_size : numpy.ndarray of int, of size ``d``
+        Number of workers along each of the ``d`` dimensions of the
+        communicator grid.
+    itemsize : numpy.dtype.itemsize
+        Size (in bytes) of the scalar type to be handled during the
+        communications.
+    facet_size : numpy.ndarray of int, of size ``d``
+        Number of elements along each of the ``d`` dimensions of the facet
+        handled by the current process.
+    """
+
+    def __init__(
+        self,
+        comm,
+        grid_size,
+        itemsize,
+        facet_size,
+    ):
+        """BaseCommunicator constructor.
+
+        Parameters
+        ----------
+        comm : mpi4py.MPI.Comm
+            Underlying MPI communicator.
+        grid_size : list of int, of size ``d``
+            Number of workers along each of the ``d`` dimensions of the
+            communicator grid.
+        itemsize : numpy.dtype.itemsize
+            Size (in bytes) of the scalar type to be handled during the
+            communications.
+        facet_size : numpy.ndarray of int, of size ``d``
+            Number of elements along each of the ``d`` dimensions of the facet
+            handled by the current process.
+
+        Raises
+        ------
+        ValueError
+            ``grid_size`` and ``facet_size`` must contain the same number of
+            elements.
+        """
+
+        # primary attributes
+        self.comm = comm
+        self.grid_size__ = grid_size
+        self.grid_size = np.array(self.grid_size__, dtype="i")
+        self.itemsize = itemsize
+        if not self.grid_size.size == facet_size.size:
+            raise ValueError(
+                "`grid_size` and `facet_size` must contain the same number of element."
+            )
+        self.facet_size = facet_size
+
+        # secondary attributes
+        self.ndims = len(grid_size)
+        self.rank = self.comm.Get_rank()
+
+    @abstractmethod
+    def update_borders(self, local_array):  # pragma: no cover
+        """Update the borders of a local array using the communication scheme
+        defined in the communicator.
+
+        Parameters
+        ----------
+        local_array : numpy.ndarray, with ``d`` dimensions
+            Local array to be updated through communications.
+
+        Returns
+        -------
+        NotImplemented
+
+        Note
+        ----
+        The method needs to be implemented in any class inheriting from
+        BaseCommunicator.
+        """
+        return NotImplemented
+
+    @abstractmethod
+    def remove(self):  # pragma: no cover
+        """Base function to clean up auxiliary quantities when the object can be
+        safely deleted.
+
+        Returns
+        -------
+        NotImplemented
+        """
+        return NotImplemented
+
+
+def free_custom_mpi_types(resizedsendsubarray, resizedrecvsubarray, isvalid_comm):
+    r"""Freeing custom MPI resized types.
+
+    Parameters
+    ----------
+    resizedsendsubarray : list of mpi4py.MPI.Datatype, of size ``d``
+        Custom MPI subarray type describing the data sent by the current
+        process, as returned by ``mpi4py.MPI.Datatype.Create_subarray``.
+    resizedrecvsubarray : list of mpi4py.MPI.Datatype, of size ``d``
+        Custom MPI subarray type describing the data received by the current
+        process, as returned by ``mpi4py.MPI.Datatype.Create_subarray``.
+    isvalid_comm : numpy.ndarray of bool, of size ``d``
+        Boolean vector indicating whether each of the ``d`` possible
+        communications are valid for the current worker (e.g., absence of
+        neighbour, ...).
+    """
+
+    ndims = isvalid_comm.size
+
+    # free custom MPI types
+    for d in range(ndims):
+        if isvalid_comm[d]:
+            resizedsendsubarray[d].Free()
+            resizedrecvsubarray[d].Free()
+
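+# Hedged sketch of how `free_custom_mpi_types` is tied to an object's lifetime
+# in the classes below (variable names are illustrative):
+#
+#     finalizer = weakref.finalize(
+#         obj, free_custom_mpi_types, sendtypes, recvtypes, isvalid_comm
+#     )
+#     finalizer()        # explicit clean-up; the callback runs at most once
+#     finalizer.alive    # False once the MPI types have been freed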
+
+class SyncCartesianCommunicator(BaseCommunicator):
+    """Cartesian communicator underlying the synchronous samplers.
+
+    Attributes
+    ----------
+    comm : mpi4py.MPI.Comm
+        Underlying MPI communicator.
+    grid_size : numpy.ndarray of int, of size ``d``
+        Number of workers along each of the ``d`` dimensions of the
+        communicator grid.
+    itemsize : numpy.dtype.itemsize
+        Size (in bytes) of the scalar type to be handled during the
+        communications.
+    facet_size : numpy.ndarray of int, of size ``d``
+        Number of elements along each of the ``d`` dimensions of the facet
+        handled by the current process.
+    overlap_size : numpy.ndarray of int, of size ``d``
+        Size of the overlap between the array handled by two different
+        workers.
+    direction : bool, optional
+        Direction of the overlap between facets along each axis (True
+        for backward overlap, False for forward overlap). By default False.
+    """
+
+    def __init__(
+        self,
+        comm,
+        cartcomm,
+        grid_size,
+        itemsize,
+        facet_size,
+        overlap_size,
+        direction=False,
+    ):
+        """SyncCartesianCommunicator constructor.
+
+        Parameters
+        ----------
+        comm : mpi4py.MPI.Comm
+            Underlying MPI communicator.
+        cartcomm : mpi4py.MPI.Cartcomm
+            Underlying Cartesian MPI communicator.
+        grid_size : list of int, of size ``d``
+            Number of workers along each of the ``d`` dimensions of the
+            communicator grid.
+        itemsize : numpy.dtype.itemsize
+            Size (in bytes) of the scalar type to be handled during the
+            communications.
+        facet_size : numpy.ndarray of int, of size ``d``
+            Number of elements along each of the ``d`` dimensions of the facet
+            handled by the current process.
+        overlap_size : numpy.ndarray of int, of size ``d``
+            Size of the overlap between the array handled by two different
+            workers.
+        direction : bool, optional
+            Direction of the overlap between facets along each axis (True
+            for backward overlap, False for forward overlap). By default False.
+
+        Raises
+        ------
+        ValueError
+            ``overlap_size`` and ``grid_size`` must contain the same number of
+            elements.
+        """
+        super(SyncCartesianCommunicator, self).__init__(
+            comm,
+            grid_size,
+            itemsize,
+            facet_size,
+        )
+        if not self.grid_size.size == overlap_size.size:
+            raise ValueError(
+                "`overlap_size` and `grid_size` must contain the same number of elements."
+            )
+        self.overlap_size = overlap_size
+
+        # TODO: give the possibility to set the type from the interface
+        # configure communication scheme
+        (
+            self.dest,
+            self.src,
+            self.resizedsendsubarray,
+            self.resizedrecvsubarray,
+        ) = libcomm.setup_border_update(
+            cartcomm,
+            self.ndims,
+            self.itemsize,
+            self.facet_size,
+            self.overlap_size,
+            backward=direction,
+        )
+
+        # (
+        #     self.dest,
+        #     self.src,
+        #     self.resizedsendsubarray,
+        #     self.resizedrecvsubarray,
+        # ) = libcomm.setup_border_update_int(
+        #     cartcomm,
+        #     self.ndims,
+        #     self.itemsize,
+        #     self.facet_size,
+        #     self.overlap_size,
+        #     backward=direction,
+        # )
+
+        # setup finalizer
+        self.isvalid_comm = self.overlap_size > 0
+        # np.logical_and(self.overlap_size > 0, self.grid_size > 1)
+        self._finalizer = weakref.finalize(
+            self,
+            free_custom_mpi_types,
+            self.resizedsendsubarray,
+            self.resizedrecvsubarray,
+            self.isvalid_comm,
+        )
+
+    def update_borders(self, local_array):
+        """Update the borders of a local array using the communication scheme
+        defined in the communicator.
+
+        Parameters
+        ----------
+        local_array : numpy.ndarray, with ``d`` dimensions, float entries
+            Local array to be updated through communications.
+
+        Note
+        ----
+        - The input array ``local_array`` is updated in-place.
+        - The function will trigger a segmentation fault if the type of
+          ``local_array`` differs from ``float`` (due to the hard-coded
+          MPI type used to define ``self.resizedsendsubarray`` and
+          ``self.resizedrecvsubarray``).
+        """
+        for d in range(self.ndims):
+            self.comm.Sendrecv(
+                [local_array, 1, self.resizedsendsubarray[d]],
+                self.dest[d],
+                recvbuf=[local_array, 1, self.resizedrecvsubarray[d]],
+                source=self.src[d],
+            )
+        return
+
+    def remove(self):
+        """Trigger object finalizer (clean-up)."""
+        return self._finalizer()
+
+    @property
+    def removed(self):
+        """Check whether the object has been finalized."""
+        return not self._finalizer.alive
+
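+# Minimal usage sketch (illustrative only; assumes 4 MPI processes arranged in
+# a 2x2 Cartesian grid, float64 facets, and `facet_size`, `overlap_size` and
+# `local_facet` defined by the caller):
+#
+#     from mpi4py import MPI
+#     comm = MPI.COMM_WORLD
+#     cartcomm = comm.Create_cart(dims=[2, 2])
+#     communicator = SyncCartesianCommunicator(
+#         comm, cartcomm, [2, 2], np.dtype("d").itemsize, facet_size, overlap_size
+#     )
+#     communicator.update_borders(local_facet)  # in-place halo exchange
+#     communicator.remove()                     # free the custom MPI types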
+
+class SyncCartesianCommunicatorTV(BaseCommunicator):
+    """Cartesian communicator underlying synchronous discrete TV computations.
+
+    Attributes
+    ----------
+    comm : mpi4py.MPI.Comm
+        Underlying MPI communicator.
+    grid_size : numpy.ndarray of int, of size ``d``
+        Number of workers along each of the ``d`` dimensions of the
+        communicator grid.
+    itemsize : numpy.dtype.itemsize
+        Size (in bytes) of the scalar type to be handled during the
+        communications.
+    facet_size : numpy.ndarray of int, of size ``d``
+        Number of elements along each of the ``d`` dimensions of the facet
+        handled by the current process.
+    overlap_size : numpy.ndarray of int, of size ``d``
+        Size of the overlap between the array handled by two different
+        workers.
+    direction : bool, optional
+        Direction of the overlap between facets along each axis (True
+        for backward overlap, False for forward overlap). By default False.
+
+    Note
+    ----
+    Try to merge this object with
+    :class:`aaxda.utils.communicators.SyncCartesianCommunicator`.
+    """
+
+    def __init__(
+        self,
+        comm,
+        cartcomm,
+        grid_size,
+        itemsize,
+        facet_size,
+        direction=False,
+    ):
+        """SyncCartesianCommunicatorTV constructor.
+
+        Parameters
+        ----------
+        comm : mpi4py.MPI.Comm
+            Underlying MPI communicator.
+        cartcomm : mpi4py.MPI.Cartcomm
+            Underlying Cartesian MPI communicator.
+        grid_size : list of int, of size ``d``
+            Number of workers along each of the ``d`` dimensions of the
+            communicator grid.
+        itemsize : numpy.dtype.itemsize
+            Size (in bytes) of the scalar type to be handled during the
+            communications.
+        facet_size : numpy.ndarray of int, of size ``d``
+            Number of elements along each of the ``d`` dimensions of the facet
+            handled by the current process.
+        direction : bool, optional
+            Direction of the overlap between facets along each axis (True
+            for backward overlap, False for forward overlap). By default False.
+
+        Note
+        ----
+        The overlap between facets is internally fixed to one pixel along
+        each axis (discrete TV).
+        """
+        super(SyncCartesianCommunicatorTV, self).__init__(
+            comm,
+            grid_size,
+            itemsize,
+            facet_size,
+        )
+        self.overlap_size = np.ones(facet_size.size, dtype="i")
+
+        # configure communication scheme
+        (
+            self.dest,
+            self.src,
+            self.resizedsendsubarray,
+            self.resizedrecvsubarray,
+        ) = libcomm.setup_border_update_tv(
+            cartcomm,
+            self.ndims,
+            self.itemsize,
+            self.facet_size,
+            self.overlap_size,
+            backward=direction,
+        )
+
+        # setup finalizer
+        self.isvalid_comm = self.overlap_size > 0
+        # np.logical_and(self.overlap_size > 0, self.grid_size > 1)
+        self._finalizer = weakref.finalize(
+            self,
+            free_custom_mpi_types,
+            self.resizedsendsubarray,
+            self.resizedrecvsubarray,
+            self.isvalid_comm,
+        )
+
+    def update_borders(self, local_array):
+        """Update the borders of a local array using the communication scheme
+        defined in the communicator.
+
+        Parameters
+        ----------
+        local_array : numpy.ndarray, with ``d`` dimensions, float entries
+            Local array to be updated through communications.
+
+        Note
+        ----
+        - The input array ``local_array`` is updated in-place.
+        - The function will trigger a segmentation fault if the type of
+          ``local_array`` differs from ``float`` (due to the hard-coded
+          MPI type used to define ``self.resizedsendsubarray`` and
+          ``self.resizedrecvsubarray``).
+        """
+        for d in range(self.ndims):
+            self.comm.Sendrecv(
+                [local_array, 1, self.resizedsendsubarray[d]],
+                self.dest[d],
+                recvbuf=[local_array, 1, self.resizedrecvsubarray[d]],
+                source=self.src[d],
+            )
+        return
+
+    def remove(self):
+        """Trigger object finalizer (clean-up)."""
+        return self._finalizer()
+
+    @property
+    def removed(self):
+        """Check whether the object has been finalized."""
+        return not self._finalizer.alive
diff --git a/src/aaxda/utils/memory.py b/src/aaxda/utils/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..529de74e4b0fe5ad54f658d808c710360064e546
--- /dev/null
+++ b/src/aaxda/utils/memory.py
@@ -0,0 +1,32 @@
+import os
+import resource
+from logging import Logger
+
+import psutil
+
+
+def display_memory(logger: Logger):
+    """Display max and current moemory usage for the current process.
+
+    Parameters
+    ----------
+    logger : Logger
+        Logger object to display the current amount of memory used (in MB).
+    """
+    process = psutil.Process(os.getpid())
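+    # note: `ru_maxrss` is expressed in kilobytes on Linux (and bytes on
+    # macOS), so the "(MB)" label below is only indicative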
+    logger.info(
+        "Max  Memory usage: %s (MB)"
+        % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+    )
+    logger.info("Curr Memory usage: %s (MB)" % (process.memory_info().rss / 1024**2))
+
+
+def get_memory_usage():
+    """Return the memory usage in Mo."""
+    # Reference
+    # https://bitbucket.org/mpi4py/mpi4py/issues/119/memory-leak-allreduce
+    process = psutil.Process(os.getpid())
+    mem = process.memory_info()[0] / float(2**20)
+    return mem
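+
+
+# Hedged usage sketch (assumes a configured ``logging.Logger``):
+#
+#     import logging
+#     logging.basicConfig(level=logging.INFO)
+#     display_memory(logging.getLogger(__name__))
+#     print(f"{get_memory_usage():.1f} MB in use")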
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/models/__init__.py b/tests/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/models/test_boundary_extensions.py b/tests/models/test_boundary_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b738fb694021fb1db0fe59ef8fe10a779d9b150
--- /dev/null
+++ b/tests/models/test_boundary_extensions.py
@@ -0,0 +1,91 @@
+"""Test the implementation of the boundary extension operators and their
+adjoint. Supported extensions include the zero-padding, (half-point) symmetric
+and circular boundary extensions."""
+import numpy as np
+import pytest
+
+import aaxda.models.padding as amp
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+@pytest.fixture
+def image_size():
+    return np.array([10, 7], dtype="i")
+
+
+@pytest.fixture
+def lsize():
+    return np.array([2, 2], dtype="i")
+
+
+@pytest.fixture
+def rsize():
+    return np.array([2, 3], dtype="i")
+
+
+@pytest.fixture
+def x(image_size, rng):
+    """Random image."""
+    return rng.normal(loc=0.0, scale=1.0, size=image_size)
+
+
+def test_error_message_pad_array_nd(x, rsize):
+    """Testing errors thrown by the pad_array_nd function."""
+    with pytest.raises(ValueError) as excinfo:
+        amp.pad_array_nd(x, np.array([1, 2, 3]), rsize, mode="constant")
+    assert "`x.shape`, `lsize` and `rsize` must have the same length." in str(
+        excinfo.value
+    )
+
+
+def test_error_message_adjoint_padding(x, lsize, rsize):
+    """Testing errors thrown by the adjoint_padding function."""
+    with pytest.raises(ValueError) as excinfo:
+        amp.adjoint_padding(x, np.array([1, 2, 3]), -rsize, mode="constant")
+    assert "`x.shape`, `lsize` and `rsize` must have the same length." in str(
+        excinfo.value
+    )
+
+    with pytest.raises(ValueError) as excinfo:
+        amp.adjoint_padding(x, lsize, -rsize, mode="zpd")
+    assert "Unknown extension `mode`: zpd" in str(excinfo.value)
+
+
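+# The three adjoint tests below rely on the dot-product identity: for a linear
+# operator P with adjoint P*, <P x, y> == <x, P* y> must hold for any x and y,
+# checked here with random arrays up to floating-point tolerance.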
+# zero padding
+def test_zero_padding(x, lsize, rsize, rng):
+    """Check consistency of the implementation of the adjoint zero-padding operator."""
+    Px = amp.pad_array_nd(x, lsize, rsize, mode="constant")
+    y = rng.normal(loc=0.0, scale=1.0, size=Px.shape)
+    Padj_y = amp.adjoint_padding(y, lsize, -rsize, mode="constant")
+
+    sp1 = np.sum(np.conj(Px) * y)  # usual scalar product
+    sp2 = np.sum(np.conj(x) * Padj_y)
+    assert np.isclose(sp1, sp2)
+
+
+# circular extension
+def test_circular_padding(x, lsize, rsize, rng):
+    """Check consistency of the implementation of the adjoint zero-padding operator."""
+    Px = amp.pad_array_nd(x, lsize, rsize, mode="wrap")
+    y = rng.normal(loc=0.0, scale=1.0, size=Px.shape)
+    Padj_y = amp.adjoint_padding(y, lsize, -rsize, mode="wrap")
+
+    sp1 = np.sum(np.conj(Px) * y)
+    sp2 = np.sum(np.conj(x) * Padj_y)
+    assert np.isclose(sp1, sp2)
+
+
+# symmetric extension
+def test_symmetric_padding(x, lsize, rsize, rng):
+    """Check consistency of the implementation of the adjoint zero-padding operator."""
+    Px = amp.pad_array_nd(x, lsize, rsize, mode="symmetric")
+    y = rng.normal(loc=0.0, scale=1.0, size=Px.shape)
+    Padj_y = amp.adjoint_padding(y, lsize, -rsize, mode="symmetric")
+
+    sp1 = np.sum(np.conj(Px) * y)
+    sp2 = np.sum(np.conj(x) * Padj_y)
+    assert np.isclose(sp1, sp2)
diff --git a/tests/models/test_convolution_model.py b/tests/models/test_convolution_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..64ab5c50f7654a64d8f00279a110610614d13916
--- /dev/null
+++ b/tests/models/test_convolution_model.py
@@ -0,0 +1,105 @@
+import numpy as np
+import pytest
+import scipy.signal as sg
+
+from aaxda.models.convolutions import fft2_conv
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.models.models import SerialConvModel
+from aaxda.models.padding import pad_array
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+@pytest.fixture
+def kernel():
+    """Convolution kernel"""
+    return generate_2d_gaussian_kernel(3, 0.1)
+
+
+@pytest.fixture
+def image_size():
+    return np.array([10, 7], dtype="i")
+
+
+@pytest.fixture
+def data_size(image_size, kernel):
+    return image_size + np.array(kernel.shape, dtype="i") - 1
+
+
+@pytest.fixture
+def x(image_size, rng):
+    return rng.normal(loc=0.0, scale=1.0, size=image_size)
+
+
+@pytest.fixture
+def lconv_model(image_size, kernel, data_size):
+    """Linear convolution model."""
+    return SerialConvModel(image_size, kernel, data_size)
+
+
+@pytest.fixture
+def cconv_model(image_size, kernel, data_size):
+    """Circular convolution model."""
+    return SerialConvModel(image_size, kernel, image_size)
+
+
+def test_SerialConvModel_throws_exceptions(image_size, kernel, data_size):
+    # inconsistent number of elements between image_size and data_size
+    with pytest.raises(ValueError) as excinfo:
+        SerialConvModel(image_size[1:], kernel, data_size)
+    assert "image_size and data_size must have the same number of elements" in str(
+        excinfo.value
+    )
+
+    # inconsistent number of axis for kernel, compared to prescribed image
+    # and data sizes
+    with pytest.raises(ValueError) as excinfo:
+        SerialConvModel(image_size, kernel[0, :], data_size)
+    assert "kernel should have ndims = len(image_size) dimensions" in str(excinfo.value)
+
+    # pass complex-valued kernel (only real kernel supported for now)
+    with pytest.raises(TypeError) as excinfo:
+        SerialConvModel(image_size, (1 + 1j) * kernel, data_size)
+    assert "only real-valued kernel supported" in str(excinfo.value)
+
+
+def test_SerialConvModel_lconv_direct_operator(x, lconv_model, kernel):
+    """Check consistency between proposed implementation and
+    ``scipy.signal.convolve``.
+    """
+    Hx = lconv_model.apply_direct_operator(x)
+    Hx_scipy = sg.convolve(x, kernel, mode="full")
+    assert np.allclose(Hx, Hx_scipy)
+
+
+def test_SerialConvModel_lconv_adjoint(x, lconv_model, rng):
+    """Check correctness of the adjoint operator (linear convolution)."""
+    y = rng.standard_normal(lconv_model.data_size)
+    Hx = lconv_model.apply_direct_operator(x)
+    Hadj_y = lconv_model.apply_adjoint_operator(y)
+    sp1 = np.sum(Hx * y)  # usual scalar product
+    sp2 = np.sum(x * Hadj_y)
+    assert np.isclose(sp1, sp2)
+
+
+def test_SerialConvModel_cconv_consistency(x, cconv_model, rng):
+    """Check consistency with ``aaxda.models.models.fft2_conv``."""
+    padded_kernel = pad_array(
+        cconv_model.kernel, cconv_model.data_size, padmode="after"
+    )
+    y_, ft_kernel = fft2_conv(x, padded_kernel, cconv_model.data_size)
+    Hx = cconv_model.apply_direct_operator(x)
+    assert np.allclose(Hx, y_)
+
+
+def test_SerialConvModel_cconv_adjoint(x, cconv_model, rng):
+    """Check correctness of the adjoint operator (circular convolution)."""
+    y = rng.standard_normal(cconv_model.data_size)
+    Hx = cconv_model.apply_direct_operator(x)
+    Hadj_y = cconv_model.apply_adjoint_operator(y)
+    sp1 = np.sum(Hx * y)  # usual scalar product
+    sp2 = np.sum(x * Hadj_y)
+    assert np.isclose(sp1, sp2)
diff --git a/tests/models/test_convolutions.py b/tests/models/test_convolutions.py
new file mode 100644
index 0000000000000000000000000000000000000000..e94c7067570d21f10ac323d9e7ee755ed1a24eee
--- /dev/null
+++ b/tests/models/test_convolutions.py
@@ -0,0 +1,161 @@
+import numpy as np
+import pytest
+import scipy.signal as sg
+
+import aaxda.models.convolutions as amc
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.models.padding import pad_array
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+@pytest.fixture
+def image_size():
+    return np.array([10, 7], "i")
+
+
+@pytest.fixture
+def x(image_size, rng):
+    return (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=image_size)
+
+
+@pytest.fixture
+def kernel():
+    return generate_2d_gaussian_kernel(3, 0.1)
+
+
+@pytest.fixture
+def data_size(image_size, kernel):
+    return image_size + np.array(kernel.shape, dtype="i") - 1
+
+
+@pytest.fixture
+def lc_fft_kernel(kernel, data_size):
+    return np.fft.rfftn(kernel, data_size)
+
+
+@pytest.fixture
+def lc_fft_kernel_c(kernel, data_size):
+    return np.fft.fftn(kernel, data_size)
+
+
+def test_fft_conv_linear_convolution_1d(x, kernel, image_size, data_size):
+    """Check :func:`aaxda.models.convolutions.fft_conv` against `scipy.signal.convolve` on a 1d array."""
+    ft_kernel = np.fft.fftn(kernel[0, :], s=[data_size[1]])
+    Hx = amc.fft_conv(x[0, :], ft_kernel, shape=data_size[1])
+    Hx_scipy = sg.convolve(x[0, :], kernel[0, :], mode="full")
+    assert np.allclose(Hx, Hx_scipy)
+
+
+def test_fft_conv_linear_convolution_real(
+    x, kernel, lc_fft_kernel, image_size, data_size
+):
+    """Check :func:`aaxda.models.convolutions.fft_conv` against
+    `scipy.signal.convolve` on a real-type array."""
+    Hx = amc.fft_conv(np.real(x), lc_fft_kernel, shape=data_size)
+    Hx_scipy = sg.convolve(np.real(x), kernel, mode="full")
+    assert np.allclose(Hx, Hx_scipy)
+
+
+def test_fft_conv_linear_convolution_complex(
+    x, kernel, lc_fft_kernel_c, image_size, data_size
+):
+    """Check :func:`aaxda.models.convolutions.fft_conv` against
+    `scipy.signal.convolve` on a complex-type array."""
+    Hx = amc.fft_conv(x, lc_fft_kernel_c, shape=data_size)
+    Hx_scipy = sg.convolve(x, kernel, mode="full")
+    assert np.allclose(Hx, Hx_scipy)
+
+
+def test_fft_conv_linear_convolution_adjoint(
+    x, kernel, lc_fft_kernel_c, image_size, data_size, rng
+):
+    """Check the correctness of the adjoint convolution operator (linear
+    convolution).
+    """
+    Hx = amc.fft_conv(x, lc_fft_kernel_c, shape=data_size)
+    y = (1 + 1j) * rng.standard_normal(data_size)
+    Hadj_y = amc.fft_conv(y, np.conj(lc_fft_kernel_c), shape=data_size)[
+        : image_size[0], : image_size[1]
+    ]
+
+    hp1 = np.sum(np.conj(Hx) * y)
+    hp2 = np.sum(np.conj(x) * Hadj_y)
+    assert np.isclose(hp1, hp2)
+
+
+def test_fft_conv_linear_convolution_adjoint_real(
+    x, kernel, lc_fft_kernel, image_size, data_size, rng
+):
+    """Check :func:`aaxda.models.convolutions.fft_conv` against
+    `scipy.signal.convolve` on a real-type array."""
+    Hx = amc.fft_conv(np.real(x), lc_fft_kernel, shape=data_size)
+    Hx_scipy = sg.convolve(np.real(x), kernel, mode="full")
+    assert np.allclose(Hx, Hx_scipy)
+
+    y = rng.standard_normal(data_size)
+    Hadj_y = amc.fft_conv(y, np.conj(lc_fft_kernel), shape=data_size)[
+        : image_size[0], : image_size[1]
+    ]
+
+    hp1 = np.sum(np.conj(Hx) * y)
+    hp2 = np.sum(np.conj(np.real(x)) * Hadj_y)
+    assert np.isclose(hp1, hp2)
+
+
+def test_fft2_conv_throws_exceptions():
+    """Check exceptions returned by
+    :func:`aaxda.models.convolutions.fft2_conv`.
+    """
+    # * shape has the wrong number of elements (1 instead of 2)
+    with pytest.raises(ValueError) as excinfo:
+        amc.fft2_conv(np.ones((5, 2)), np.ones((2, 2)), shape=[5])
+    assert "x.shape and shape must have the same length" in str(excinfo.value)
+
+    # * kernel has the wrong number of dimensions (1 instead of 2)
+    with pytest.raises(ValueError) as excinfo:
+        amc.fft2_conv(np.ones((5, 2)), np.ones(2), shape=[5, 6])
+    assert "x.shape and h.shape must have the same length" in str(excinfo.value)
+
+
+def test_fft2_conv_linear_convolution(x, kernel, image_size, data_size, rng):
+    """Check consistency between :func:`aaxda.models.convolutions.fft2_conv`
+    and :func:`aaxda.models.convolutions.fft_conv` for a linear convolution.
+    """
+    # * complex case
+    Hxc, fft_kernel_c = amc.fft2_conv(x, kernel, shape=data_size)
+    Hxc_ref = amc.fft_conv(x, fft_kernel_c, shape=data_size)
+    assert Hxc.dtype.kind == "c"
+    assert np.allclose(Hxc, Hxc_ref)
+
+    # * real case
+    x_ = np.real(x)
+    padded_kernel = pad_array(kernel, data_size, padmode="after")
+    Hx, fft_kernel = amc.fft2_conv(x_, padded_kernel, shape=data_size)
+    Hx_ref = amc.fft_conv(x_, fft_kernel, shape=data_size)
+    assert not Hx.dtype.kind == "c"
+    assert np.allclose(Hx, Hx_ref)
+
+
+def test_fft2_conv_circular_convolution(x, kernel, image_size, rng):
+    """Check consistency between :func:`aaxda.models.convolutions.fft2_conv`
+    and :func:`aaxda.models.convolutions.fft_conv` for a circular convolution.
+    """
+    # * complex case
+    Hxc, fft_kernel_c = amc.fft2_conv(x, kernel, shape=None)
+    Hxc_ref = amc.fft_conv(x, fft_kernel_c, shape=image_size)
+    assert Hxc.shape == x.shape
+    assert Hxc.dtype.kind == "c"
+    assert np.allclose(Hxc, Hxc_ref)
+
+    # * real case
+    x_ = np.real(x)
+    padded_kernel = pad_array(kernel, image_size, padmode="after")
+    Hx, fft_kernel = amc.fft2_conv(x_, padded_kernel, shape=image_size)
+    Hx_ref = amc.fft_conv(x_, fft_kernel, shape=image_size)
+    assert Hx.shape == x.shape
+    assert not Hx.dtype.kind == "c"
+    assert np.allclose(Hx, Hx_ref)
diff --git a/tests/models/test_crop.py b/tests/models/test_crop.py
new file mode 100755
index 0000000000000000000000000000000000000000..1764ab15692f894fbdbbecfa65256ba6b577198c
--- /dev/null
+++ b/tests/models/test_crop.py
@@ -0,0 +1,91 @@
+import unittest
+
+import numpy as np
+
+import aaxda.models.padding as up
+
+
+class TestCrop(unittest.TestCase):
+    def test_dimension_error(self):
+        with self.assertRaises(ValueError):
+            up.crop_array(np.ones(2), [3, 3])
+
+    def test_value_error(self):
+        with self.assertRaises(ValueError):
+            up.crop_array(np.ones(2), [3])
+
+    def test_shape(self):
+        N = [5]
+        K = [8]
+        y = np.arange(K[0])
+        x = up.crop_array(y, N)
+        self.assertTrue(len(y.shape) == len(x.shape))
+        self.assertTrue(x.size == N[0])
+
+    def test_center_value_after_cropping(self):
+        N = [5]
+        K = [8]
+        y = np.arange(K[0])
+        # fftshift convention
+        cN = N[0] // 2
+        cK = K[0] // 2
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(x[cN] == y[cK])
+        # imfilter convention
+        cN = (N[0] - 1) // 2
+        cK = (K[0] - 1) // 2
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(x[cN] == y[cK])
+
+    def test_crop_2d_around(self):
+        N = [5, 4]
+        K = [7, 8]
+        M = np.array([K[n] - N[n] for n in range(len(K))])
+        xref = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        # fftshift convention
+        y = np.zeros(K)
+        start_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        y[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = xref
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        yref = np.zeros(K)
+        start_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        yref[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = x
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_crop_2d_after(self):
+        N = [5, 4]
+        K = [7, 8]
+        xref = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        y = np.zeros(K)
+        y[: N[0], : N[1]] = xref
+        x = up.crop_array(y, N)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_crop_array_nd_shape(self):
+        N = np.array([5, 4], dtype="i")
+        L = [0, 2]
+        R = [2, None]  # last element in the slice
+        rng = np.random.default_rng(1234)
+        x = (1 + 1j) * rng.standard_normal(size=N)
+        y = up.crop_array_nd(x, L, R)
+
+        self.assertTrue(
+            np.allclose(np.array(y.shape, dtype="i"), np.array([2, 2], dtype="i"))
+        )
+
+    def test_crop_array_nd_error(self):
+        N = np.array([5, 4], dtype="i")
+        L = [0, 2]
+        R = [2]  # last element in the slice
+
+        with self.assertRaises(ValueError):
+            up.crop_array_nd(np.ones(N), L, R)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/models/test_inpainting_model.py b/tests/models/test_inpainting_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3a5b07383a53b5ee9a93a08299ea7221489155b
--- /dev/null
+++ b/tests/models/test_inpainting_model.py
@@ -0,0 +1,52 @@
+"""Test the functionalities of the SerialInpaintingModel object (errors and
+correctness of the direct and adjoint operators)."""
+import numpy as np
+import pytest
+
+from aaxda.models.data import generate_random_mask
+from aaxda.models.models import SerialInpaintingModel
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+@pytest.fixture
+def image_size():
+    return np.array([10, 7], dtype="i")
+
+
+@pytest.fixture
+def mask(rng, image_size):
+    """Inpainting mask."""
+    return generate_random_mask(image_size, 0.4, rng)
+
+
+@pytest.fixture
+def x(image_size, rng):
+    """Random image."""
+    return rng.normal(loc=0.0, scale=1.0, size=image_size)
+
+
+@pytest.fixture
+def inpainting_model(image_size, mask):
+    """Inpainting model."""
+    return SerialInpaintingModel(image_size, mask)
+
+
+def test_SerialInpaintingModel_throws_exceptions(image_size, mask):
+    # inconsistent number of elements between image_size and data_size
+    with pytest.raises(ValueError) as excinfo:
+        SerialInpaintingModel(image_size[1:], mask)
+    assert "mask and image should have the same size" in str(excinfo.value)
+
+
+def test_SerialInpaintingModel_adjoint(x, inpainting_model, rng):
+    """Check correctness of the adjoint operator."""
+    y = rng.standard_normal(inpainting_model.data_size)
+    Hx = inpainting_model.apply_direct_operator(x)
+    Hadj_y = inpainting_model.apply_adjoint_operator(y)
+    sp1 = np.sum(Hx * y)  # usual scalar product
+    sp2 = np.sum(x * Hadj_y)
+    assert np.isclose(sp1, sp2)
diff --git a/tests/models/test_jtv.py b/tests/models/test_jtv.py
new file mode 100755
index 0000000000000000000000000000000000000000..16a3fa440a3c134d71f3001dce1bd133e2606076
--- /dev/null
+++ b/tests/models/test_jtv.py
@@ -0,0 +1,293 @@
+import unittest
+
+import numpy as np
+
+import aaxda.models.jtv as tv
+import aaxda.utils.communications as ucomm
+
+
+class TestJitTV(unittest.TestCase):
+    def setUp(self):
+        self.rng = np.random.default_rng(1234)
+        N = [5, 3]
+        self.x = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=N)
+        self.yh = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=N)
+        self.yv = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=N)
+        self.Dx = tv.gradient_2d(self.x)
+        self.Dadjy = tv.gradient_2d_adjoint(self.yh, self.yv)
+
+    def test_shape(self):
+        self.assertTrue(len(self.yh.shape) == len(self.Dx[0].shape))
+        self.assertTrue(len(self.yv.shape) == len(self.Dx[1].shape))
+        self.assertTrue(len(self.Dadjy.shape) == len(self.x.shape))
+
+    def test_adjoint(self):
+        sp1 = np.sum(np.conj(self.Dx[0]) * self.yh + np.conj(self.Dx[1]) * self.yv)
+        sp2 = np.sum(np.conj(self.x) * self.Dadjy)
+        self.assertTrue(np.abs(sp1 - sp2) <= 1e-13)
+
+    def test_tv(self):
+        tv_x = tv.tv(self.x)
+        self.assertTrue(
+            np.abs(
+                tv_x
+                - np.sum(np.sqrt(np.abs(self.Dx[0]) ** 2 + np.abs(self.Dx[1]) ** 2))
+            )
+            <= 1e-13
+        )
+
+    def test_smooth_tv(self):
+        tv_x = tv.smooth_tv(self.x, np.finfo(float).eps)
+        self.assertTrue(
+            np.abs(
+                tv_x
+                - np.sum(
+                    np.sqrt(
+                        np.abs(self.Dx[0]) ** 2
+                        + np.abs(self.Dx[1]) ** 2
+                        + np.finfo(float).eps
+                    )
+                )
+            )
+            <= 1e-13
+        )
+
+    def test_gradient_smooth_tv(self):
+        grad_smooth_tv_x = tv.gradient_smooth_tv(self.x, np.finfo(float).eps)
+        c = np.sqrt(
+            np.abs(self.Dx[0]) ** 2 + np.abs(self.Dx[1]) ** 2 + np.finfo(float).eps
+        )
+        Dh_x = self.Dx[0] / c
+        Dv_x = self.Dx[1] / c
+
+        self.assertTrue(
+            np.linalg.norm(grad_smooth_tv_x - tv.gradient_2d_adjoint(Dh_x, Dv_x))
+            <= 1e-13
+        )
+
+    def test_gradient2d_throws_exception(self):
+        x = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=(3, *self.x.shape))
+        with self.assertRaises(AssertionError) as context:
+            tv.gradient_2d(x)
+        self.assertTrue(
+            "gradient_2d: Invalid input, expected len(x.shape)==2"
+            in str(context.exception)
+        )
+        # self.assertRaises(AssertionError, tv.gradient_smooth_tv, x, np.finfo(float).eps)
+
+    def test_gradient2dadjoint_throws_exception(self):
+        uh = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=(3, *self.x.shape))
+        uv = (1 + 1j) * self.rng.normal(loc=0.0, scale=1.0, size=(3, *self.x.shape))
+        with self.assertRaises(AssertionError) as context:
+            tv.gradient_2d_adjoint(uh, uv)
+        self.assertTrue(
+            "gradient_2d_adjoint: Invalid input, expected len(uh.shape)==len(uv.shape)==2"
+            in str(context.exception)
+        )
+
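+    # The chunked-gradient tests below tile the image over a small process
+    # grid, apply `chunk_gradient_2d` to each overlapping facet, and check
+    # that the reassembled result matches the serial `gradient_2d` output.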
+    def test_chunkgradient2d_backward(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        x = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uh0, uv0 = tv.gradient_2d(x)
+        uh = np.empty(N, dtype=complex)
+        uv = np.empty(N, dtype=complex)
+
+        grid_size = np.array([3, 2], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+
+            # direct operator
+            rd = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=True
+            )
+            facet = x[rd[0, 0] : rd[0, 1] + 1, rd[1, 0] : rd[1, 1] + 1]
+            uh_k, uv_k = tv.chunk_gradient_2d(facet, islast)
+
+            start = rd[:, 0]
+            stop = start + np.array(uv_k.shape, dtype="i")
+            uv[start[0] : stop[0], start[1] : stop[1]] = uv_k
+            stop = start + np.array(uh_k.shape, dtype="i")
+            uh[start[0] : stop[0], start[1] : stop[1]] = uh_k
+
+        self.assertTrue(np.allclose(uv, uv0))
+        self.assertTrue(np.allclose(uh, uh0))
+
+    def test_chunkgradient2d_forward(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        x = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uh0, uv0 = tv.gradient_2d(x)
+        uh = np.empty(N, dtype=complex)
+        uv = np.empty(N, dtype=complex)
+
+        grid_size = np.array([3, 2], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+
+            # direct operator
+            r0 = ucomm.local_split_range_nd(grid_size, N, ranknd)
+            rd = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=False
+            )
+            facet = x[rd[0, 0] : rd[0, 1] + 1, rd[1, 0] : rd[1, 1] + 1]
+            uh_k, uv_k = tv.chunk_gradient_2d(facet, islast)
+
+            start = r0[:, 0]
+            stop = start + np.array(uv_k.shape, dtype="i")
+            uv[start[0] : stop[0], start[1] : stop[1]] = uv_k
+            stop = start + np.array(uh_k.shape, dtype="i")
+            uh[start[0] : stop[0], start[1] : stop[1]] = uh_k
+
+        self.assertTrue(np.allclose(uv, uv0))
+        self.assertTrue(np.allclose(uh, uh0))
+
+    def test_chunkgradient2dadjoint(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        uh = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uv = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        x0 = tv.gradient_2d_adjoint(uh, uv)
+        x = np.zeros(N, dtype=complex)
+
+        grid_size = np.array([3, 3], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+            isfirst = ranknd == 0
+            r0 = ucomm.local_split_range_nd(grid_size, N, ranknd)
+            Nk = r0[:, 1] - r0[:, 0] + 1
+
+            ra = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=True
+            )
+            facet_h = uh[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            facet_v = uv[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            x_k = np.zeros(Nk, dtype=complex)
+            tv.chunk_gradient_2d_adjoint(facet_h, facet_v, x_k, isfirst, islast)
+
+            start = r0[:, 0]
+            stop = start + Nk
+            x[start[0] : stop[0], start[1] : stop[1]] = x_k
+
+        self.assertTrue(np.allclose(x, x0))
+
+    def test_chunkgradient2dadjoint_single_facet(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        uh = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uv = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        x0 = tv.gradient_2d_adjoint(uh, uv)
+        x = np.zeros(N, dtype=complex)
+
+        grid_size = np.array([1, 1], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+            isfirst = ranknd == 0
+            r0 = ucomm.local_split_range_nd(grid_size, N, ranknd)
+            Nk = r0[:, 1] - r0[:, 0] + 1
+
+            ra = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=True
+            )
+            facet_h = uh[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            facet_v = uv[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            x_k = np.zeros(Nk, dtype=complex)
+            tv.chunk_gradient_2d_adjoint(facet_h, facet_v, x_k, isfirst, islast)
+
+            start = r0[:, 0]
+            stop = start + Nk
+            x[start[0] : stop[0], start[1] : stop[1]] = x_k
+
+        self.assertTrue(np.allclose(x, x0))
+
+    def test_chunkgradient2dadjoint_two_facets_y(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        uh = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uv = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        x0 = tv.gradient_2d_adjoint(uh, uv)
+        x = np.zeros(N, dtype=complex)
+
+        grid_size = np.array([1, 2], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+            isfirst = ranknd == 0
+            r0 = ucomm.local_split_range_nd(grid_size, N, ranknd)
+            Nk = r0[:, 1] - r0[:, 0] + 1
+
+            ra = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=True
+            )
+            facet_h = uh[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            facet_v = uv[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            x_k = np.zeros(Nk, dtype=complex)
+            tv.chunk_gradient_2d_adjoint(facet_h, facet_v, x_k, isfirst, islast)
+
+            start = r0[:, 0]
+            stop = start + Nk
+            x[start[0] : stop[0], start[1] : stop[1]] = x_k
+
+        self.assertTrue(np.allclose(x, x0))
+
+    def test_chunkgradient2dadjoint_two_facets_x(self):
+        N = np.array([10, 5], "i")
+        rng = np.random.default_rng(1234)
+        uh = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        uv = (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+        x0 = tv.gradient_2d_adjoint(uh, uv)
+        x = np.zeros(N, dtype=complex)
+
+        grid_size = np.array([2, 1], dtype="i")
+        nchunks = np.prod(grid_size)
+
+        overlap = (grid_size > 1).astype(int)
+
+        for k in range(nchunks):
+            ranknd = np.array(np.unravel_index(k, grid_size), dtype="i")
+            islast = ranknd == grid_size - 1
+            isfirst = ranknd == 0
+            r0 = ucomm.local_split_range_nd(grid_size, N, ranknd)
+            Nk = r0[:, 1] - r0[:, 0] + 1
+
+            ra = ucomm.local_split_range_nd(
+                grid_size, N, ranknd, overlap=overlap, backward=True
+            )
+            facet_h = uh[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            facet_v = uv[ra[0, 0] : ra[0, 1] + 1, ra[1, 0] : ra[1, 1] + 1]
+            x_k = np.zeros(Nk, dtype=complex)
+            tv.chunk_gradient_2d_adjoint(facet_h, facet_v, x_k, isfirst, islast)
+
+            start = r0[:, 0]
+            stop = start + Nk
+            x[start[0] : stop[0], start[1] : stop[1]] = x_k
+
+        self.assertTrue(np.allclose(x, x0))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/models/test_padding.py b/tests/models/test_padding.py
new file mode 100755
index 0000000000000000000000000000000000000000..e8f0613c4ec465b5e46d7c25dcac1a7721ef0a1a
--- /dev/null
+++ b/tests/models/test_padding.py
@@ -0,0 +1,275 @@
+import unittest
+
+import numpy as np
+
+import aaxda.models.padding as up
+
+
+class TestPadding(unittest.TestCase):
+    def test_dimension_error(self):
+        with self.assertRaises(ValueError):
+            up.pad_array(np.ones(2), [0, 0])
+        with self.assertRaises(ValueError):
+            up.crop_array(np.ones(2), [3])
+
+    def test_value_error(self):
+        with self.assertRaises(ValueError):
+            up.pad_array(np.ones(2), [1])
+        with self.assertRaises(ValueError):
+            up.crop_array(np.ones(2), [1, 1])
+
+    def test_shape(self):
+        N = [5]
+        K = [8]
+        x = np.arange(N[0])
+        y = up.pad_array(x, K)
+        self.assertTrue(len(y.shape) == len(x.shape))
+        self.assertTrue(y.size == K[0])
+
+    def test_center_value_after_padding(self):
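+        # "around" padding centers the input; the default follows the fftshift
+        # convention (larger pad half before the signal), while the boolean
+        # flag presumably switches to the imfilter convention (larger half after)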
+        N = [5]
+        K = [8]
+        x = np.arange(N[0])
+        # fftshift convention
+        cN = N[0] // 2
+        cK = K[0] // 2
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(x[cN] == y[cK])
+        # imfilter convention
+        cN = (N[0] - 1) // 2
+        cK = (K[0] - 1) // 2
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(x[cN] == y[cK])
+
+    def test_pad_1d_around_odd_to_even(self):
+        N = [5]
+        K = [8]
+        M = K[0] - N[0]
+        x = np.arange(N[0])
+        # fftshift convention
+        yref = np.r_[np.zeros(int(np.ceil(M / 2))), x, np.zeros(M // 2)]
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.r_[np.zeros(M // 2), x, np.zeros(int(np.ceil(M / 2)))]
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_1d_around_even_to_odd(self):
+        N = [5]
+        K = [8]
+        M = K[0] - N[0]
+        xref = np.arange(N[0])
+        # fftshift convention
+        y = np.r_[np.zeros(int(np.ceil(M / 2))), xref, np.zeros(M // 2)]
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        y = np.r_[np.zeros(M // 2), xref, np.zeros(int(np.ceil(M / 2)))]
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_pad_1d_around_even_to_odd(self):
+        N = [6]
+        K = [9]
+        M = K[0] - N[0]
+        x = np.arange(N[0])
+        # fftshift convention
+        yref = np.r_[np.zeros(int(np.ceil(M / 2))), x, np.zeros(M // 2)]
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.r_[np.zeros(M // 2), x, np.zeros(int(np.ceil(M / 2)))]
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_1d_around_odd_to_even(self):
+        N = [6]
+        K = [9]
+        M = K[0] - N[0]
+        xref = np.arange(N[0])
+        # fftshift convention
+        y = np.r_[np.zeros(int(np.ceil(M / 2))), xref, np.zeros(M // 2)]
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        y = np.r_[np.zeros(M // 2), xref, np.zeros(int(np.ceil(M / 2)))]
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_pad_1d_around_odd_to_odd(self):
+        N = [5]
+        K = [9]
+        M = K[0] - N[0]
+        x = np.arange(N[0])
+        # fftshift convention
+        yref = np.r_[np.zeros(int(np.ceil(M / 2))), x, np.zeros(M // 2)]
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.r_[np.zeros(M // 2), x, np.zeros(int(np.ceil(M / 2)))]
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_1d_around_odd_to_odd(self):
+        N = [5]
+        K = [9]
+        M = K[0] - N[0]
+        xref = np.arange(N[0])
+        # fftshift convention
+        y = np.r_[np.zeros(int(np.ceil(M / 2))), xref, np.zeros(M // 2)]
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        y = np.r_[np.zeros(M // 2), xref, np.zeros(int(np.ceil(M / 2)))]
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_pad_1d_around_even_to_even(self):
+        N = [4]
+        K = [8]
+        M = K[0] - N[0]
+        x = np.arange(N[0])
+        # fftshift convention
+        yref = np.r_[np.zeros(int(np.ceil(M / 2))), x, np.zeros(M // 2)]
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.r_[np.zeros(M // 2), x, np.zeros(int(np.ceil(M / 2)))]
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_pad_1d_around_even_to_even_kwargs(self):
+        N = [4]
+        K = [8]
+        M = K[0] - N[0]
+        x = np.arange(N[0])
+        # fftshift convention
+        yref = np.r_[np.zeros(int(np.ceil(M / 2))), x, np.zeros(M // 2)]
+        y = up.pad_array(x, K, padmode="around", mode="constant")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.r_[np.zeros(M // 2), x, np.zeros(int(np.ceil(M / 2)))]
+        y = up.pad_array(x, K, "around", True, "constant")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_1d_around_even_to_even(self):
+        N = [4]
+        K = [8]
+        M = K[0] - N[0]
+        xref = np.arange(N[0])
+        # fftshift convention
+        y = np.r_[np.zeros(int(np.ceil(M / 2))), xref, np.zeros(M // 2)]
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        y = np.r_[np.zeros(M // 2), xref, np.zeros(int(np.ceil(M / 2)))]
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_pad_2d_around(self):
+        N = [5, 4]
+        K = [7, 8]
+        M = np.array([K[n] - N[n] for n in range(len(K))])
+        x = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        # fftshift convention
+        yref = np.zeros(K)
+        start_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        yref[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = x
+        y = up.pad_array(x, K, "around")
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+        # imfilter convention
+        yref = np.zeros(K)
+        start_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        yref[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = x
+        y = up.pad_array(x, K, "around", True)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_2d_around(self):
+        N = [5, 4]
+        K = [7, 8]
+        M = np.array([K[n] - N[n] for n in range(len(K))])
+        xref = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        # fftshift convention
+        y = np.zeros(K)
+        start_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        y[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = xref
+        x = up.crop_array(y, N, "around")
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+        # imfilter convention
+        y = np.zeros(K)
+        start_crop = [int(np.floor(M[n] / 2)) for n in range(len(N))]
+        stop_crop = [int(np.ceil(M[n] / 2)) for n in range(len(N))]
+        y[tuple([np.s_[start_crop[n] : -stop_crop[n]] for n in range(len(N))])] = xref
+        x = up.crop_array(y, N, "around", True)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    def test_pad_2d_after(self):
+        N = [5, 4]
+        K = [7, 8]
+        x = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        yref = np.zeros(K)
+        yref[: N[0], : N[1]] = x
+        y = up.pad_array(x, K)
+        self.assertTrue(np.linalg.norm(y - yref) <= 1e-13)
+
+    def test_crop_2d_after(self):
+        N = [5, 4]
+        K = [7, 8]
+        xref = np.arange(N[0])[:, np.newaxis] * np.arange(N[1])[np.newaxis, :]
+        y = np.zeros(K)
+        y[: N[0], : N[1]] = xref
+        x = up.crop_array(y, N)
+        self.assertTrue(np.linalg.norm(x - xref) <= 1e-13)
+
+    # * testing pad_array_nd / crop_array_nd
+    def test_pad_array_nd_shape(self):
+        N = np.array([5, 4], dtype="i")
+        L = np.array([0, 2], dtype="i")
+        R = np.array([2, 0], dtype="i")
+        rng = np.random.default_rng(1234)
+        x = (1 + 1j) * rng.standard_normal(size=N)
+        y = up.pad_array_nd(x, L, R)
+
+        self.assertTrue(np.allclose(np.array(y.shape, dtype="i"), N + L + R))
+        self.assertTrue(np.isclose(np.linalg.norm(y), np.linalg.norm(x)))
+
+    def test_pad_array_nd_error(self):
+        x = np.array([5, 4], dtype="i")
+        L = [0, 2]
+        R = [2]  # inconsistent number of entries: should raise ValueError
+
+        with self.assertRaises(ValueError):
+            up.pad_array_nd(x, L, R)
+
+    def test_crop_array_nd_error(self):
+        x = np.array([5, 4], dtype="i")
+        L = [0, 2]
+        R = [2]  # inconsistent number of entries: should raise ValueError
+
+        with self.assertRaises(ValueError):
+            up.crop_array_nd(x, L, R)
+
+    def test_adjoint_pad_crop_array_nd(self):
+        N = [5, 4]
+        Lx = [0, 2]
+        Rx = [-2, None]
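+        # Rx entries are assumed to act as slice end points (-2 trims two
+        # trailing samples, None keeps the axis up to its end)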
+        Ly = [0, 2]
+        Ry = [2, 0]
+        rng = np.random.default_rng(1234)
+        x = (1 + 1j) * rng.standard_normal(size=N)
+        Cx = up.crop_array_nd(x, Lx, Rx)
+        y = (1 + 1j) * rng.standard_normal(size=Cx.shape)
+        Cadj_y = up.pad_array_nd(y, Ly, Ry)
+
+        # check the 2 operators are adjoint
+        sp1 = np.sum(np.conj(Cx) * y)
+        sp2 = np.sum(np.conj(x) * Cadj_y)
+        self.assertTrue(np.abs(sp1 - sp2) <= 1e-13)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/models/test_prox.py b/tests/models/test_prox.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7b6accd3878dd51183fb005b1e1bac28e7d9c25
--- /dev/null
+++ b/tests/models/test_prox.py
@@ -0,0 +1,62 @@
+import unittest
+
+import numpy as np
+
+import aaxda.models.prox as prox
+
+
+class TestProx(unittest.TestCase):
+    def setUp(self):
+        self.rng = np.random.default_rng(1234)
+        N = [5, 3]
+        self.x = self.rng.normal(loc=0.0, scale=1.0, size=N)
+        self.y = self.rng.normal(loc=0.0, scale=1.0, size=N)
+
+    def test_hard_thresholding(self):
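+        # hard_thresholding(x, tau) is expected to zero entries with |x| <= tau
+        # and keep the others unchanged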
+        z0 = prox.hard_thresholding(1.0, 0.0)
+        z1 = prox.hard_thresholding(1.0, 2.0)
+        self.assertTrue(np.isclose(z0, 1.0))
+        self.assertTrue(np.isclose(z1, 0.0))
+
+    def test_prox_nonegativity(self):
+        z = self.x.copy()
+        prox.prox_nonegativity(z)
+        self.assertTrue(np.allclose(z[z > 0], self.x[self.x > 0]))
+
+    def test_prox_kullback_leibler(self):
+        z = prox.prox_kullback_leibler(
+            np.zeros_like(self.x), np.zeros_like(self.y), lam=1
+        )
+        self.assertTrue(np.allclose(z, 0.0))
+
+    def test_prox_kullback_leibler_negative_regularizer(self):
+        with self.assertRaises(ValueError):
+            prox.prox_kullback_leibler(self.x, self.y, lam=-1)
+
+    def test_prox_l21_norm(self):
+        z = prox.prox_l21norm(np.zeros_like(self.x), lam=1, axis=0)
+        self.assertTrue(np.allclose(z, 0.0))
+
+    def test_prox_l21_norm_negative_regularizer(self):
+        with self.assertRaises(ValueError):
+            prox.prox_l21norm(self.x, lam=-1, axis=0)
+
+
+class TestFunctions(unittest.TestCase):
+    def setUp(self):
+        self.rng = np.random.default_rng(5678)
+        N = [5, 3]
+        self.x = self.rng.normal(loc=0.0, scale=1.0, size=N)
+        self.y = self.rng.normal(loc=0.0, scale=1.0, size=N)
+
+    def test_l21_norm(self):
+        z = prox.l21_norm(self.x, axis=0)
+        self.assertTrue(np.allclose(z, np.sum(np.sqrt(np.sum(self.x ** 2, axis=0)))))
+
+    def test_kullback_leibler(self):
+        z = prox.kullback_leibler(self.x, np.zeros_like(self.y))
+        self.assertTrue(np.allclose(z, np.sum(self.x)))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/models/test_tv.py b/tests/models/test_tv.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f4f478db4b2b26a3c98cdb61a974b7502d5931e
--- /dev/null
+++ b/tests/models/test_tv.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pytest
+
+import aaxda.models.tv as tv
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+@pytest.fixture
+def N():
+    return [5, 3]
+
+
+@pytest.fixture
+def x(N, rng):
+    return (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=N)
+
+
+@pytest.fixture
+def y(N, rng):
+    return (1 + 1j) * rng.normal(loc=0.0, scale=1.0, size=[2, *N])
+
+
+@pytest.fixture
+def Dx(x):
+    return tv.gradient_2d(x)
+
+
+@pytest.fixture
+def Dadjy(y):
+    return tv.gradient_2d_adjoint(y)
+
+
+def test_gradient2d_throws_exception():
+    with pytest.raises(AssertionError) as excinfo:
+        tv.gradient_2d(np.ones(5))
+    assert "gradient_2d: Invalid input, expected a 2d numpy array" in str(excinfo.value)
+
+
+def test_shape(x, Dx, y, Dadjy):
+    assert y.shape == Dx.shape
+    assert Dadjy.shape == x.shape
+
+
+def test_adjoint(x, Dx, y, Dadjy):
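+    # adjointness check: <Dx, y> must equal <x, D* y> for random x and y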
+    sp1 = np.sum(np.conj(Dx) * y)
+    sp2 = np.sum(np.conj(x) * Dadjy)
+    assert np.allclose(sp1, sp2)
+
+
+def test_tv(x, Dx):
+    tv_x = tv.tv(x)
+    assert np.allclose(tv_x, np.sum(np.sqrt(np.sum(np.abs(Dx) ** 2, axis=0))))
+
+
+def test_gradientnd(x):
+    u = tv.gradient_nd(x)
+    u_ref = tv.gradient_2d(x)
+    err = np.array([np.linalg.norm(u[k] - u_ref[k]) for k in range(len(u))])
+    assert np.allclose(err, 0)
+
+
+def test_gradientndadjoint(x, y):
+    Dx = tv.gradient_nd(x)
+    Dadjy = tv.gradient_nd_adjoint(y)
+    sp1 = np.sum(np.conj(Dx) * y)
+    sp2 = np.sum(np.conj(x) * Dadjy)
+    assert np.allclose(sp1, sp2)
+
+
+def test_tv_nd(x):
+    tv_nd_x = tv.tv_nd(x)
+    tv_x = tv.tv(x)
+    assert np.allclose(tv_x, tv_nd_x)
+
+
+def test_smooth_tv(x, Dx):
+    tv_x = tv.smooth_tv(x, np.finfo(float).eps)
+    assert np.allclose(
+        tv_x,
+        np.sum(np.sqrt(np.abs(Dx[0]) ** 2 + np.abs(Dx[1]) ** 2 + np.finfo(float).eps)),
+    )
+
+
+def test_gradient_smooth_tv(x, Dx):
+    grad_smooth_tv_x = tv.gradient_smooth_tv(x, np.finfo(float).eps)
+    c = np.sqrt(np.abs(Dx[0]) ** 2 + np.abs(Dx[1]) ** 2 + np.finfo(float).eps)
+    Dx = Dx / c
+
+    assert np.allclose(grad_smooth_tv_x, tv.gradient_2d_adjoint(Dx))
diff --git a/tests/mpi/__init__.py b/tests/mpi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/mpi/test_mpi.py b/tests/mpi/test_mpi.py
new file mode 100644
index 0000000000000000000000000000000000000000..c99064a637973a0b858c72099cdbc4815f03881a
--- /dev/null
+++ b/tests/mpi/test_mpi.py
@@ -0,0 +1,254 @@
+"""Test basic MPI communications.
+"""
+import numpy as np
+import pytest
+from mpi4py import MPI
+
+import aaxda.utils.communications as ucomm
+
+# pytestmark = pytest.mark.mpi
+
+
+@pytest.fixture
+def comm():
+    return MPI.COMM_WORLD
+
+
+@pytest.fixture
+def size(comm):
+    return comm.Get_size()
+
+
+@pytest.fixture
+def rank(comm):
+    return comm.Get_rank()
+
+
+def test_communication_1d(comm, rank, size):
+    N = np.array([20, 20], dtype="i")  # overall image size
+    M = np.array([2, 2], dtype="i")  # size of convolution kernel
+    overlap_size = M - 1  # overlap size
+    ndims = 2
+    grid_size = [size, 1]
+
+    # * create Cartesian topology communicator
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    grid_size = np.array(grid_size, dtype="i")
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype=int)
+
+    local_tile = ucomm.local_split_range_nd(grid_size, N, ranknd)
+    tile_size = local_tile[:, 1] - local_tile[:, 0] + 1
+    local_indices = ucomm.local_split_range_nd(
+        grid_size, N, ranknd, overlap=overlap_size
+    )
+    facet_size = local_indices[:, 1] - local_indices[:, 0] + 1
+    facet = np.full(facet_size, rank + 1, dtype="i")
+
+    # * defining custom types to communicate non-contiguous arrays in the
+    # * directions considered
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    sizes = facet_size  # size of local array
+    sM = sizes - overlap_size
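+    # sM: start index of the trailing border slab sent to the next process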
+
+    # * comm. along each dimension
+    for k in range(ndims):
+        if overlap_size[k] > 0:
+            # send buffer
+            subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+            starts = np.r_[
+                np.zeros(k, dtype="i"),
+                sM[k],
+                np.zeros(ndims - k - 1, dtype="i"),
+            ]
+            sendsubarray.append(
+                MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+            )
+            resizedsendsubarray.append(
+                sendsubarray[-1].Create_resized(0, overlap_size[k] * facet.itemsize)
+            )
+            resizedsendsubarray[-1].Commit()
+
+            # recv buffer
+            subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+            starts = np.zeros(ndims, dtype="i")
+            recvsubarray.append(
+                MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+            )
+            resizedrecvsubarray.append(
+                recvsubarray[-1].Create_resized(0, overlap_size[k] * facet.itemsize)
+            )
+            resizedrecvsubarray[-1].Commit()
+        else:
+            resizedsendsubarray.append(None)
+            resizedrecvsubarray.append(None)
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    for d in range(ndims):
+        if overlap_size[d] > 0:
+            [src[d], dest[d]] = cartcomm.Shift(d, 1)
+
+    # * communications
+    for d in range(ndims):
+        comm.Sendrecv(
+            [facet, 1, resizedsendsubarray[d]],
+            dest[d],
+            recvbuf=[facet, 1, resizedrecvsubarray[d]],
+            source=src[d],
+        )
+
+    (
+        dst,
+        sr,
+        isvalid_dest,
+        isvalid_src,
+        sizes_dest,
+        sizes_src,
+        start_src,
+    ) = ucomm.isvalid_communication(ranknd, grid_size, overlap_size, N)
+
+    s0 = np.zeros(ndims * (ndims - 1) + 1)
+
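+    # sum the border values received from each valid neighbour: the facet sum
+    # must equal this worker's own tile contribution plus these borders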
+    for k in range(sizes_src.shape[0]):
+        if isvalid_src[k]:
+            sel = tuple(
+                [
+                    np.s_[start_src[k, d] : start_src[k, d] + sizes_src[k, d]]
+                    for d in range(ndims)
+                ]
+            )
+            s0[k] = np.sum(facet[sel])
+
+    assert np.isclose(np.sum(facet), (rank + 1) * np.prod(tile_size) + np.sum(s0))
+
+    # print("Worker {}: {}".format(rank, facet))
+
+    comm.Barrier()
+    for d in range(ndims):
+        if overlap_size[d] > 0:
+            resizedsendsubarray[d].Free()
+            resizedrecvsubarray[d].Free()
+    comm.Barrier()
+
+
+def test_communication_2d(comm, size, rank):
+    N = np.array([20, 20], dtype="i")  # overall image size
+    M = np.array([2, 2], dtype="i")  # size of convolution kernel
+    overlap_size = M - 1  # overlap size
+    ndims = 2
+    grid_size = MPI.Compute_dims(size, ndims)
+
+    # * create Cartesian topology communicator
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    grid_size = np.array(grid_size, dtype="i")
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype=int)
+
+    local_tile = ucomm.local_split_range_nd(grid_size, N, ranknd)
+    tile_size = local_tile[:, 1] - local_tile[:, 0] + 1
+    local_indices = ucomm.local_split_range_nd(
+        grid_size, N, ranknd, overlap=overlap_size
+    )
+    facet_size = local_indices[:, 1] - local_indices[:, 0] + 1
+    facet = np.full(facet_size, rank + 1, dtype="i")
+
+    # * defining custom types to communicate non-contiguous arrays in the
+    # directions considered
+    sendsubarray = []
+    recvsubarray = []
+    resizedsendsubarray = []
+    resizedrecvsubarray = []
+
+    sizes = facet_size  # size of local array
+    sM = sizes - overlap_size
+
+    # * comm. along each dimension
+    for k in range(ndims):
+        if overlap_size[k] > 0:
+            # send buffer
+            subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+            starts = np.r_[
+                np.zeros(k, dtype="i"),
+                sM[k],
+                np.zeros(ndims - k - 1, dtype="i"),
+            ]
+            sendsubarray.append(
+                MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+            )
+            resizedsendsubarray.append(
+                sendsubarray[-1].Create_resized(0, overlap_size[k] * facet.itemsize)
+            )
+            resizedsendsubarray[-1].Commit()
+
+            # recv buffer
+            subsizes = np.r_[sizes[:k], overlap_size[k], sizes[k + 1 :]]
+            starts = np.zeros(ndims, dtype="i")
+            recvsubarray.append(
+                MPI.INT.Create_subarray(sizes, subsizes, starts, order=MPI.ORDER_C)
+            )
+            resizedrecvsubarray.append(
+                recvsubarray[-1].Create_resized(0, overlap_size[k] * facet.itemsize)
+            )
+            resizedrecvsubarray[-1].Commit()
+        else:
+            resizedsendsubarray.append(None)
+            resizedrecvsubarray.append(None)
+
+    # * rank of processes involved in the communications
+    src = ndims * [MPI.PROC_NULL]
+    dest = ndims * [MPI.PROC_NULL]
+
+    for d in range(ndims):
+        if overlap_size[d] > 0:
+            [src[d], dest[d]] = cartcomm.Shift(d, 1)
+
+    # * communications
+    for d in range(ndims):
+        comm.Sendrecv(
+            [facet, 1, resizedsendsubarray[d]],
+            dest[d],
+            recvbuf=[facet, 1, resizedrecvsubarray[d]],
+            source=src[d],
+        )
+
+    (
+        dst,
+        sr,
+        isvalid_dest,
+        isvalid_src,
+        sizes_dest,
+        sizes_src,
+        start_src,
+    ) = ucomm.isvalid_communication(ranknd, grid_size, overlap_size, N)
+
+    s0 = np.zeros(ndims * (ndims - 1) + 1)
+
+    for k in range(sizes_src.shape[0]):
+        if isvalid_src[k]:
+            sel = tuple(
+                [
+                    np.s_[start_src[k, d] : start_src[k, d] + sizes_src[k, d]]
+                    for d in range(ndims)
+                ]
+            )
+            s0[k] = np.sum(facet[sel])
+
+    assert np.isclose(np.sum(facet), (rank + 1) * np.prod(tile_size) + np.sum(s0))
+
+    comm.Barrier()
+    for d in range(ndims):
+        if overlap_size[d] > 0:
+            resizedsendsubarray[d].Free()
+            resizedrecvsubarray[d].Free()
+    comm.Barrier()
+
+
+# mpiexec -n 2 python -m pytest tests/mpi/test_mpi.py
diff --git a/tests/mpi/test_mpi_checkpoint.py b/tests/mpi/test_mpi_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae3cabd6b5e72024266c3240fefbf459b41067a2
--- /dev/null
+++ b/tests/mpi/test_mpi_checkpoint.py
@@ -0,0 +1,226 @@
+"""Test distributed checkpoint functionalities.
+"""
+import numpy as np
+import pytest
+from mpi4py import MPI
+
+import aaxda.utils.communications as ucomm
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.utils.checkpoint import DistributedCheckpoint
+
+pytestmark = pytest.mark.mpi
+
+
+@pytest.fixture
+def comm():
+    return MPI.COMM_WORLD
+
+
+# ! tmp_path seems to hang forever in MPI tests: why?
+@pytest.fixture
+def checkpointer(comm):  # tmp_path
+    return DistributedCheckpoint(
+        comm,
+        "mpi_checkpoint_test",  # tmp_path / "mpi_checkpoint_test",
+        cname="gzip",
+        clevel=5,
+        shuffle=1,
+    )
+
+
+@pytest.fixture
+def local_rng(comm):
+    rank = comm.Get_rank()
+    size = comm.Get_size()
+    if rank == 0:
+        ss = np.random.SeedSequence(1234)
+        # spawn one child SeedSequence per process; scattered from the root below
+        child_seed = np.array(ss.spawn(size))
+    else:
+        child_seed = None
+
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+    return local_rng
+
+
+@pytest.fixture
+def overlap_size():
+    return np.array([2, 2], dtype="i")
+
+
+@pytest.fixture
+def kernel():
+    return generate_2d_gaussian_kernel(3, 0.1)
+
+
+def test_distributed_checkpoint_rng(checkpointer, local_rng):
+    root_process = 0
+    fileid = "1"
+    rank = checkpointer.comm.Get_rank()
+
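+    # save the rng state, draw a, restore the state, draw b: the two draws
+    # must coincide on every process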
+    # * backup for rng state
+    checkpointer.save(fileid, [None], [None], [None], rng=local_rng)
+    a = local_rng.normal()
+
+    # print("Process {}: a={}".format(rank, a))
+
+    # * load (warm-start for rng state)
+    checkpointer.load(fileid, [None], local_rng)
+
+    b = local_rng.normal()
+
+    # print("Process {}: a={}, b={}, b=a? {}".format(rank, a, b, np.allclose(b, a)))
+
+    # Check a=b on all processes
+    local_consistency_check = np.array([np.allclose(b, a)])
+    global_consistency_check = np.array([False])
+    assert local_consistency_check[0]
+
+    # Reduce "local_consistency_check" on the root
+    checkpointer.comm.Reduce(
+        [local_consistency_check, MPI.C_BOOL],
+        [global_consistency_check, MPI.C_BOOL],
+        op=MPI.LAND,
+        root=root_process,
+    )
+
+    if rank == root_process:
+        assert global_consistency_check
+
+
+def test_distributed_checkpoint_from_root(checkpointer, local_rng, kernel):
+    root_process = 0
+    fileid = "_from_root"
+    rank = checkpointer.comm.Get_rank()
+
+    # save data from root process only
+    if rank == root_process:
+        a = 3
+    else:
+        a = None
+
+    checkpointer.save_from_process(
+        root_process,
+        fileid,
+        2 * [np.s_[:]],
+        [None, None],
+        rng=local_rng,
+        kernel=kernel,
+        a=a,
+        mode="w",
+    )
+
+    if rank == root_process:
+        b = local_rng.standard_normal(2)
+
+    loaded_values = checkpointer.load_from_process(
+        root_process,
+        fileid,
+        2 * [np.s_[:]],
+        local_rng,
+        "kernel",
+        "a",
+    )
+
+    if rank == root_process:
+        b2 = local_rng.standard_normal(2)
+        assert np.isclose(a, loaded_values["a"])
+        assert np.allclose(b2, b)
+        assert np.allclose(kernel, loaded_values["kernel"])
+
+
+def test_checkpoint_variables(checkpointer, local_rng, kernel, overlap_size):
+
+    root_process = 0
+    fileid = "2"
+    ndims = 2
+    grid_size = MPI.Compute_dims(checkpointer.comm.Get_size(), ndims)
+    rank = checkpointer.comm.Get_rank()
+    backward_overlap = True
+
+    # * Cartesian topology communicator and nD rank
+    cartcomm = checkpointer.comm.Create_cart(
+        dims=grid_size, periods=ndims * [False], reorder=False
+    )
+    ranknd = cartcomm.Get_coords(rank)
+
+    grid_size = np.array(grid_size, dtype="i")
+    ranknd = np.array(ranknd, dtype="i")
+
+    # full image size
+    image_size = np.array([20, 20], dtype="i")  # overall image size
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, backward=backward_overlap
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+    # facet size (convolution)
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size,
+        image_size,
+        ranknd,
+        overlap=overlap_size,
+        backward=backward_overlap,
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    offset = facet_size - tile_size
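+    # offset: position of the local tile within the overlapping facet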
+
+    # * setup useful slices
+    local_slice_tile = ucomm.get_local_slice(
+        ranknd, grid_size, offset, backward=backward_overlap
+    )  # extract tile from local conv facet
+
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * local image
+    x = np.empty(facet_size, dtype="d")
+    x[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+
+    # * save
+    # image (all processes save a chunk)
+    checkpointer.save(
+        fileid,
+        [image_size, np.ones(1, dtype="i")],
+        [global_slice_tile, np.s_[:]],
+        [None, None],
+        x=x[local_slice_tile],
+        a=1,
+    )
+    # kernel (from root process only)
+    checkpointer.save_from_process(
+        root_process, fileid, [np.s_[:]], [None], kernel=kernel
+    )
+
+    # * load
+    # image
+    loaded_dic = checkpointer.load(fileid, [global_slice_tile], None, "x")
+    # kernel
+    loaded_kernel = checkpointer.load_from_process(
+        root_process, fileid, [np.s_[:]], None, "kernel"
+    )
+
+    # * check consistency
+    local_consistency_x = np.array(np.allclose(x[local_slice_tile], loaded_dic["x"]))
+
+    # reduce "local_consistency_x" on the root
+    global_consistency_x = np.array([False])
+    checkpointer.comm.Reduce(
+        [local_consistency_x, MPI.C_BOOL],
+        [global_consistency_x, MPI.C_BOOL],
+        op=MPI.LAND,
+        root=root_process,
+    )
+
+    consistency_kernel = False
+    if rank == root_process:
+        consistency_kernel = np.allclose(loaded_kernel["kernel"], kernel)
+        assert consistency_kernel and global_consistency_x
+
+
+# mpiexec -n 2 python -m pytest tests/mpi/test_mpi_checkpoint.py
+# running all tests tagged with pytest.mark.mpi
+# mpiexec -n 2 python -m pytest -m mpi
diff --git a/tests/mpi/test_mpi_convolutions.py b/tests/mpi/test_mpi_convolutions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee04eae3635a1207f04ef89aa6ff814163b96cb4
--- /dev/null
+++ b/tests/mpi/test_mpi_convolutions.py
@@ -0,0 +1,630 @@
+import h5py
+import numpy as np
+import pytest
+from mpi4py import MPI
+
+import aaxda.models.convolutions as uconv
+import aaxda.utils.communications as ucomm
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.models.distributed_convolutions import (
+    calculate_local_data_size,
+    create_local_to_global_slice,
+)
+
+# TODO: unify the notation used across these tests
+
+pytestmark = pytest.mark.mpi
+
+
+@pytest.fixture
+def comm():
+    return MPI.COMM_WORLD
+
+
+@pytest.fixture
+def size(comm):
+    return comm.Get_size()
+
+
+@pytest.fixture
+def rank(comm):
+    return comm.Get_rank()
+
+
+@pytest.fixture
+def seed():
+    return 123
+
+
+@pytest.fixture
+def ndims():
+    return 2
+
+
+@pytest.fixture
+def image_size():
+    # overall image size
+    return np.array([20, 20], dtype="i")
+
+
+@pytest.fixture
+def kernel_size():
+    # size of convolution
+    return np.array([4, 4], dtype="i")
+
+
+@pytest.fixture
+def overlap_size(kernel_size):
+    # overlap size kernel
+    return kernel_size - 1
+
+
+@pytest.fixture
+def kernel(kernel_size):
+    # square convolution kernel
+    return generate_2d_gaussian_kernel(kernel_size[0], 0.1)
+
+
+def test_distributed_convolution_backward(
+    comm, rank, size, ndims, image_size, kernel, kernel_size, overlap_size, seed
+):
+    """Testing distributed convolution with backward facet overlap."""
+
+    data_size = image_size + overlap_size
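+    # linear convolution output size: image_size + kernel_size - 1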
+    grid_size = MPI.Compute_dims(size, ndims)
+
+    # * Cartesian topology communicator and nD rank
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, backward=True
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    local_data_size = (
+        tile_size + (ranknd == grid_size - 1) * overlap_size
+    )  # ! backward overlap
+
+    # facet size (convolution)
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, overlap=overlap_size, backward=True
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    local_conv_size = facet_size + overlap_size
+    offset = facet_size - tile_size
+
+    # * setup useful slices
+    local_slice_tile = ucomm.get_local_slice(
+        ranknd, grid_size, offset, backward=True
+    )  # extract tile from local conv facet
+
+    local_slice_valid_conv = ucomm.slice_valid_coefficients(
+        ranknd, grid_size, overlap_size
+    )  # extract valid coefficients after local convolutions
+
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    # ! needs to be changed depending on the direction of the overlap?
+    global_slice_data = tuple(
+        [
+            np.s_[tile_pixels[d, 0] : tile_pixels[d, 0] + local_data_size[d]]
+            for d in range(ndims)
+        ]
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local image and kernel
+    local_image = np.empty(facet_size, dtype="d")
+    local_image[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+    ft_kernel = np.fft.rfftn(kernel, local_conv_size)
+
+    # * setup communication scheme
+    # direct convolution
+    (dest, src, resizedsendsubarray, resizedrecvsubarray,) = ucomm.setup_border_update(
+        cartcomm,
+        ndims,
+        local_image.itemsize,
+        facet_size,
+        overlap_size,
+        backward=True,
+    )
+
+    # * setup auxiliary buffers
+    # communicate facet borders to neighbours (x)
+    ucomm.mpi_update_borders(
+        comm, local_image, dest, src, resizedsendsubarray, resizedrecvsubarray
+    )
+
+    # TODO: improve this condition (write a helper to free the custom types)
+    # * free custom types
+    for d in range(ndims):
+        if overlap_size[d] > 1:
+            resizedsendsubarray[d].Free()
+            resizedrecvsubarray[d].Free()
+
+    local_data = uconv.fft_conv(local_image, ft_kernel, local_conv_size)[
+        local_slice_valid_conv
+    ]
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("convolution_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image[local_slice_tile]
+
+    dset_data = f.create_dataset("y", data_size, dtype="d")
+    dset_data[global_slice_data] = local_data
+
+    dset_kernel = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        dset_kernel[:] = kernel
+    f.close()
+    del f, dset_image, dset_data, dset_kernel
+
+    # * compare to full convolution
+    g = h5py.File("convolution_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.fft.rfftn(h0, data_size)
+        y = uconv.fft_conv(x0, H0, data_size)
+        # print(np.allclose(y, y0))
+        assert np.allclose(y, y0)
+    g.close()
+
+
+def test_distributed_convolution_forward(
+    comm, rank, size, ndims, image_size, kernel, kernel_size, overlap_size, seed
+):
+    """Testing distributed convolution with forward facet overlap."""
+
+    data_size = image_size + overlap_size
+    grid_size = MPI.Compute_dims(size, ndims)
+
+    # * Cartesian topology communicator and nD rank
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, backward=False
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    local_data_size = tile_size + (ranknd == 0) * overlap_size  # ! forward overlap
+    # local_data_size = (
+    #         tile_size + (ranknd == grid_size - 1) * overlap_size
+    #     )  # ! backward overlap
+
+    # facet size (convolution)
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size,
+        image_size,
+        ranknd,
+        overlap=overlap_size,
+        backward=False,
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    local_conv_size = facet_size + overlap_size
+    offset = facet_size - tile_size
+    # facet_size_adj = (
+    #     local_data_size + np.logical_and(ranknd > 0, grid_size > 1) * overlap_size
+    # )
+    # offset_adj = facet_size_adj - tile_size
+
+    # * setup useful slices
+    local_slice_tile = ucomm.get_local_slice(
+        ranknd, grid_size, offset, backward=False
+    )  # extract tile from local conv facet (forward overlap here)
+    # slice_local_tile = tuple([np.s_[: tile_size[d]] for d in range(ndims)])  # ! forward overlap
+    local_slice_valid_conv = ucomm.slice_valid_coefficients(
+        ranknd, grid_size, overlap_size
+    )  # extract valid coefficients after local convolutions
+
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    # ! needs to be changed depending on the direction of the overlap
+    # ! create function: global slice convolution
+    global_slice_data = tuple(
+        [
+            np.s_[
+                tile_pixels[d, 0]
+                + (ranknd[d] > 0) * overlap_size[d] : tile_pixels[d, 0]
+                + (ranknd[d] > 0) * overlap_size[d]
+                + local_data_size[d]
+            ]
+            for d in range(ndims)
+        ]
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local image and kernel
+    local_image = np.empty(facet_size, dtype="d")
+    local_image[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+    ft_kernel = np.fft.rfftn(kernel, local_conv_size)
+
+    # * setup communication scheme
+    # direct convolution
+    (dest, src, resizedsendsubarray, resizedrecvsubarray,) = ucomm.setup_border_update(
+        cartcomm,
+        ndims,
+        local_image.itemsize,
+        facet_size,
+        overlap_size,
+        backward=False,
+    )
+
+    # * setup auxiliary buffers
+    # communicate facet borders to neighbours (x)
+    ucomm.mpi_update_borders(
+        comm, local_image, dest, src, resizedsendsubarray, resizedrecvsubarray
+    )
+
+    # TODO: improve this condition (write a helper to free the custom types)
+    # * free custom types
+    for d in range(ndims):
+        if overlap_size[d] > 1:
+            resizedsendsubarray[d].Free()
+            resizedrecvsubarray[d].Free()
+
+    local_data = uconv.fft_conv(local_image, ft_kernel, local_conv_size)[
+        local_slice_valid_conv
+    ]
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("convolution_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image[local_slice_tile]
+
+    dset_data = f.create_dataset("y", data_size, dtype="d")
+    dset_data[global_slice_data] = local_data
+
+    dset_kernel = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        dset_kernel[:] = kernel
+    f.close()
+    del f, dset_image, dset_data, dset_kernel
+
+    # * compare to full convolution
+    g = h5py.File("convolution_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.fft.rfftn(h0, data_size)
+        y = uconv.fft_conv(x0, H0, data_size)
+        # print(np.allclose(y, y0))
+        assert np.allclose(y, y0)
+    g.close()
+
+
+def test_adjoint_distributed_convolution_backward(
+    comm, rank, size, ndims, image_size, kernel, kernel_size, overlap_size, seed
+):
+    """Testing adjoint distributed convolution with backward facet
+    overlap for the direct operator."""
+
+    data_size = image_size + overlap_size
+    grid_size = MPI.Compute_dims(size, ndims)
+
+    # * Cartesian topology communicator and nD rank
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, backward=True
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    local_data_size = tile_size + (ranknd == grid_size - 1) * overlap_size
+
+    # facet size (adjoint convolution)
+    facet_size_adj = local_data_size + (ranknd < grid_size - 1) * overlap_size
+    local_conv_size = facet_size_adj + overlap_size
+    offset_adj = (facet_size_adj - local_data_size).astype(int)
+
+    # * useful slices
+    # indexing into local data
+    # ! for direct operator with backward overlap, adjoint operator has
+    # ! forward overlap (local slice data has forward overlap here)
+    local_slice_data = ucomm.get_local_slice(
+        ranknd, grid_size, offset_adj, backward=False
+    )
+
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels, ranknd, overlap_size, local_data_size, backward=True
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local data and kernel
+    facet_adj = np.empty(facet_size_adj, dtype="d")
+    facet_adj[local_slice_data] = local_rng.standard_normal(size=local_data_size)
+    ft_kernel = np.fft.rfftn(kernel, local_conv_size)
+
+    # * setup communication scheme
+    # ! for the adjoint communication scheme, take overlap direction
+    # ! opposite to the one taken for the direct operator
+    (
+        dest_adj,
+        src_adj,
+        resizedsendsubarray_adj,
+        resizedrecvsubarray_adj,
+    ) = ucomm.setup_border_update(
+        cartcomm,
+        ndims,
+        facet_adj.itemsize,
+        facet_size_adj,
+        overlap_size,
+        backward=False,
+    )
+
+    # * communications
+    # adjoint operator
+    ucomm.mpi_update_borders(
+        comm,
+        facet_adj,
+        dest_adj,
+        src_adj,
+        resizedsendsubarray_adj,
+        resizedrecvsubarray_adj,
+    )
+
+    # * free custom types
+    for d in range(ndims):
+        if overlap_size[d] > 1:
+            resizedsendsubarray_adj[d].Free()
+            resizedrecvsubarray_adj[d].Free()
+
+    # * local linear convolution
+    local_x = uconv.fft_conv(facet_adj, np.conj(ft_kernel), local_conv_size)[
+        : tile_size[0], : tile_size[1]
+    ]
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("2d_convolution_adj_test.h5", "w", driver="mpio", comm=comm)
+
+    # TODO to be generalized to nD
+    dset = f.create_dataset("x", image_size, dtype="d")
+    dset[global_slice_tile] = local_x
+
+    yh5 = f.create_dataset("y", data_size, dtype="d")
+    yh5[global_slice_data] = facet_adj[local_slice_data]
+
+    hh5 = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        hh5[:] = kernel
+    f.close()  # ! flush done automatically when closing the file
+    comm.Barrier()
+
+    # * test parallel load convolution result
+    g = h5py.File(
+        "2d_convolution_adj_test.h5", "r+", driver="mpio", comm=MPI.COMM_WORLD
+    )
+    dset = g["x"]
+    loaded_x = np.zeros(local_x.shape)
+    dset.read_direct(
+        loaded_x,
+        global_slice_tile,
+        (np.s_[:], np.s_[:]),
+    )
+
+    # * debugging section
+    # comm.Barrier()
+    # print(
+    #     "Process {}: |local_x - loaded_x| = {}".format(
+    #         rank, np.linalg.norm(local_x - loaded_x)
+    #     )
+    # )
+    g.close()
+
+    # * compare to full convolution
+    g = h5py.File("2d_convolution_adj_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.conj(np.fft.rfftn(h0, data_size))
+        x = uconv.fft_conv(y0, H0, data_size)[: image_size[0], : image_size[1]]
+        consistency_test = np.allclose(x0, x)
+        # print("Consistency serial / parallel convolution: {}".format(consistency_test))
+        assert consistency_test
+    g.close()
+
+
+def test_adjoint_distributed_convolution_forward(
+    comm, rank, size, ndims, image_size, kernel, kernel_size, overlap_size, seed
+):
+    """Testing adjoint distributed convolution with forward facet
+    overlap for the direct operator."""
+
+    data_size = image_size + overlap_size
+    grid_size = MPI.Compute_dims(size, ndims)
+
+    # * Cartesian topology communicator and nD rank
+    cartcomm = comm.Create_cart(dims=grid_size, periods=ndims * [False], reorder=False)
+    ranknd = cartcomm.Get_coords(rank)
+    ranknd = np.array(ranknd, dtype="i")
+    grid_size = np.array(grid_size, dtype="i")
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, ranknd, backward=False
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    local_data_size, facet_size, facet_size_adj = calculate_local_data_size(
+        tile_size, ranknd, overlap_size, grid_size, backward=False
+    )
+    local_conv_size = facet_size_adj + overlap_size
+    offset_adj = (facet_size_adj - local_data_size).astype(int)
+
+    # * useful slices
+    # indexing into local data
+    # ! for direct operator with forward overlap, adjoint operator has
+    # ! backward overlap
+    local_slice_data = ucomm.get_local_slice(
+        ranknd, grid_size, offset_adj, backward=True
+    )
+
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels, ranknd, overlap_size, local_data_size, backward=False
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local data and kernel
+    facet_adj = np.empty(facet_size_adj, dtype="d")
+    facet_adj[local_slice_data] = local_rng.standard_normal(size=local_data_size)
+    ft_kernel = np.fft.rfftn(kernel, local_conv_size)
+
+    # * setup communication scheme
+    # ! for the adjoint communication scheme, take overlap direction
+    # ! opposite to the one taken for the direct operator
+    (
+        dest_adj,
+        src_adj,
+        resizedsendsubarray_adj,
+        resizedrecvsubarray_adj,
+    ) = ucomm.setup_border_update(
+        cartcomm,
+        ndims,
+        facet_adj.itemsize,
+        facet_size_adj,
+        overlap_size,
+        backward=True,
+    )
+
+    # * communications
+    # adjoint operator
+    ucomm.mpi_update_borders(
+        comm,
+        facet_adj,
+        dest_adj,
+        src_adj,
+        resizedsendsubarray_adj,
+        resizedrecvsubarray_adj,
+    )
+
+    # * free custom types
+    for d in range(ndims):
+        if overlap_size[d] > 1:
+            resizedsendsubarray_adj[d].Free()
+            resizedrecvsubarray_adj[d].Free()
+
+    # * local linear convolution
+    local_x = uconv.fft_conv(facet_adj, np.conj(ft_kernel), local_conv_size)[
+        : tile_size[0], : tile_size[1]
+    ]
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("2d_convolution_adj_test.h5", "w", driver="mpio", comm=comm)
+
+    # TODO to be generalized to nD
+    dset = f.create_dataset("x", image_size, dtype="d")
+    dset[global_slice_tile] = local_x
+
+    yh5 = f.create_dataset("y", data_size, dtype="d")
+    yh5[global_slice_data] = facet_adj[local_slice_data]
+
+    hh5 = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        hh5[:] = kernel
+    f.close()  # ! flush done automatically when closing the file
+    comm.Barrier()
+
+    # * test parallel load convolution result
+    g = h5py.File(
+        "2d_convolution_adj_test.h5", "r+", driver="mpio", comm=MPI.COMM_WORLD
+    )
+    dset = g["x"]
+    loaded_x = np.zeros(local_x.shape)
+    dset.read_direct(
+        loaded_x,
+        global_slice_tile,
+        (np.s_[:], np.s_[:]),
+    )
+
+    # * debugging section
+    # comm.Barrier()
+    # print(
+    #     "Process {}: |local_x - loaded_x| = {}".format(
+    #         rank, np.linalg.norm(local_x - loaded_x)
+    #     )
+    # )
+    g.close()
+
+    # * compare to full convolution
+    g = h5py.File("2d_convolution_adj_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.conj(np.fft.rfftn(h0, data_size))
+        x = uconv.fft_conv(y0, H0, data_size)[: image_size[0], : image_size[1]]
+        consistency_test = np.allclose(x0, x)
+        # print("Consistency serial / parallel convolution: {}".format(consistency_test))
+        assert consistency_test
+    g.close()
+
+
+# mpiexec -n 2 python -m pytest tests/mpi/test_mpi_convolutions.py
+# mpiexec -n 2 python -m mpi4py tests/mpi/test_mpi_convolutions.py
+# mpiexec -n numprocs python -m mpi4py pyfile [arg] ...
diff --git a/tests/mpi/test_mpi_inpainting_model.py b/tests/mpi/test_mpi_inpainting_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a15bfdc59e5009edf3ccae2bd6e88510e49e065
--- /dev/null
+++ b/tests/mpi/test_mpi_inpainting_model.py
@@ -0,0 +1,208 @@
+"""Test MPI model objects (SyncInpaintingModel).
+"""
+
+import h5py
+import numpy as np
+import pytest
+from mpi4py import MPI
+
+import aaxda.utils.communications as ucomm
+from aaxda.models.data import generate_random_mask
+from aaxda.models.models import SyncInpaintingModel
+
+pytestmark = pytest.mark.mpi
+
+
+@pytest.fixture
+def comm():
+    return MPI.COMM_WORLD
+
+
+@pytest.fixture
+def size(comm):
+    return comm.Get_size()
+
+
+@pytest.fixture
+def rank(comm):
+    return comm.Get_rank()
+
+
+@pytest.fixture
+def seed():
+    return 123
+
+
+@pytest.fixture
+def ndims():
+    return 2
+
+
+@pytest.fixture
+def grid_size(ndims, size):
+    return MPI.Compute_dims(size, ndims)
+
+
+@pytest.fixture
+def ranknd(comm, rank, grid_size, ndims):
+    # * Cartesian communicator and nd rank
+    cartcomm = comm.Create_cart(
+        dims=grid_size,
+        periods=ndims * [False],
+        reorder=False,
+    )
+    return np.array(cartcomm.Get_coords(rank), dtype="i")
+
+
+@pytest.fixture
+def image_size(ndims):
+    """Full image size."""
+    return np.array(ndims * [20], dtype="i")
+
+
+@pytest.fixture
+def tile_pixels(image_size, grid_size, ranknd):
+    """Index of the pixels from the full image involved in the local image tile."""
+    return ucomm.local_split_range_nd(grid_size, image_size, ranknd)
+
+
+@pytest.fixture
+def tile_size(tile_pixels):
+    """Size of local image tiles."""
+    return tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+
+@pytest.fixture
+def local_rng(comm, rank, size, seed):
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    return np.random.default_rng(local_seed)
+
+
+@pytest.fixture
+def local_mask(tile_size, local_rng):
+    """Local part of the full inpainting mask."""
+    return generate_random_mask(tile_size, 0.4, local_rng)
+
+
+def test_SyncInpaintingModel_throws_exceptions(
+    comm, size, ndims, image_size, grid_size
+):
+    """Testing errors thrown by the object."""
+
+    # checking the local mask size is consistent with the local image tile
+    m_ = np.ones(1)
+    g_ = np.array(grid_size, dtype="i")
+    with pytest.raises(ValueError) as excinfo:
+        SyncInpaintingModel(image_size, m_, comm, g_)
+    assert "local mask and image tile should have the same size" in str(excinfo.value)
+
+
+def test_SyncInpaintingModel_2d(
+    comm, rank, size, ndims, image_size, grid_size, local_mask, tile_pixels, local_rng
+):
+    """Testing 2d distributed inpainting model."""
+    g_ = np.array(grid_size, dtype="i")
+
+    # * distributed inpainting model
+    inpainting_model = SyncInpaintingModel(image_size, local_mask, comm, g_)
+
+    # * setup useful slices
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * local image
+    local_image = local_rng.standard_normal(size=inpainting_model.tile_size)
+
+    # * applying distributed direct operator
+    local_data = inpainting_model.apply_direct_operator(local_image)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("inpainting_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image
+
+    # TODO: see if this step can be simplified
+    dset_data = f.create_dataset("y", image_size, dtype="d")
+    local_data_facet = np.zeros(inpainting_model.tile_size, dtype=local_data.dtype)
+    local_data_facet[inpainting_model.mask_id] = local_data
+    dset_data[global_slice_tile] = local_data_facet
+
+    dset_mask = f.create_dataset("mask", image_size, dtype="d")
+    dset_mask[global_slice_tile] = local_mask
+    f.close()
+
+    del f, dset_image, dset_data, dset_mask
+
+    # * compare to full inpainting operator
+    g = h5py.File("inpainting_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"]
+        x0 = g["x"][()]
+        mask = g["mask"][()]
+        y0 = y0[mask]
+        y = x0[mask]
+        assert np.allclose(y, y0)
+    g.close()
+
+
+def test_adjoint_distributed_inpainting_2d(
+    comm, rank, size, ndims, image_size, grid_size, local_mask, tile_pixels, local_rng
+):
+    """Testing adjoint distributed convolution with backward facet
+    overlap for the direct operator."""
+    g_ = np.array(grid_size, dtype="i")
+
+    # * distributed inpainting model
+    inpainting_model = SyncInpaintingModel(image_size, local_mask, comm, g_)
+
+    # * setup useful slices
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+
+    # * local data
+    local_data = local_rng.standard_normal(size=inpainting_model.local_data_size)
+
+    # * applying distributed adjoint operator
+    local_image = inpainting_model.apply_adjoint_operator(local_data)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("inpainting_adjoint_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image
+
+    dset_data = f.create_dataset("y", image_size, dtype="d")
+    local_data_facet = np.zeros(inpainting_model.tile_size, dtype=local_data.dtype)
+    local_data_facet[inpainting_model.mask_id] = local_data
+    dset_data[global_slice_tile] = local_data_facet
+
+    dset_mask = f.create_dataset("mask", image_size, dtype="d")
+    dset_mask[global_slice_tile] = local_mask
+    f.close()
+
+    del f, dset_image, dset_data, dset_mask
+
+    # * compare to full inpainting operator
+    g = h5py.File("inpainting_adjoint_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        mask = g["mask"][()]
+
+        y0 = y0[mask]
+        y = x0[mask]
+        assert np.allclose(y, y0)
+    g.close()
+
+
+# mpiexec -n 2 python -m pytest tests/mpi/test_mpi_inpainting_model.py
diff --git a/tests/mpi/test_mpi_models.py b/tests/mpi/test_mpi_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bec0e1b78c13f47a4896b69bf84d4cb01c3cf04
--- /dev/null
+++ b/tests/mpi/test_mpi_models.py
@@ -0,0 +1,565 @@
+"""Test MPI model objects (SyncConvModel).
+"""
+
+import h5py
+import numpy as np
+import pytest
+from mpi4py import MPI
+
+import aaxda.models.convolutions as uconv
+import aaxda.utils.communications as ucomm
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.models.distributed_convolutions import create_local_to_global_slice
+from aaxda.models.models import SyncConvModel
+
+pytestmark = pytest.mark.mpi
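+# ! the mpi marker is assumed to come from the pytest-mpi plugin: these tests
+# ! only make sense under mpiexec (see the command at the bottom of the file)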
+
+
+@pytest.fixture
+def comm():
+    return MPI.COMM_WORLD
+
+
+@pytest.fixture
+def size(comm):
+    return comm.Get_size()
+
+
+@pytest.fixture
+def rank(comm):
+    return comm.Get_rank()
+
+
+@pytest.fixture
+def seed():
+    return 123
+
+
+@pytest.fixture
+def ndims():
+    return 2
+
+
+@pytest.fixture
+def image_size():
+    # overall image size
+    return np.array([20, 20], dtype="i")
+
+
+@pytest.fixture
+def kernel_size():
+    # size of convolution
+    return np.array([4, 4], dtype="i")
+
+
+@pytest.fixture
+def overlap_size(kernel_size):
+    # overlap size kernel
+    return kernel_size - 1
+
+
+@pytest.fixture
+def kernel(kernel_size):
+    # square convolution kernel
+    return generate_2d_gaussian_kernel(kernel_size[0], 0.1)
+
+
+def test_SyncConvModel_throws_exceptions(
+    comm, size, ndims, image_size, overlap_size, kernel, kernel_size
+):
+    """Testing errors thrown by the object."""
+    data_size = image_size + overlap_size
+    grid_size_ = MPI.Compute_dims(size, ndims)
+
+    # checking number of elements in image_size is consistent with
+    # data_size
+    with pytest.raises(ValueError) as excinfo:
+        SyncConvModel(
+            image_size[1:],
+            data_size,
+            kernel,
+            comm,
+            grid_size_,
+            kernel.itemsize,
+            False,
+            direction=False,
+        )
+    assert "image_size and data_size must have the same number of elements" in str(
+        excinfo.value
+    )
+
+    # checking kernel has a number of axes consistent with image_size and
+    # data_size
+    with pytest.raises(ValueError) as excinfo:
+        SyncConvModel(
+            image_size,
+            data_size,
+            kernel[0, :],
+            comm,
+            grid_size_,
+            kernel.itemsize,
+            False,
+            direction=False,
+        )
+    assert "kernel should have ndims = len(image_size) dimensions" in str(excinfo.value)
+
+    # checking kernel contains real entries
+    with pytest.raises(TypeError) as excinfo:
+        SyncConvModel(
+            image_size,
+            data_size,
+            (1 + 1j) * kernel,
+            comm,
+            grid_size_,
+            kernel.itemsize,
+            False,
+            direction=False,
+        )
+    assert "only real-valued kernel supported" in str(excinfo.value)
+
+
+def test_SyncConvModel_linear_2d_backward(
+    comm, size, rank, seed, ndims, image_size, kernel, kernel_size, overlap_size
+):
+    """Testing 2d distributed linear convolution model with backward facet
+    overlap."""
+    data_size = image_size + overlap_size
+    circular_boundaries = False
+    backward = True
+    grid_size_ = MPI.Compute_dims(size, ndims)
+    grid_size = np.array(grid_size_, dtype="i")
+
+    # * distributed convolution model
+    conv_model = SyncConvModel(
+        image_size,
+        data_size,
+        kernel,
+        comm,
+        grid_size_,
+        kernel.itemsize,
+        circular_boundaries,
+        direction=backward,
+    )
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, conv_model.ranknd, backward=backward
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    # local_data_size, facet_size, facet_size_adj = calculate_local_data_size(
+    #     tile_size, conv_model.ranknd, overlap_size, grid_size, backward=backward
+    # )
+
+    # facet size (convolution)
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size,
+        image_size,
+        conv_model.ranknd,
+        overlap=overlap_size,
+        backward=backward,
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    offset = facet_size - tile_size
+
+    # * setup useful slices
+    local_slice_tile = ucomm.get_local_slice(
+        conv_model.ranknd, grid_size, offset, backward=backward
+    )  # extract tile from local conv facet
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels,
+        conv_model.ranknd,
+        overlap_size,
+        conv_model.local_data_size,
+        backward=backward,
+    )
+
+    # * parallel rng
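+    # ! SeedSequence.spawn yields statistically independent child seeds, one
+    # ! per rank; scattering them gives each process its own random stream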
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local image and kernel
+    local_image = np.empty(conv_model.facet_size, dtype="d")
+    local_image[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+
+    # * applying distributed direct operator
+    local_data = conv_model.apply_direct_operator(local_image)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("convolution_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image[local_slice_tile]
+
+    dset_data = f.create_dataset("y", data_size, dtype="d")
+    dset_data[global_slice_data] = local_data
+
+    dset_kernel = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        dset_kernel[:] = kernel
+    f.close()
+    del f, dset_image, dset_data, dset_kernel
+
+    # * compare to full convolution
+    g = h5py.File("convolution_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.fft.rfftn(h0, data_size)
+        y = uconv.fft_conv(x0, H0, data_size)
+        # print(np.allclose(y, y0))
+        assert np.allclose(y, y0)
+    g.close()
+
+
+def test_SyncConvModel_linear_2d_forward(
+    comm, size, rank, seed, ndims, image_size, kernel, kernel_size, overlap_size
+):
+    """Testing 2d distributed linear convolution model with forward facet
+    overlap."""
+    data_size = image_size + overlap_size
+    circular_boundaries = False
+    backward = False
+    grid_size_ = MPI.Compute_dims(size, ndims)
+    grid_size = np.array(grid_size_, dtype="i")
+
+    # * distributed convolution model
+    conv_model = SyncConvModel(
+        image_size,
+        data_size,
+        kernel,
+        comm,
+        grid_size_,
+        kernel.itemsize,
+        circular_boundaries,
+        direction=backward,
+    )
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, conv_model.ranknd, backward=backward
+    )
+    tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # data size
+    # local_data_size, facet_size, facet_size_adj = calculate_local_data_size(
+    #     tile_size, conv_model.ranknd, overlap_size, grid_size, backward=backward
+    # )
+
+    # facet size (convolution)
+    facet_pixels = ucomm.local_split_range_nd(
+        grid_size,
+        image_size,
+        conv_model.ranknd,
+        overlap=overlap_size,
+        backward=backward,
+    )
+    facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+    offset = facet_size - tile_size
+
+    # * setup useful slices
+    local_slice_tile = ucomm.get_local_slice(
+        conv_model.ranknd, grid_size, offset, backward=backward
+    )  # extract tile from local conv facet
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels,
+        conv_model.ranknd,
+        overlap_size,
+        conv_model.local_data_size,
+        backward=backward,
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local image and kernel
+    local_image = np.empty(conv_model.facet_size, dtype="d")
+    local_image[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+
+    # * applying distributed direct operator
+    local_data = conv_model.apply_direct_operator(local_image)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("convolution_test.h5", "w", driver="mpio", comm=comm)
+
+    dset_image = f.create_dataset("x", image_size, dtype="d")
+    dset_image[global_slice_tile] = local_image[local_slice_tile]
+
+    dset_data = f.create_dataset("y", data_size, dtype="d")
+    dset_data[global_slice_data] = local_data
+
+    dset_kernel = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        dset_kernel[:] = kernel
+    f.close()
+    del f, dset_image, dset_data, dset_kernel
+
+    # * compare to full convolution
+    g = h5py.File("convolution_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.fft.rfftn(h0, data_size)
+        y = uconv.fft_conv(x0, H0, data_size)
+        # print(np.allclose(y, y0))
+        assert np.allclose(y, y0)
+    g.close()
+
+
+def test_adjoint_distributed_convolution_backward(
+    comm, size, rank, seed, ndims, image_size, kernel, kernel_size, overlap_size
+):
+    """Testing adjoint distributed convolution with backward facet
+    overlap for the direct operator."""
+    data_size = image_size + overlap_size
+    circular_boundaries = False
+    backward = True
+    grid_size_ = MPI.Compute_dims(size, ndims)
+    grid_size = np.array(grid_size_, dtype="i")
+
+    # * distributed convolution model
+    conv_model = SyncConvModel(
+        image_size,
+        data_size,
+        kernel,
+        comm,
+        grid_size_,
+        kernel.itemsize,
+        circular_boundaries,
+        direction=backward,
+    )
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, conv_model.ranknd, backward=backward
+    )
+    # tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # * setup useful slices
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels,
+        conv_model.ranknd,
+        overlap_size,
+        conv_model.local_data_size,
+        backward=backward,
+    )
+    # indexing into local data
+    # ! for direct operator with backward overlap, adjoint operator has
+    # ! forward overlap (local slice data has forward overlap here)
+    local_slice_data = ucomm.get_local_slice(
+        conv_model.ranknd, grid_size, conv_model.offset_adj, backward=not backward
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local data and kernel
+    facet_adj = np.empty(conv_model.facet_size_adj, dtype="d")
+    facet_adj[local_slice_data] = local_rng.standard_normal(
+        size=conv_model.local_data_size
+    )
+
+    # * apply distributed adjoint operator
+    local_x = conv_model.apply_adjoint_operator(facet_adj)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("2d_convolution_adj_test.h5", "w", driver="mpio", comm=comm)
+
+    # TODO to be generalized to nD
+    dset = f.create_dataset("x", image_size, dtype="d")
+    dset[global_slice_tile] = local_x
+
+    yh5 = f.create_dataset("y", data_size, dtype="d")
+    yh5[global_slice_data] = facet_adj[local_slice_data]
+
+    hh5 = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        hh5[:] = kernel
+    f.close()  # ! flush done automatically when closing the file
+    comm.Barrier()
+
+    # * test parallel load convolution result
+    g = h5py.File(
+        "2d_convolution_adj_test.h5", "r+", driver="mpio", comm=MPI.COMM_WORLD
+    )
+    dset = g["x"]
+    loaded_x = np.zeros(local_x.shape)
+    dset.read_direct(
+        loaded_x,
+        global_slice_tile,
+        (np.s_[:], np.s_[:]),
+    )
+
+    # * check the parallel load matches the locally computed adjoint
+    assert np.allclose(local_x, loaded_x)
+    g.close()
+
+    # * compare to full convolution
+    g = h5py.File("2d_convolution_adj_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
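+        # the adjoint of zero-padded linear convolution with a real kernel
+        # amounts to correlation with that kernel: conjugate the kernel
+        # spectrum, convolve, and crop the result back to image_size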
+        H0 = np.conj(np.fft.rfftn(h0, data_size))
+        x = uconv.fft_conv(y0, H0, data_size)[: image_size[0], : image_size[1]]
+        consistency_test = np.allclose(x0, x)
+        # print("Consistency serial / parallel convolution: {}".format(consistency_test))
+        assert consistency_test
+    g.close()
+
+
+def test_adjoint_distributed_convolution_forward(
+    comm, size, rank, seed, ndims, image_size, kernel, kernel_size, overlap_size
+):
+    """Testing adjoint distributed convolution with forward facet
+    overlap for the direct operator."""
+    data_size = image_size + overlap_size
+    circular_boundaries = False
+    backward = False
+    grid_size_ = MPI.Compute_dims(size, ndims)
+    grid_size = np.array(grid_size_, dtype="i")
+
+    # * distributed convolution model
+    conv_model = SyncConvModel(
+        image_size,
+        data_size,
+        kernel,
+        comm,
+        grid_size_,
+        kernel.itemsize,
+        circular_boundaries,
+        direction=backward,
+    )
+
+    # tile size
+    tile_pixels = ucomm.local_split_range_nd(
+        grid_size, image_size, conv_model.ranknd, backward=backward
+    )
+    # tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+    # * setup useful slices
+    # indexing into global arrays
+    global_slice_tile = tuple(
+        [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+    )
+    global_slice_data = create_local_to_global_slice(
+        tile_pixels,
+        conv_model.ranknd,
+        overlap_size,
+        conv_model.local_data_size,
+        backward=backward,
+    )
+    # indexing into local data
+    # ! for direct operator with forward overlap, adjoint operator has
+    # ! backward overlap
+    local_slice_data = ucomm.get_local_slice(
+        conv_model.ranknd, grid_size, conv_model.offset_adj, backward=not backward
+    )
+
+    # * parallel rng
+    child_seed = None
+    if rank == 0:
+        ss = np.random.SeedSequence(seed)
+        child_seed = ss.spawn(size)
+    local_seed = comm.scatter(child_seed, root=0)
+    local_rng = np.random.default_rng(local_seed)
+
+    # * local data and kernel
+    facet_adj = np.empty(conv_model.facet_size_adj, dtype="d")
+    facet_adj[local_slice_data] = local_rng.standard_normal(
+        size=conv_model.local_data_size
+    )
+
+    # * apply distributed adjoint operator
+    local_x = conv_model.apply_adjoint_operator(facet_adj)
+
+    # * save to an h5 test file (parallel writing)
+    f = h5py.File("2d_convolution_adj_test.h5", "w", driver="mpio", comm=comm)
+
+    # TODO to be generalized to nD
+    dset = f.create_dataset("x", image_size, dtype="d")
+    dset[global_slice_tile] = local_x
+
+    yh5 = f.create_dataset("y", data_size, dtype="d")
+    yh5[global_slice_data] = facet_adj[local_slice_data]
+
+    hh5 = f.create_dataset("h", kernel_size, dtype="d")
+    if rank == 0:
+        hh5[:] = kernel
+    f.close()  # ! flush done automatically when closing the file
+    comm.Barrier()
+
+    # * test parallel load convolution result
+    g = h5py.File(
+        "2d_convolution_adj_test.h5", "r+", driver="mpio", comm=MPI.COMM_WORLD
+    )
+    dset = g["x"]
+    loaded_x = np.zeros(local_x.shape)
+    dset.read_direct(
+        loaded_x,
+        global_slice_tile,
+        (np.s_[:], np.s_[:]),
+    )
+
+    # * check the parallel load matches the locally computed adjoint
+    assert np.allclose(local_x, loaded_x)
+    g.close()
+
+    # * compare to full convolution
+    g = h5py.File("2d_convolution_adj_test.h5", "r+", driver="mpio", comm=comm)
+    if rank == 0:
+        y0 = g["y"][()]
+        x0 = g["x"][()]
+        h0 = g["h"][()]
+
+        H0 = np.conj(np.fft.rfftn(h0, data_size))
+        x = uconv.fft_conv(y0, H0, data_size)[: image_size[0], : image_size[1]]
+        consistency_test = np.allclose(x0, x)
+        # print("Consistency serial / parallel convolution: {}".format(consistency_test))
+        assert consistency_test
+    g.close()
+
+
+# mpiexec -n 2 python -m pytest tests/mpi/test_mpi_models.py
diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/utils/test_checkpoint.py b/tests/utils/test_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f25bea13806474b7fe27ff481f26c68a407c2b4
--- /dev/null
+++ b/tests/utils/test_checkpoint.py
@@ -0,0 +1,81 @@
+"""Test serial checkpoint functionality, extracting and restoring the state of
+a random number generator.
+"""
+import numpy as np
+import pytest
+
+import aaxda.utils.checkpoint as chkpt
+
+
+@pytest.fixture
+def checkpointer(tmp_path):
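+    # cname / clevel / shuffle are assumed to be compression settings
+    # forwarded to the underlying hdf5 datasets (gzip at level 5, with
+    # byte-shuffling enabled)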
+    return chkpt.SerialCheckpoint(
+        tmp_path / "test",
+        cname="gzip",
+        clevel=5,
+        shuffle=1,
+    )
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng(1234)
+
+
+def test_checkpoint_extract_and_restore_rng(rng):
+    state_array, inc_array = chkpt.extract_rng_state(rng)
+    a = rng.normal()
+    chkpt.restore_rng_state(rng, inc_array, state_array)
+    b = rng.normal()
+    assert np.allclose(a, b)
+
+
+def test_checkpoint_rng(rng, checkpointer):
+    checkpointer.save(1, None, rng=rng)
+    a = rng.normal()
+    checkpointer.load(1, [None], rng)
+    b = rng.normal()
+    assert np.allclose(a, b)
+
+
+def test_checkpoint_variables(checkpointer):
+    a = 3
+    b = 4
+    c = np.ones((2, 2))
+    checkpointer.save(1, [None, None, None], rng=None, a=a, b=b, c=c)
+    loaded_dic = checkpointer.load(1, 3 * [np.s_[:]], None, "a", "b", "c")
+    assert np.allclose(a, loaded_dic["a"])
+    assert np.allclose(b, loaded_dic["b"])
+    assert np.allclose(c, loaded_dic["c"])
+
+
+def test_checkpoint_variable_and_rng(rng, checkpointer):
+
+    a0 = rng.standard_normal(2)
+    checkpointer.save(
+        1,
+        [None, None],
+        rng=rng,
+        a=a0,
+        rdcc_nbytes=1024**2 * 200,
+    )  # 200 MB cache
+    a = rng.standard_normal(2)
+    loaded_dic = checkpointer.load(1, [np.s_[:]], rng, "a")
+    b = rng.standard_normal(2)
+    assert np.allclose(a, b)
+    assert np.allclose(a0, loaded_dic["a"])
+    assert loaded_dic["a"].dtype == a0.dtype
+    # ! load data and byteorder (native is little endian on mac)
+    # https://numpy.org/doc/stable/reference/generated/numpy.dtype.newbyteorder.html
+    # ! check only for numpy.ndarray
+    # assert loaded_dic["a"].dtype.byteorder == a0.dtype.byteorder
+
+
+@pytest.mark.slow
+def test_checkpoint_variables_chunking(checkpointer):
+    a = np.ones((500, 500))
+    b = 3
+    checkpointer.save(1, [(100, 100), None], rng=None, a=a, b=b)
+    loaded_dic = checkpointer.load(1, 2 * [np.s_[:]], None, "a", "b")
+    assert np.allclose(a, loaded_dic["a"])
+    assert np.allclose(b, loaded_dic["b"])
diff --git a/tests/utils/test_communications.py b/tests/utils/test_communications.py
new file mode 100644
index 0000000000000000000000000000000000000000..c13b02fe59cb0f5e5293ec825447d4b9b4289cd5
--- /dev/null
+++ b/tests/utils/test_communications.py
@@ -0,0 +1,450 @@
+import unittest
+
+import numpy as np
+from mpi4py import MPI
+
+import aaxda.utils.communications as ucomm
+
+
+class TestCommunications(unittest.TestCase):
+    def setUp(self):
+        self.N = 9
+        self.nchunks = 3
+        self.overlap = 3
+
+    def test_split_range_overlap_error(self):
+        with self.assertRaises(ValueError):
+            ucomm.split_range(self.nchunks, self.N, 4)
+
+    def test_local_split_range_overlap_error(self):
+        with self.assertRaises(ValueError):
+            ucomm.local_split_range(self.nchunks, self.N, 0, 4)
+        with self.assertRaises(ValueError):
+            ucomm.local_split_range(self.nchunks, self.N, self.N)
+
+    def test_split_range_no_overlap(self):
+        rg = ucomm.split_range(self.nchunks, self.N)
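+        # with nchunks = 3 and N = 9 the expected inclusive ranges are
+        # presumably [[0, 2], [3, 5], [6, 8]], which is what the three
+        # checks below encode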
+        # check that 2 consecutive start index are distant from 1
+        self.assertTrue(np.all(rg[1:, 0] - rg[:-1, 1] == 1))
+        # check size of each chunk (same size for each in this case)
+        self.assertTrue(np.all(np.diff(rg, n=1, axis=1) + 1 == 3))
+        # test single process and global versions coincide
+        rg2 = np.concatenate(
+            [
+                ucomm.local_split_range(self.nchunks, self.N, k)[None, :]
+                for k in range(self.nchunks)
+            ],
+            axis=0,
+        )
+        self.assertTrue(np.allclose(rg, rg2))
+
+    def test_split_range_overlap(self):
+        rg = ucomm.split_range(self.nchunks, self.N, self.overlap)
+        # check overlap between 2 consecutive segments (from the left)
+        self.assertTrue(np.all(np.abs(rg[:-1, 1] - rg[1:, 0] + 1) == self.overlap))
+        # test single process and global versions coincide
+        rg2 = np.concatenate(
+            [
+                ucomm.local_split_range(self.nchunks, self.N, k, overlap=self.overlap)[
+                    None, :
+                ]
+                for k in range(self.nchunks)
+            ],
+            axis=0,
+        )
+        self.assertTrue(np.allclose(rg, rg2))
+
+    def test_split_range_overlap_forward(self):
+        rg = ucomm.split_range(self.nchunks, self.N, self.overlap, False)
+        # check overlap between 2 consecutive segments (from the left)
+        self.assertTrue(np.all(np.abs(rg[:-1, 1] - rg[1:, 0] + 1) == self.overlap))
+        # test single process and global versions coincide
+        rg2 = np.concatenate(
+            [
+                ucomm.local_split_range(
+                    self.nchunks,
+                    self.N,
+                    k,
+                    overlap=self.overlap,
+                    backward=False,
+                )[None, :]
+                for k in range(self.nchunks)
+            ],
+            axis=0,
+        )
+        self.assertTrue(np.allclose(rg, rg2))
+
+    def test_local_split_range(self):
+        rg = ucomm.split_range(self.nchunks, self.N, self.overlap)
+        global_rg = np.concatenate(
+            [
+                ucomm.local_split_range(self.nchunks, self.N, k, overlap=self.overlap)[
+                    None, :
+                ]
+                for k in range(self.nchunks)
+            ],
+            axis=0,
+        )
+        self.assertTrue(np.allclose(global_rg, rg))
+
+    def test_local_split_range_overlap_n(self):
+        rg = np.concatenate(
+            (
+                ucomm.local_split_range(self.nchunks, self.N, 1, self.overlap)[None, :],
+                ucomm.local_split_range(self.nchunks, self.N, 0, self.overlap)[None, :],
+            ),
+            axis=0,
+        )
+        rg2 = ucomm.local_split_range_nd(
+            np.array(2 * [self.nchunks]),
+            np.array(2 * [self.N]),
+            np.array([1, 0]),
+            np.array(2 * [self.overlap]),
+        )
+        self.assertTrue(np.allclose(rg, rg2))
+
+    # TODO: to be expanded (see if additional elements can be added to the unit-test)
+    def test_rebalance_tile_n(self):
+        array_size = np.array([20], dtype="i")
+        grid_size = np.array([self.nchunks], dtype="i")
+        overlap_size = np.array([self.overlap], dtype="i")
+
+        rg0 = np.zeros((self.nchunks, 2), dtype="i")
+        rg = np.zeros((self.nchunks, 2), dtype="i")
+
+        for k in range(self.nchunks):
+            ranknd = np.array([k], dtype="i")
+            rg[k, :] = ucomm.local_split_range_nd(grid_size, array_size, ranknd)
+            rg0[k, :] = rg[k, :]
+            ucomm.rebalance_tile_nd(
+                rg[k, :][None, :], ranknd, array_size, grid_size, overlap_size
+            )
+
+    def test_split_range_interleaved_error(self):
+        with self.assertRaises(ValueError):
+            ucomm.split_range_interleaved(self.N + 1, self.N)
+
+    def test_local_split_range_interleaved_error(self):
+        with self.assertRaises(ValueError):
+            ucomm.local_split_range_interleaved(self.N + 1, self.N, 0)
+        with self.assertRaises(ValueError):
+            ucomm.local_split_range_interleaved(2, self.N, self.N)
+
+    def test_split_range_interleaved(self):
+        rg = ucomm.split_range_interleaved(self.nchunks, self.N)
+        self.assertTrue(np.all([rg[k].start == k for k in range(len(rg))]))
+        self.assertTrue(np.all([rg[k].stop == self.N for k in range(len(rg))]))
+        self.assertTrue(np.all([rg[k].step == self.nchunks for k in range(len(rg))]))
+        rg2 = ucomm.local_split_range_interleaved(self.nchunks, self.N, 0)
+        self.assertTrue(np.all([rg2 == rg[0]]))
+
+    def test_get_neighbour(self):
+        ranknd = np.array([0, 1], dtype="i")
+        grid_size = np.array([self.nchunks, self.nchunks], dtype="i")
+        disp = np.ones((2,), dtype="i")
+        rank = ucomm.get_neighbour(ranknd, grid_size, disp)
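+        # ranks are assumed to be flattened in row-major (C) order on the
+        # process grid, so the neighbour of (0, 1) at displacement (1, 1)
+        # is (1, 2) -> 1 * 3 + 2 = 5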
+        self.assertTrue(
+            rank == (ranknd[0] + disp[0]) * grid_size[-1] + ranknd[-1] + disp[-1]
+        )  # = 5
+
+    def test_isvalid_communication(self):
+
+        ranknd = np.array([1, 1, 1], dtype="i")
+        grid_size = np.array([3, 3, 2], dtype="i")
+        overlap_size = np.array([0, 1, 1], dtype="i")
+        array_size = np.array([10, 10, 10], dtype="i")
+        temp_size = array_size - overlap_size
+        ndims = array_size.size
+
+        # communication axes for 3D case: x, y, z, xy, yz, zx, xyz
+        (
+            dest,
+            src,
+            isvalid_dest,
+            isvalid_src,
+            sizes_dest,
+            sizes_src,
+            start_src,
+        ) = ucomm.isvalid_communication(ranknd, grid_size, overlap_size, array_size)
+
+        # testing valid neighbours (dest)
+        # i, j, k -> i*np.prod(N[1:]) + j*N[2] + k
+        ranky = ucomm.get_neighbour(ranknd, grid_size, np.array([0, 1, 0], dtype="i"))
+
+        self.assertTrue(
+            np.array_equal(
+                dest,
+                np.array(
+                    [
+                        MPI.PROC_NULL,
+                        ranky,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                    ],
+                    dtype="i",
+                ),
+            )
+        )
+        self.assertTrue(
+            np.array_equal(
+                isvalid_dest,
+                np.array(
+                    [False, True, False, False, False, False, False],
+                    dtype="bool",
+                ),
+            )
+        )
+
+        # testing valid neighbours (src)
+        ranky = ucomm.get_neighbour(ranknd, grid_size, np.array([0, -1, 0], dtype="i"))
+        rankz = ucomm.get_neighbour(ranknd, grid_size, np.array([0, 0, -1], dtype="i"))
+        rankyz = ucomm.get_neighbour(
+            ranknd, grid_size, np.array([0, -1, -1], dtype="i")
+        )
+
+        self.assertTrue(
+            np.array_equal(
+                src,
+                np.array(
+                    [
+                        MPI.PROC_NULL,
+                        ranky,
+                        rankz,
+                        MPI.PROC_NULL,
+                        rankyz,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                    ],
+                    dtype="i",
+                ),
+            )
+        )
+        self.assertTrue(
+            np.all(
+                isvalid_src
+                == np.array(
+                    [False, True, True, False, True, False, False],
+                    dtype="bool",
+                )
+            )
+        )
+
+        # testing size of the data to be communicated (extent along each
+        # dimension for each communication)
+        # ! valid destination: y
+        expected_sizes_dest = np.concatenate(
+            (
+                np.zeros((1, ndims), dtype="i"),
+                np.array([temp_size[0], overlap_size[1], *temp_size[2:]], dtype="i")[
+                    None, :
+                ],
+                np.zeros((5, ndims)),
+            ),
+            axis=0,
+        )
+
+        # ! valid source: y, z, yz
+        expected_sizes_src = np.concatenate(
+            (
+                np.zeros((1, ndims), dtype="i"),
+                np.array([temp_size[0], overlap_size[1], temp_size[2]], dtype="i")[
+                    None, :
+                ],
+                np.array([*temp_size[:2], overlap_size[2]], dtype="i")[None, :],
+                np.zeros((1, ndims), dtype="i"),
+                np.array([temp_size[0], *overlap_size[1:]], dtype="i")[None, :],
+                np.zeros((2, ndims), dtype="i"),
+            ),
+            axis=0,
+        )
+
+        self.assertTrue(np.array_equal(sizes_dest, expected_sizes_dest))
+        self.assertTrue(np.array_equal(sizes_src, expected_sizes_src))
+
+        # TODO: check where the overlap should start (start_src) once the
+        # expected values have been worked out
+
+    def test_isvalid_communication_forward(self):
+
+        ranknd = np.array([1, 1, 1], dtype="i")
+        grid_size = np.array([3, 3, 2], dtype="i")
+        overlap_size = np.array([0, 1, 1], dtype="i")
+        array_size = np.array([10, 10, 10], dtype="i")
+        temp_size = array_size - overlap_size
+        ndims = array_size.size
+
+        # communication axes for 3D case: x, y, z, xy, yz, zx, xyz
+        (
+            dest,
+            src,
+            isvalid_dest,
+            isvalid_src,
+            sizes_dest,
+            sizes_src,
+            start_src,
+        ) = ucomm.isvalid_communication(
+            ranknd, grid_size, overlap_size, array_size, backward=False
+        )
+
+        # testing valid neighbours (dest)
+        # i, j, k -> i*np.prod(N[1:]) + j*N[2] + k
+        ranky = ucomm.get_neighbour(ranknd, grid_size, np.array([0, -1, 0], dtype="i"))
+        rankz = ucomm.get_neighbour(ranknd, grid_size, np.array([0, 0, -1], dtype="i"))
+        rankyz = ucomm.get_neighbour(
+            ranknd, grid_size, np.array([0, -1, -1], dtype="i")
+        )
+
+        self.assertTrue(
+            np.array_equal(
+                dest,
+                np.array(
+                    [
+                        MPI.PROC_NULL,
+                        ranky,
+                        rankz,
+                        MPI.PROC_NULL,
+                        rankyz,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                    ],
+                    dtype="i",
+                ),
+            )
+        )
+        self.assertTrue(
+            np.array_equal(
+                isvalid_dest,
+                np.array(
+                    [False, True, True, False, True, False, False],
+                    dtype="bool",
+                ),
+            )
+        )
+
+        # testing valid neighbours (src)
+        ranky = ucomm.get_neighbour(ranknd, grid_size, np.array([0, 1, 0], dtype="i"))
+        self.assertTrue(
+            np.array_equal(
+                src,
+                np.array(
+                    [
+                        MPI.PROC_NULL,
+                        ranky,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                        MPI.PROC_NULL,
+                    ],
+                    dtype="i",
+                ),
+            )
+        )
+        self.assertTrue(
+            np.all(
+                isvalid_src
+                == np.array(
+                    [False, True, False, False, False, False, False],
+                    dtype="bool",
+                )
+            )
+        )
+
+        # testing size of the data to be communicated (extent along each
+        # dimension for each communication)
+        # ranknd = [1, 1, 1]
+        # grid_size = [3, 3, 2]
+        # size N[d] when the facet is on the border of the grid along dimension
+        # d
+        # ! valid destination: y, z, yz
+        expected_sizes_dest = np.concatenate(
+            (
+                np.zeros((1, ndims), dtype="i"),  # x
+                np.array([temp_size[0], overlap_size[1], array_size[2]], dtype="i")[
+                    None, :
+                ],  # y
+                np.array([temp_size[0], temp_size[1], overlap_size[2]], dtype="i")[
+                    None, :
+                ],  # z
+                np.zeros((1, ndims), dtype="i"),  # xy
+                np.array([temp_size[0], overlap_size[1], overlap_size[2]], dtype="i")[
+                    None, :
+                ],  # yz
+                np.zeros((2, ndims), dtype="i"),  # zx, xyz
+            ),
+            axis=0,
+        )
+
+        # ! valid source: y
+        expected_sizes_src = np.concatenate(
+            (
+                np.zeros((1, ndims), dtype="i"),
+                np.array([temp_size[0], overlap_size[1], array_size[2]], dtype="i")[
+                    None, :
+                ],
+                np.zeros((5, ndims), dtype="i"),
+            ),
+            axis=0,
+        )
+
+        self.assertTrue(np.array_equal(sizes_dest, expected_sizes_dest))
+        self.assertTrue(np.array_equal(sizes_src, expected_sizes_src))
+
+    def test_get_local_slice(self):
+
+        grid_size = np.array([self.nchunks], dtype="i")
+        overlap_size = np.array([self.overlap], dtype="i")
+        ranknd = np.empty(1, dtype="i")
+
+        for k in range(self.nchunks):
+            ranknd[0] = k
+            local_slice = ucomm.get_local_slice(
+                ranknd, grid_size, overlap_size, backward=True
+            )
+
+            self.assertTrue(local_slice[0].stop is None)
+            if k == 0:
+                self.assertTrue(local_slice[0].start is None)
+            else:
+                self.assertTrue(local_slice[0].start == overlap_size[0])
+
+            local_slice = ucomm.get_local_slice(
+                ranknd, grid_size, overlap_size, backward=False
+            )
+
+            self.assertTrue(local_slice[0].start is None)
+            if k == self.nchunks - 1:
+                self.assertTrue(local_slice[0].stop is None)
+            else:
+                self.assertTrue(local_slice[0].stop == -overlap_size[0])
+
+    def test_get_local_slice_error(self):
+
+        grid_size = np.array(2 * [self.nchunks], dtype="i")
+        overlap_size = np.array(2 * [self.overlap], dtype="i")
+        ranknd = np.ones((1), dtype="i")
+
+        with self.assertRaises(AssertionError):
+            ucomm.get_local_slice(ranknd, grid_size, overlap_size)
+
+    # def test_slice_valid_coefficients(self):
+
+    #     ranknd = np.zeros(2, dtype="i")
+    #     grid_size = np.full(2, fill_value=2, dtype="i")
+    #     overlap_size = np.array([1, 0], dtype="i")
+    #     select_y = ucomm.slice_valid_coefficients(ranknd, grid_size, overlap_size)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/utils/test_communicators.py b/tests/utils/test_communicators.py
new file mode 100644
index 0000000000000000000000000000000000000000..39f9d11892d81996ce051c78a6040c52e68d0e1a
--- /dev/null
+++ b/tests/utils/test_communicators.py
@@ -0,0 +1,323 @@
+import unittest
+
+import h5py
+import numpy as np
+from mpi4py import MPI
+
+import aaxda.models.convolutions as uconv
+import aaxda.utils.communications as ucomm
+from aaxda.models.data import generate_2d_gaussian_kernel
+from aaxda.models.models import SyncConvModel
+from aaxda.utils.communicators import SyncCartesianCommunicator
+
+
+# TODO: to be completed
+# https://stackoverflow.com/questions/63004164/python-unittest-c-fortran-mpi-functions-resulting-in-mpi-init-function-getting-c
+class TestCommunicators(unittest.TestCase):
+
+    comm = MPI.COMM_WORLD
+    size = comm.Get_size()
+    rank = comm.Get_rank()
+    seed = 1234
+
+    def tearDown(self):
+        self.comm.Barrier()
+
+    def test_communication_1d(self):
+        """Test communication of a chunk of a 2d array along a single axis."""
+        N = np.array([20, 20], dtype="i")  # overall image size
+        M = np.array([2, 2], dtype="i")  # size of convolution kernel
+        overlap_size = M - 1  # overlap size
+        ndims = 2
+        grid_size = [self.size, 1]
+        circular_boundaries = False
+
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size, periods=ndims * [False], reorder=False
+        )
+        grid_size_ = np.array(grid_size, dtype="i")
+        ranknd = cartcomm.Get_coords(self.rank)
+        ranknd = np.array(ranknd, dtype=int)
+
+        # * local portion of the array (including overlap)
+        local_tile = ucomm.local_split_range_nd(grid_size_, N, ranknd)
+        tile_size = local_tile[:, 1] - local_tile[:, 0] + 1
+        local_indices = ucomm.local_split_range_nd(
+            grid_size_, N, ranknd, overlap=overlap_size
+        )
+        facet_size = local_indices[:, 1] - local_indices[:, 0] + 1
+        facet = np.full(facet_size, self.rank + 1, dtype=np.float64)
+
+        # print("Worker {}: {}".format(self.rank, facet))
+
+        # * custom communicator object
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size,
+            periods=ndims * [circular_boundaries],
+            reorder=False,
+        )
+        communicator = SyncCartesianCommunicator(
+            self.comm,
+            cartcomm,
+            grid_size,
+            facet.itemsize,
+            facet_size,
+            overlap_size,
+            direction=True,
+        )
+
+        # * communications
+        # ! beware type of the array to avoid segfault
+        communicator.update_borders(facet)
+
+        # * checking consistency of the results
+        (
+            dst,
+            sr,
+            isvalid_dest,
+            isvalid_src,
+            sizes_dest,
+            sizes_src,
+            start_src,
+        ) = ucomm.isvalid_communication(ranknd, grid_size_, overlap_size, N)
+
+        s0 = np.zeros(ndims * (ndims - 1) + 1)
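+        # after update_borders, the facet holds this rank's constant tile
+        # plus the neighbours' values over the received overlap regions, so
+        # its sum must split into the two contributions checked below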
+
+        for k in range(sizes_src.shape[0]):
+            if isvalid_src[k]:
+                sel = tuple(
+                    [
+                        np.s_[start_src[k, d] : start_src[k, d] + sizes_src[k, d]]
+                        for d in range(ndims)
+                    ]
+                )
+                s0[k] = np.sum(facet[sel])
+
+        self.assertTrue(
+            np.isclose(np.sum(facet), (self.rank + 1) * np.prod(tile_size) + np.sum(s0))
+        )
+
+        # ! beware consistency between type of input array and type assumed
+        # ! in the communicator
+        # print("Worker {}: {}".format(self.rank, facet))
+
+        self.comm.Barrier()
+        communicator.remove()
+        self.comm.Barrier()
+
+    def test_communication_2d(self):
+        """Test communication of chunks of a 2d array axis, each along one of
+        the axis.
+        """
+        N = np.array([20, 20], dtype="i")  # overall image size
+        M = np.array([2, 2], dtype="i")  # size of convolution kernel
+        overlap_size = M - 1  # overlap size
+        ndims = 2
+        grid_size = MPI.Compute_dims(self.size, ndims)
+        circular_boundaries = False
+
+        # * create Cartesian topology communicator
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size, periods=ndims * [False], reorder=False
+        )
+        grid_size_ = np.array(grid_size, dtype="i")
+        ranknd = cartcomm.Get_coords(self.rank)
+        ranknd = np.array(ranknd, dtype=int)
+
+        # * local portion of the array (including overlap)
+        local_tile = ucomm.local_split_range_nd(grid_size_, N, ranknd)
+        tile_size = local_tile[:, 1] - local_tile[:, 0] + 1
+        local_indices = ucomm.local_split_range_nd(
+            grid_size_, N, ranknd, overlap=overlap_size
+        )
+        facet_size = local_indices[:, 1] - local_indices[:, 0] + 1
+        facet = np.full(facet_size, self.rank + 1, dtype=np.float64)
+
+        # print("Worker {}: {}".format(self.rank, facet))
+
+        # * custom communicator object
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size,
+            periods=ndims * [circular_boundaries],
+            reorder=False,
+        )
+        communicator = SyncCartesianCommunicator(
+            self.comm,
+            cartcomm,
+            grid_size,
+            facet.itemsize,
+            facet_size,
+            overlap_size,
+            direction=True,
+        )
+
+        # * communications
+        # ! beware type of the array to avoid segfault
+        communicator.update_borders(facet)
+
+        # * checking consistency of the results
+        (
+            dst,
+            sr,
+            isvalid_dest,
+            isvalid_src,
+            sizes_dest,
+            sizes_src,
+            start_src,
+        ) = ucomm.isvalid_communication(ranknd, grid_size_, overlap_size, N)
+
+        s0 = np.zeros(ndims * (ndims - 1) + 1)
+
+        for k in range(sizes_src.shape[0]):
+            if isvalid_src[k]:
+                sel = tuple(
+                    [
+                        np.s_[start_src[k, d] : start_src[k, d] + sizes_src[k, d]]
+                        for d in range(ndims)
+                    ]
+                )
+                s0[k] = np.sum(facet[sel])
+
+        self.assertTrue(
+            np.isclose(np.sum(facet), (self.rank + 1) * np.prod(tile_size) + np.sum(s0))
+        )
+
+        # print("Worker {}: {}".format(self.rank, facet))
+
+        self.comm.Barrier()
+        communicator.remove()
+        self.comm.Barrier()
+
+    def test_distributed_convolution_backward(self):
+        """Testing 2d distributed convolution with backward facet overlap."""
+
+        image_size = np.array([20, 20], dtype="i")  # overall image size
+        kernel_size = np.array([2, 2], dtype="i")  # size of convolution
+        overlap_size = kernel_size - 1  # overlap size kernel
+        data_size = image_size + overlap_size  # full data size
+        kernel = generate_2d_gaussian_kernel(kernel_size[0], 0.1)  # square kernel
+
+        ndims = 2
+        circular_boundaries = False
+        backward = True
+        grid_size_ = MPI.Compute_dims(self.size, ndims)
+
+        # * Cartesian topology communicator and nD rank
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size_, periods=ndims * [False], reorder=False
+        )
+        ranknd = cartcomm.Get_coords(self.rank)
+        ranknd = np.array(ranknd, dtype="i")
+        grid_size = np.array(grid_size_, dtype="i")
+
+        # tile size
+        tile_pixels = ucomm.local_split_range_nd(
+            grid_size, image_size, ranknd, backward=backward
+        )
+        tile_size = tile_pixels[:, 1] - tile_pixels[:, 0] + 1
+
+        # data size
+        local_data_size = (
+            tile_size + (ranknd == grid_size - 1) * overlap_size
+        )  # ! backward overlap
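+        # i.e. only the last rank along each dimension keeps the extra
+        # overlap_size samples of the data array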
+
+        # facet size (convolution)
+        facet_pixels = ucomm.local_split_range_nd(
+            grid_size, image_size, ranknd, overlap=overlap_size, backward=backward
+        )
+        facet_size = facet_pixels[:, 1] - facet_pixels[:, 0] + 1
+        local_conv_size = facet_size + overlap_size
+        offset = facet_size - tile_size
+
+        # * setup useful slices
+        local_slice_tile = ucomm.get_local_slice(
+            ranknd, grid_size, offset, backward=backward
+        )  # extract tile from local conv facet
+
+        local_slice_valid_conv = ucomm.slice_valid_coefficients(
+            ranknd, grid_size, overlap_size
+        )  # extract valid coefficients after local convolutions
+
+        # indexing into global arrays
+        global_slice_tile = tuple(
+            [np.s_[tile_pixels[d, 0] : tile_pixels[d, 1] + 1] for d in range(ndims)]
+        )
+        # ! needs to be changed depending on the direction of the overlap?
+        global_slice_data = tuple(
+            [
+                np.s_[tile_pixels[d, 0] : tile_pixels[d, 0] + local_data_size[d]]
+                for d in range(ndims)
+            ]
+        )
+
+        # * parallel rng
+        child_seed = None
+        if self.rank == 0:
+            ss = np.random.SeedSequence(self.seed)
+            child_seed = ss.spawn(self.size)
+        local_seed = self.comm.scatter(child_seed, root=0)
+        local_rng = np.random.default_rng(local_seed)
+
+        # * local image and kernel
+        local_image = np.empty(facet_size, dtype="d")
+        local_image[local_slice_tile] = local_rng.standard_normal(size=tile_size)
+        ft_kernel = np.fft.rfftn(kernel, local_conv_size)
+
+        # * setup communication scheme (direct convolution)
+        cartcomm = self.comm.Create_cart(
+            dims=grid_size,
+            periods=ndims * [circular_boundaries],
+            reorder=False,
+        )
+        communicator = SyncCartesianCommunicator(
+            self.comm,
+            cartcomm,
+            grid_size,
+            local_image.itemsize,
+            facet_size,
+            overlap_size,
+            direction=backward,
+        )
+
+        # * communications
+        communicator.update_borders(local_image)
+
+        # * free custom types
+        communicator.remove()
+
+        local_data = uconv.fft_conv(local_image, ft_kernel, local_conv_size)[
+            local_slice_valid_conv
+        ]
+
+        # * save to an h5 test file (parallel writing)
+        f = h5py.File("convolution_test.h5", "w", driver="mpio", comm=self.comm)
+
+        dset_image = f.create_dataset("x", image_size, dtype="d")
+        dset_image[global_slice_tile] = local_image[local_slice_tile]
+
+        dset_data = f.create_dataset("y", data_size, dtype="d")
+        dset_data[global_slice_data] = local_data
+
+        dset_kernel = f.create_dataset("h", kernel_size, dtype="d")
+        if self.rank == 0:
+            dset_kernel[:] = kernel
+        f.close()
+        del f, dset_image, dset_data, dset_kernel
+
+        # * compare to full convolution
+        g = h5py.File("convolution_test.h5", "r+", driver="mpio", comm=self.comm)
+        if self.rank == 0:
+            y0 = g["y"][()]
+            x0 = g["x"][()]
+            h0 = g["h"][()]
+
+            H0 = np.fft.rfftn(h0, data_size)
+            y = uconv.fft_conv(x0, H0, data_size)
+            # print(np.allclose(y, y0))
+            self.assertTrue(np.allclose(y, y0))
+        g.close()
+
+
+if __name__ == "__main__":
+    unittest.main()
+    # mpiexec -n 2 python -m unittest tests/utils/test_communicators.py