diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 13649da..0000000 --- a/.appveyor.yml +++ /dev/null @@ -1,43 +0,0 @@ -environment: - - PYTHON_ARCH: "64" - PYTHON: "C:\\Miniconda38-x64" - - matrix: - - PYTHON_VERSION: "3.8" - -install: - # windows config (for installation) - - cmd: "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%" - - cmd: setlocal - - cmd: set ANACONDA_API_TOKEN= - # conda config - - conda config --set always_yes yes --set changeps1 no - - conda update -q conda - - conda install conda-build anaconda-client - - pip install -i https://pypi.anaconda.org/psyplot/simple --no-deps psyplot-ci-orb - - conda config --add channels conda-forge - - conda config --add channels psyplot - - conda info -a - - conda list - # windows config - - cmd: endlocal - - cmd: 'SET PYTHONWARNINGS=ignore:mode:DeprecationWarning:docutils.io:245' - - cmd: "IF NOT DEFINED APPVEYOR_REPO_TAG_NAME (SET GIT_BRANCH=%APPVEYOR_REPO_BRANCH%)" - - cmd: "IF NOT DEFINED APPVEYOR_REPO_TAG_NAME (conda config --add channels psyplot/label/%APPVEYOR_REPO_BRANCH:/=-%)" - -build: off - -test_script: - - cmd: setlocal - - cmd: set ANACONDA_API_TOKEN= - - cmd: conda build ci/conda-recipe --python %PYTHON_VERSION% - - cmd: endlocal - -deploy_script: - - cmd: " - IF NOT DEFINED APPVEYOR_REPO_TAG_NAME ( - deploy-conda-recipe -l %APPVEYOR_REPO_BRANCH:/=-% -py %PYTHON_VERSION% ci/conda-recipe - ) ELSE ( - deploy-conda-recipe -py %PYTHON_VERSION% ci/conda-recipe - )" diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 09d8850..0000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,128 +0,0 @@ -version: 2.1 - -orbs: - psyplot: psyplot/psyplot-ci-orb@1.5.31 - mattermost-plugin-notify: nathanaelhoun/mattermost-plugin-notify@1.2.0 - -executors: - default: psyplot/default - macos: psyplot/macos - -parameters: - unit-test-executor: - description: Executor for the unit tests. 
Can be default or macos - type: string - default: default - deploy-release: - description: Deploy the comment as a new release to github and pypi - type: boolean - default: false - run-tests: - description: Run the test suite - type: boolean - default: true - build_docs: - description: Build the documentation - type: boolean - default: true - python_version: - description: The python version to build - type: string - default: "3.8" - -workflows: - build-and-test: - unless: << pipeline.parameters.deploy-release >> - jobs: - - psyplot/install-and-build: - name: install - exec_environment: << pipeline.parameters.unit-test-executor >> - setup_env: << pipeline.parameters.run-tests >> - build_args: "--no-test" - build_docs: << pipeline.parameters.build_docs >> - python_version: << pipeline.parameters.python_version >> - env_packages: dask netcdf4 scipy - - psyplot/test-parallel: - name: test-xarray-latest - parallelism: 2 - run-job: << pipeline.parameters.run-tests >> - pytest_args: --cov=psyplot - requires: - - install - - psyplot/test-parallel: - name: test-xarray-0.17 - parallelism: 2 - run-job: << pipeline.parameters.run-tests >> - pytest_args: --cov=psyplot - packages: xarray=0.17 - requires: - - install - - psyplot/build-docs: - name: test-docs - run-job: << pipeline.parameters.build_docs >> - builders: linkcheck - requires: - - install - - mattermost-plugin-notify/approval-notification: - name: notify-deploy - context: mattermost - message: >- - Hello @all! A workflow on https://app.circleci.com/pipelines/github/psyplot/psyplot is awaiting your approval. - Please check the uploaded docs and builds prior to approval. 
- requires: - - test-xarray-latest - - test-xarray-0.17 - - test-docs - - hold-for-deploy: - type: approval - requires: - - notify-deploy - - psyplot/deploy-pkg: - exec_environment: << pipeline.parameters.unit-test-executor >> - context: anaconda - requires: - - hold-for-deploy - - psyplot/deploy-docs: - fingerprint: "fc:e3:0f:d0:c6:5a:6a:a5:0e:7c:d6:47:37:48:dd:67" - run-job: << pipeline.parameters.build_docs >> - requires: - - hold-for-deploy - filters: - branches: - only: master - - psyplot/trigger-release-workflow: - context: trigger-release - filters: - branches: - only: master - requires: - - psyplot/deploy-pkg - - psyplot/deploy-docs - publish-release: - when: << pipeline.parameters.deploy-release >> - jobs: - - psyplot/create-tag: - ssh-fingerprints: "fc:e3:0f:d0:c6:5a:6a:a5:0e:7c:d6:47:37:48:dd:67" - context: psyplot-admin - user-name: psyplot-admin - publish-release: true - publish-version-tag: true - - mattermost-plugin-notify/approval-notification: - name: notify-release - context: mattermost - message: >- - Hello @all! A new release has been created at https://github.com/psyplot/psyplot/releases. - Please review it carefully, publish it and approve the upload to pypi. - requires: - - psyplot/create-tag - - hold-for-pypi: - type: approval - requires: - - notify-release - - psyplot/deploy-pypi: - context: pypi - requires: - - hold-for-pypi - filters: - branches: - only: master diff --git a/.cruft.json b/.cruft.json new file mode 100644 index 0000000..3b9c548 --- /dev/null +++ b/.cruft.json @@ -0,0 +1,43 @@ +{ + "template": "https://codebase.helmholtz.cloud/hcdc/software-templates/python-package-template.git", + "commit": "2a291586c1d99092860b9cf26a034a285479131e", + "checkout": null, + "context": { + "cookiecutter": { + "project_authors": "Philipp S. Sommer", + "project_author_emails": "philipp.sommer@hereon.de", + "project_maintainers": "Philipp S. 
Sommer", + "project_maintainer_emails": "philipp.sommer@hereon.de", + "gitlab_host": "codebase.helmholtz.cloud", + "gitlab_username": "psyplot", + "git_remote_protocoll": "ssh", + "institution": "Helmholtz-Zentrum Hereon", + "institution_url": "https://www.hereon.de", + "copyright_holder": "Helmholtz-Zentrum hereon GmbH", + "copyright_year": "2021-2024", + "use_reuse": "yes", + "code_license": "LGPL-3.0-only", + "documentation_license": "CC-BY-4.0", + "supplementary_files_license": "CC0-1.0", + "project_title": "psyplot", + "project_slug": "psyplot", + "package_folder": "psyplot", + "project_short_description": "Python package for interactive data visualization", + "keywords": "visualization,netcdf,raster,cartopy,earth-sciences", + "documentation_url": "https://psyplot.github.io", + "use_markdown_for_documentation": "no", + "ci_matrix": "pipenv", + "deploy_package_in_ci": "yes", + "deploy_pages_in_ci": "git-push", + "_extensions": [ + "local_extensions.UnderlinedExtension" + ], + "_template": "https://codebase.helmholtz.cloud/hcdc/software-templates/python-package-template.git" + } + }, + "directory": null, + "skip": [ + ".git", + ".mypy_cache" + ] +} diff --git a/.cruft.json.license b/.cruft.json.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/.cruft.json.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..18607ff --- /dev/null +++ b/.flake8 @@ -0,0 +1,10 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +[flake8] +extend-ignore = + E203 + E402 + E501 + W503 diff --git a/.gitattributes b/.gitattributes index 1ae880d..cd7ac8f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,5 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + psyplot/_version.py export-subst diff --git 
a/.github/ISSUE_TEMPLATE/change_feature.md b/.github/ISSUE_TEMPLATE/change_feature.md deleted file mode 100644 index 436208c..0000000 --- a/.github/ISSUE_TEMPLATE/change_feature.md +++ /dev/null @@ -1,17 +0,0 @@ -#### Summary -[Describe the requested change in one or two lines. -This should also be mentioned in the title of this issue.] - -#### Reason -[Why do you think, this is useful?] - -#### Current behaviour -[How is the current behaviour/framework?] - -#### New behaviour -[Provide here some more explanation that goes beyond the summary above -(or delete this paragraph, if everything is explained above), -and describe the changes you would like to see] - -#### Examples -[images, code-snippets or URLs to other repositories] diff --git a/.github/ISSUE_TEMPLATE/new_feature.md b/.github/ISSUE_TEMPLATE/new_feature.md deleted file mode 100644 index cdb2e79..0000000 --- a/.github/ISSUE_TEMPLATE/new_feature.md +++ /dev/null @@ -1,13 +0,0 @@ -#### Summary -[Describe the new requested feature in one or two lines. -This should also be mentioned in the title of this issue.] - -#### Reason -[Why do you think, this is useful?] - -#### Detailed explanation -[Provide here some more explanation that goes beyond the summary above -(or delete this paragraph, if everything is explained above)] - -#### Examples -[images, code-snippets or URLs to other repositories] diff --git a/.github/issue_template.md b/.github/issue_template.md deleted file mode 100644 index 0c7008f..0000000 --- a/.github/issue_template.md +++ /dev/null @@ -1,30 +0,0 @@ -#### Code Sample, a copy-pastable example if possible - -```python -# Your code here - -``` -#### Problem description - -[this should explain **why** the current behavior is a problem and why the expected output is a better solution.] - -#### Expected Output -What should have been expected? You can hide large error messages within ``
`` tags, e.g. - -
-very long error message -
- -#### Output of ``psyplot -aV`` - -
-# Paste the output of the command ``psyplot -aV`` (ran from the command line) - -
- -#### NOTE -This is a bug report. - -For requesting new features, use [this template](https://github.com/psyplot/psyplot/issues/new?template=new_feature.md&title=NEW+FEATURE:). - -For changing existing features, use [this template](https://github.com/psyplot/psyplot/issues/new?template=change_feature.md&title=CHANGE+FEATURE:). diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index 4eb59a1..0000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,4 +0,0 @@ - - [ ] Closes #xxxx (remove if there is no corresponding issue, which should only be the case for minor changes) - - [ ] Tests added (for all bug fixes or enhancements) - - [ ] Tests passed (for all non-documentation changes) - - [ ] Fully documented, including `CHANGELOG.rst` for all changes diff --git a/.gitignore b/.gitignore index 72a70af..18726a3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,16 +1,17 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] +*$py.class # C extensions *.so -# MyPy cache -.mypy_cache/ - # Distribution / packaging .Python -env/ build/ develop-eggs/ dist/ @@ -22,9 +23,12 @@ lib64/ parts/ sdist/ var/ +wheels/ +share/python-wheels/ *.egg-info/ .installed.cfg *.egg +MANIFEST # PyInstaller # Usually these files are written by a python script from a template @@ -39,13 +43,17 @@ pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ +.nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml -*,cover +*.cover +*.py,cover +.hypothesis/ .pytest_cache/ +cover/ # Translations *.mo @@ -53,27 +61,95 @@ coverage.xml # Django stuff: *.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy # Sphinx documentation docs/_build/ -docs/index.doctree -docs/examples/ -docs/api/ - -# test results -*psyplot_testresults 
-tests/envs/cov_psyplot_py* -tests/envs/psyplot_py*.html # PyBuilder +.pybuilder/ target/ -# Spyder project +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings .spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json -# Example ipython notebook checkpoints -*.ipynb_checkpoints/ +# Pyre type checker +.pyre/ -# conda build files -recipe/meta.yaml -ci/conda-recipe/recipe_append.yaml +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +static/ + +docs/api +psyplot/migrations/00*.py +docs/_static/orcid.* + +# ignore Pipfile.lock files in ci +# if a lock-file needs to be added, add it with `git add -f` +ci/matrix/*/Pipfile.lock + + +*psyplot_testresults diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000..dd7e7cc --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +image: python:3.9 + +variables: + PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" + +cache: + paths: + - .cache/pip + 
+before_script: + # replace git internal paths in order to use the CI_JOB_TOKEN + - apt-get update -y && apt-get install -y pandoc graphviz + - python -m pip install -U pip + +test-package: + stage: test + script: + - pip install build twine + - make dist + - twine check dist/* + artifacts: + name: python-artifacts + paths: + - "dist/*" + expire_in: 7 days + +test: + stage: test + variables: + PIPENV_PIPFILE: "ci/matrix/${SCENARIO}/Pipfile" + script: + - pip install pipenv + - pipenv install + - make pipenv-test + parallel: + matrix: + - SCENARIO: + - default + coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/' + artifacts: + name: pipfile + paths: + - "ci/matrix/${SCENARIO}/*" + expire_in: 30 days + +test-docs: + stage: test + script: + - make dev-install + - make -C docs html + - make -C docs linkcheck + artifacts: + paths: + - docs/_build + + +deploy-package: + stage: deploy + needs: + - test-package + - test-docs + - test + only: + - master + script: + - pip install twine + - TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python -m twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi dist/* + + + +deploy-docs: + stage: deploy + only: + - master + needs: + - test-docs + image: node:21 + before_script: + - npm install -g gh-pages@6.1.1 + - mkdir .gh-pages-cache + script: + # make sure, the DEPLOY_TOKEN is defined + - >- + [ ${CI_DEPLOY_TOKEN} ] || + echo "The CI_DEPLOY_TOKEN variable is not set. 
Please create an access + token with scope 'read_repository' and 'write_repository'" && + [ ${CI_DEPLOY_TOKEN} ] + - >- + CACHE_DIR=$(realpath .gh-pages-cache) + gh-pages + --dotfiles + --nojekyll + --branch gh-pages + --repo https://ci-user:${CI_DEPLOY_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git + --user "${CI_COMMIT_AUTHOR}" + --message "CI Pipeline ${CI_PIPELINE_ID}, commit ${CI_COMMIT_SHORT_SHA}" + --dist docs/_build/html diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..757e75a --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,58 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +# https://pre-commit.com/ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + # isort should run before black as black sometimes tweaks the isort output + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + args: + - --profile + - black + - --line-length + - "79" + - --filter-files + - -skip-gitignore + - --float-to-top + # https://github.com/python/black#version-control-integration + - repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + args: + - --line-length + - "79" + - --exclude + - venv + - repo: https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + # - repo: https://github.com/pre-commit/mirrors-mypy # disabled for now + # rev: v1.0.1 + # hooks: + # - id: mypy + # additional_dependencies: + # - types-PyYAML + # args: + # - --ignore-missing-imports + + - repo: https://github.com/fsfe/reuse-tool + rev: v1.1.2 + hooks: + - id: reuse + + - repo: https://github.com/citation-file-format/cff-converter-python + # there is no release with this hook yet + rev: "44e8fc9" + hooks: + - id: validate-cff diff --git a/.reuse/add_license.py b/.reuse/add_license.py new file mode 100644 index 
0000000..3342870 --- /dev/null +++ b/.reuse/add_license.py @@ -0,0 +1,118 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: LGPL-3.0-only + +"""Helper script to add licenses to files. + +This script can be used to apply the licenses and default copyright holders +to files in the repository. + +It uses the short cuts from the ``.reuse/shortcuts.yaml`` file and +adds them to the call of ``reuse annotate``. Any command line option however +overwrites the config in ``shortcuts.yaml`` + +Usage:: + + python .reuse/add_license.py [OPTIONS] +""" + +import os.path as osp +from argparse import ArgumentParser +from textwrap import dedent +from typing import Dict, Optional, TypedDict + +import yaml +from reuse.project import Project +from reuse.vcs import find_root + +try: + from reuse._annotate import add_arguments as _orig_add_arguments + from reuse._annotate import run +except ImportError: + # reuse < 3.0 + from reuse.header import add_arguments as _orig_add_arguments + from reuse.header import run + + +class LicenseShortCut(TypedDict): + """Shortcut to add a copyright statement""" + + #: The copyright statement + copyright: str + + #: year of copyright statement + year: str + + #: SPDX Identifier of the license + license: Optional[str] + + +def load_shortcuts() -> Dict[str, LicenseShortCut]: + """Load the ``shortcuts.yaml`` file.""" + + with open(osp.join(osp.dirname(__file__), "shortcuts.yaml")) as f: + return yaml.safe_load(f) + + +def add_arguments( + parser: ArgumentParser, shortcuts: Dict[str, LicenseShortCut] +): + parser.add_argument( + "shortcut", + choices=[key for key in shortcuts if not key.startswith(".")], + help=( + "What license should be applied? Shortcuts are loaded from " + ".reuse/shortcuts.yaml. 
Possible shortcuts are %(choices)s" + ), + ) + + _orig_add_arguments(parser) + + parser.set_defaults(func=run) + parser.set_defaults(parser=parser) + + +def main(argv=None): + shortcuts = load_shortcuts() + + parser = ArgumentParser( + prog=".reuse/add_license.py", + description=dedent( + """ + Add copyright and licensing into the header of files with shortcuts + + This script uses the ``reuse annotate`` command to add copyright + and licensing information into the header the specified files. + + It accepts the same arguments as ``reuse annotate``, plus an + additional required `shortcuts` argument. The given `shortcut` + comes from the file at ``.reuse/shortcuts.yaml`` to fill in + copyright, year and license identifier. + + For further information, please type ``reuse annotate --help``""" + ), + ) + add_arguments(parser, shortcuts) + + args = parser.parse_args(argv) + + shortcut = shortcuts[args.shortcut] + + if args.year is None: + args.year = [] + if args.copyright is None: + args.copyright = [] + + if args.license is None and shortcut.get("license"): + args.license = [shortcut["license"]] + elif args.license and shortcut.get("license"): + args.license.append(shortcut["license"]) + args.year.append(shortcut["year"]) + args.copyright.append(shortcut["copyright"]) + + project = Project(find_root()) + args.func(args, project) + + +if __name__ == "__main__": + main() diff --git a/.reuse/shortcuts.yaml b/.reuse/shortcuts.yaml new file mode 100644 index 0000000..43a4548 --- /dev/null +++ b/.reuse/shortcuts.yaml @@ -0,0 +1,23 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +.defaults: &defaults + year: "2021-2024" + copyright: "Helmholtz-Zentrum hereon GmbH" + +# The following dictionaries items map to dictionaries with three possible +# keys: +# +# copyright: The copyright statement +# year: year of copyright statement +# license: SPDX Identifier +docs: + <<: *defaults + license: "CC-BY-4.0" +code: + <<: 
*defaults + license: "LGPL-3.0-only" +supp: + <<: *defaults + license: "CC0-1.0" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 17b0708..439e215 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + +v1.4.3 +====== +Minor fix for grid files (`#53 __`) + v1.4.2 ====== Fix for compatibility with python 3.7 diff --git a/CITATION.cff b/CITATION.cff index 1ba0157..25bbf38 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + # YAML 1.2 --- cff-version: "1.2.0" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 6e84e59..1fb3d50 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,9 @@ + + # Contributor Covenant Code of Conduct ## Our Pledge diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2a1b721..246a7c8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,9 @@ + + # Contributing to psyplot :+1::tada: First off, thanks for taking the time to contribute! :tada::+1: @@ -121,7 +127,7 @@ And we are always happy to help you finalizing incomplete pull requests. ### Documentation Styleguide -* Follow the [numpy documentation guidelines](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt). +* Follow the [numpy documentation guidelines](https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard). * Use [reStructuredText](http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html). * Try to not repeat yourself and make use of the `psyplot.docstring.docstrings` diff --git a/COPYING b/COPYING deleted file mode 100644 index f288702..0000000 --- a/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. 
- - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. 
- - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. 
The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. 
For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. 
Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. 
Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/COPYING.LESSER b/COPYING.LESSER deleted file mode 100644 index 0a04128..0000000 --- a/COPYING.LESSER +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. 
- - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. 
(If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/LICENSES/CC-BY-4.0.txt b/LICENSES/CC-BY-4.0.txt new file mode 100644 index 0000000..13ca539 --- /dev/null +++ b/LICENSES/CC-BY-4.0.txt @@ -0,0 +1,156 @@ +Creative Commons Attribution 4.0 International + + Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. + +Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. 
Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors. + +Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public. + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. + +Section 1 – Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. + + d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. + + g. 
Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights under this Public License. + + i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. + +Section 2 – Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: + + A. reproduce and Share the Licensed Material, in whole or in part; and + + B. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. + + 3. Term. The term of this Public License is specified in Section 6(a). + + 4. Media and formats; technical modifications allowed. 
The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. + + 5. Downstream recipients. + + A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. + + B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. + + 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). + +b. Other rights. + + 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this Public License. + + 3. 
To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. + +Section 3 – License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified form), You must: + + A. retain the following if it is supplied by the Licensor with the Licensed Material: + + i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of warranties; + + v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; + + B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and + + C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. + + 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. 
+ +Section 4 – Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; + + b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. +For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. + +Section 5 – Disclaimer of Warranties and Limitation of Liability. + + a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You. + + b. 
To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You. + + c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. + +Section 6 – Term and Termination. + + a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or + + 2. upon express reinstatement by the Licensor. + + c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. + + d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. + + e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. + +Section 7 – Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. + +Section 8 – Interpretation. + + a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. + + c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. + + d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. + +Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/LICENSES/CC0-1.0.txt b/LICENSES/CC0-1.0.txt new file mode 100644 index 0000000..0e259d4 --- /dev/null +++ b/LICENSES/CC0-1.0.txt @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). 
+ +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. 
database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. 
Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. diff --git a/LICENSES/LGPL-3.0-only.txt b/LICENSES/LGPL-3.0-only.txt new file mode 100644 index 0000000..513d1c0 --- /dev/null +++ b/LICENSES/LGPL-3.0-only.txt @@ -0,0 +1,304 @@ +GNU LESSER GENERAL PUBLIC LICENSE +Version 3, 29 June 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. + +0. Additional Definitions. + +As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. + +"The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. + +An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. + +A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". 
+ +The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. + +The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. + +1. Exception to Section 3 of the GNU GPL. +You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. + +2. Conveying Modified Versions. +If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: + + a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. + +3. Object Code Incorporating Material from Library Header Files. +The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license document. + +4. Combined Works. +You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: + + a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license document. + + c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. + + e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) + +5. Combined Libraries. +You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. + +6. Revised Versions of the GNU Lesser General Public License. +The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. + +If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. + +GNU GENERAL PUBLIC LICENSE +Version 3, 29 June 2007 + +Copyright © 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. 
If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. + +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS + +0. Definitions. + +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on the Program. + +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. 
Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. + +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. 
However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. 
+ +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. + + c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. + +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + + a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. + + d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
+ +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. 
But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or authors of the material; or + + e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. + +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
+ +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. 
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. + +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. 
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS + +How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + +If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. 
+ +The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an “about box”. + +You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . + +The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . diff --git a/MANIFEST.in b/MANIFEST.in index 6013e0c..2b48ff0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + include README.rst include COPYING include COPYING.LESSER diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..b23d43a --- /dev/null +++ b/Makefile @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +.PHONY: clean clean-build clean-pyc clean-test coverage dist docs help install lint lint/flake8 lint/black +.DEFAULT_GOAL := help + +define BROWSER_PYSCRIPT +import os, webbrowser, sys + +from urllib.request import pathname2url + +webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) +endef +export BROWSER_PYSCRIPT + +define PRINT_HELP_PYSCRIPT +import re, sys + +for line in sys.stdin: + match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) + if match: + target, help = match.groups() + print("%-20s %s" % (target, help)) +endef +export PRINT_HELP_PYSCRIPT + +BROWSER := python -c "$$BROWSER_PYSCRIPT" + +help: + @python -c 
"$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) + +clean: clean-build clean-pyc clean-test clean-venv ## remove all build, virtual environments, test, coverage and Python artifacts + +clean-build: ## remove build artifacts + rm -fr build/ + rm -fr dist/ + rm -fr .eggs/ + find . -name '*.egg-info' -exec rm -fr {} + + find . -name '*.egg' -exec rm -f {} + + +clean-pyc: ## remove Python file artifacts + find . -name '*.pyc' -exec rm -f {} + + find . -name '*.pyo' -exec rm -f {} + + find . -name '*~' -exec rm -f {} + + find . -name '__pycache__' -exec rm -fr {} + + +clean-test: ## remove test and coverage artifacts + rm -fr .tox/ + rm -f .coverage + rm -fr htmlcov/ + rm -fr .pytest_cache + +clean-venv: # remove the virtual environment + rm -rf venv + +lint/isort: ## check style with flake8 + isort --check psyplot tests +lint/flake8: ## check style with flake8 + flake8 psyplot tests +lint/black: ## check style with black + black --check psyplot tests + blackdoc --check psyplot tests +lint/reuse: ## check licenses + reuse lint + +lint: lint/isort lint/black lint/flake8 lint/reuse ## check style + +formatting: + isort psyplot tests + black psyplot tests + blackdoc psyplot tests + +quick-test: ## run tests quickly with the default Python + python -m pytest + +pipenv-test: ## run tox + pipenv run isort --check psyplot + pipenv run black --line-length 79 --check psyplot + pipenv run flake8 psyplot + pipenv run pytest -v --cov=psyplot -x + pipenv run reuse lint + pipenv run cffconvert --validate + +test: ## run tox + tox + +test-all: test test-docs ## run tests and test the docs + +coverage: ## check code coverage quickly with the default Python + python -m pytest --cov psyplot --cov-report=html + $(BROWSER) htmlcov/index.html + +docs: ## generate Sphinx HTML documentation, including API docs + $(MAKE) -C docs clean + $(MAKE) -C docs html + $(BROWSER) docs/_build/html/index.html + +test-docs: ## generate Sphinx HTML documentation, including API docs + $(MAKE) -C docs clean + 
$(MAKE) -C docs linkcheck + +servedocs: docs ## compile the docs watching for changes + watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . + +release: dist ## package and upload a release + twine upload dist/* + +dist: clean ## builds source and wheel package + python -m build + ls -l dist + +install: clean ## install the package to the active Python's site-packages + python -m pip install . + +dev-install: clean + python -m pip install -r docs/requirements.txt + python -m pip install -e .[dev] + pre-commit install + +venv-install: clean + python -m venv venv + venv/bin/python -m pip install -r docs/requirements.txt + venv/bin/python -m pip install -e .[dev] + venv/bin/pre-commit install diff --git a/README.rst b/README.rst index 59ffbd0..7a02c79 100644 --- a/README.rst +++ b/README.rst @@ -1,79 +1,21 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + =============================================== The psyplot interactive visualization framework =============================================== .. start-badges -.. list-table:: - :stub-columns: 1 - :widths: 10 90 - - * - docs - - |docs| |joss| |zenodo| - * - tests - - |circleci| |appveyor| |codecov| - * - package - - |version| |conda| |github| - * - implementations - - |supported-versions| |supported-implementations| - * - get in touch - - |mattermost| |mailing-list| |issues| - -.. |docs| image:: https://img.shields.io/github/deployments/psyplot/psyplot/github-pages - :alt: Documentation - :target: http://psyplot.github.io/psyplot/ - -.. |circleci| image:: https://circleci.com/gh/psyplot/psyplot/tree/master.svg?style=svg - :alt: CircleCI - :target: https://circleci.com/gh/psyplot/psyplot/tree/master - -.. |appveyor| image:: https://ci.appveyor.com/api/projects/status/4nt6qrw66iw65w33/branch/master?svg=true - :alt: AppVeyor - :target: https://ci.appveyor.com/project/psyplot/psyplot/branch/master - -.. 
|codecov| image:: https://codecov.io/gh/psyplot/psyplot/branch/master/graph/badge.svg - :alt: Coverage - :target: https://codecov.io/gh/psyplot/psyplot - -.. |version| image:: https://img.shields.io/pypi/v/psyplot.svg?style=flat - :alt: PyPI Package latest release - :target: https://pypi.python.org/pypi/psyplot - -.. |conda| image:: https://anaconda.org/conda-forge/psyplot/badges/version.svg - :alt: conda - :target: https://anaconda.org/conda-forge/psyplot - -.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/psyplot.svg?style=flat - :alt: Supported versions - :target: https://pypi.python.org/pypi/psyplot - -.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/psyplot.svg?style=flat - :alt: Supported implementations - :target: https://pypi.python.org/pypi/psyplot - -.. |joss| image:: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60/status.svg - :alt: Journal of Open Source Software - :target: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60 - -.. |zenodo| image:: https://zenodo.org/badge/87944102.svg - :alt: Zenodo - :target: https://zenodo.org/badge/latestdoi/87944102 - -.. |github| image:: https://img.shields.io/github/release/psyplot/psyplot.svg - :target: https://github.com/psyplot/psyplot/releases/latest - :alt: Latest github release - -.. |mattermost| image:: https://img.shields.io/badge/chat-on%20mattermost-success?logo=mattermost - :target: https://mattermost.hzdr.de/psyplot/ - :alt: Mattermost - -.. |mailing-list| image:: https://img.shields.io/badge/join-mailing%20list-brightgreen.svg?style=flat - :target: https://www.listserv.dfn.de/sympa/subscribe/psyplot - :alt: DFN mailing list - -.. |issues| image:: https://img.shields.io/github/issues-raw/psyplot/psyplot.svg?style=flat - :target: https://github.com/psyplot/psyplot/issues - :alt: GitHub issues +|CI| +|Code coverage| +|Latest Release| +|PyPI version| +|Code style: black| +|Imports: isort| +|PEP8| +|REUSE status| .. 
end-badges @@ -205,3 +147,21 @@ GNU LGPL-3.0 license for more details. You should have received a copy of the GNU LGPL-3.0 license along with this program. If not, see https://www.gnu.org/licenses/. + + +.. |CI| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/badges/master/pipeline.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot/-/pipelines?page=1&scope=all&ref=master +.. |Code coverage| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/badges/master/coverage.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot/-/graphs/develop/charts +.. |Latest Release| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot +.. |PyPI version| image:: https://img.shields.io/pypi/v/psyplot.svg + :target: https://pypi.python.org/pypi/psyplot/ +.. |Code style: black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +.. |Imports: isort| image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 + :target: https://pycqa.github.io/isort/ +.. |PEP8| image:: https://img.shields.io/badge/code%20style-pep8-orange.svg + :target: https://www.python.org/dev/peps/pep-0008/ +.. 
|REUSE status| image:: https://api.reuse.software/badge/codebase.helmholtz.cloud/psyplot/psyplot + :target: https://api.reuse.software/info/codebase.helmholtz.cloud/psyplot/psyplot diff --git a/ci/conda-recipe/meta.yaml b/ci/conda-recipe/meta.yaml deleted file mode 100644 index b6825cb..0000000 --- a/ci/conda-recipe/meta.yaml +++ /dev/null @@ -1,70 +0,0 @@ -{% set name = "psyplot" %} -{% set data = load_setup_py_data() %} - -package: - name: {{ name|lower }} - version: {{ data.get('version') }} - -source: - git_url: ../../ - -build: - number: {{ environ.get('GIT_DESCRIBE_NUMBER', 0) }} - string: py{{ environ.get('CONDA_PY') }}{% if environ.get("BUILD_STR_END") %}_{{ environ.get("BUILD_STR_END") }}{% endif %} - script: python -m pip install . --no-deps --ignore-installed -vvv - entry_points: - - psyplot = psyplot.__main__:main - - psyplot-plugin = psyplot.plugin_template:main - skip: true # [py == 27] - -requirements: - host: - - python - - pip - run: - - python >=3.6 - - docrep >=0.3 - - matplotlib - - funcargparse - - xarray - - pyyaml - -test: - requires: - - pytest - - codecov - - pytest-cov >=2.6.1 - - pyqt # [osx] - - netcdf4 - - dask - - scipy - source_files: - - tests - commands: - - psyplot --help - - psyplot-plugin --help - - pytest -v --cov={{ name|lower }} - - codecov - imports: - - psyplot - - psyplot.gdal_store - - psyplot.compat - - psyplot.config - - psyplot.sphinxext - -about: - home: https://github.com/psyplot/psyplot - license: LGPL-3.0-only - license_family: GPL - license_file: - - COPYING - - COPYING.LESSER - summary: Python package for interactive data visualization - - description: | - psyplot is an cross-platform open source python project that mainly - combines the plotting utilities of matplotlib and the data management of - the xarray package and integrates them into a software that can be used via - command-line and via a GUI. 
- doc_url: https://psyplot.github.io - dev_url: https://github.com/psyplot/psyplot diff --git a/ci/matrix/default/Pipfile b/ci/matrix/default/Pipfile new file mode 100644 index 0000000..edaee53 --- /dev/null +++ b/ci/matrix/default/Pipfile @@ -0,0 +1,20 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +psyplot = {extras = ["testsite"], file = "../../..", editable=true} +matplotlib = "3.7.*" + +[dev-packages] + +[pipenv] +allow_prereleases = true + +[requires] +python_version = "3.9" diff --git a/docs/GitHub-Mark-16px.png b/docs/GitHub-Mark-16px.png deleted file mode 100644 index 5ede5d3..0000000 Binary files a/docs/GitHub-Mark-16px.png and /dev/null differ diff --git a/docs/Makefile b/docs/Makefile index 191ffff..b1567a1 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,192 +1,24 @@ -# Makefile for Sphinx documentation +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # +# SPDX-License-Identifier: CC0-1.0 -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
+# Minimal makefile for Sphinx documentation +# -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build +# Put it first so that "make" without argument is like "make help". help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo 
"Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/syplot.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/syplot.qhc" - -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/syplot" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/syplot" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." 
- -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -coverage: - $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." +.PHONY: help Makefile -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/.gitignore b/docs/_static/.gitignore index 91fd588..0afbd0b 100644 --- a/docs/_static/.gitignore +++ b/docs/_static/.gitignore @@ -1 +1,5 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + docs_*.png diff --git a/docs/_static/download_latest.js b/docs/_static/download_latest.js deleted file mode 100644 index 7383931..0000000 --- a/docs/_static/download_latest.js +++ /dev/null @@ -1,13 +0,0 @@ - -function GetLatestReleaseInfo(osname) { - $.getJSON("https://api.github.com/repos/Chilipp/psyplot-conda/releases/latest").done(function (release) { - var asset = release.assets[0]; - for (var i = 0; i < release.assets.length; i++) { - if (release.assets[i].name.includes(osname)) - { - var asset = release.assets[i]; - } - } - window.location.replace(asset.browser_download_url); - }); -} diff --git a/docs/_static/license_logo.png b/docs/_static/license_logo.png new file mode 100644 index 0000000..c8473a2 Binary files /dev/null and b/docs/_static/license_logo.png differ diff --git a/docs/_static/license_logo.png.license b/docs/_static/license_logo.png.license new file mode 100644 index 0000000..0329700 --- /dev/null +++ b/docs/_static/license_logo.png.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2024 Creative Commons + +SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/_static/psyplot.ico.license b/docs/_static/psyplot.ico.license new file mode 100644 index 0000000..b21fae9 --- /dev/null +++ b/docs/_static/psyplot.ico.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/_static/psyplot.png.license b/docs/_static/psyplot.png.license new file mode 100644 index 0000000..b21fae9 --- /dev/null +++ b/docs/_static/psyplot.png.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + 
+SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/_templates/footer.html b/docs/_templates/footer.html new file mode 100644 index 0000000..3e57079 --- /dev/null +++ b/docs/_templates/footer.html @@ -0,0 +1,22 @@ + + +{% extends "!footer.html" %} +{% block extrafooter %} + + + +{% endblock %} diff --git a/docs/about.rst b/docs/about.rst index 93cbb74..aab1dcb 100644 --- a/docs/about.rst +++ b/docs/about.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _about: About psyplot @@ -66,7 +70,7 @@ What it is .. code-block:: python - psy.plot.mapplot('my-netcdf-file.nc', lonlatbox='Germany') + psy.plot.mapplot("my-netcdf-file.nc", lonlatbox="Germany") while still providing a very high range of flexible options to adjust the visualization. No GUI, independent of it's intuitiveness, can ever beat the @@ -93,7 +97,7 @@ What it is plotting methods - it will always be free and open-source under the LGPL License. -.. _ICON: https://mpimet.mpg.de/en/science/modeling-with-icon/icon-configurations +.. _ICON: https://code.mpimet.mpg.de/projects/iconpublic .. _UGRID: https://ugrid-conventions.github.io/ugrid-conventions/ diff --git a/docs/accessors.rst b/docs/accessors.rst index 7720225..fdae0f3 100644 --- a/docs/accessors.rst +++ b/docs/accessors.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _accessors: .. currentmodule:: psyplot.data @@ -31,10 +35,10 @@ dataset itself, e.g. In [1]: import psyplot - In [2]: ds = psyplot.open_dataset('demo.nc') + In [2]: ds = psyplot.open_dataset("demo.nc") @savefig docs_dataset_accessor.png width=4in - In [3]: sp = ds.psy.plot.mapplot(name='t2m', cmap='Reds') + In [3]: sp = ds.psy.plot.mapplot(name="t2m", cmap="Reds") The variable ``sp`` is a psyplot subproject of the current main project. 
@@ -44,7 +48,8 @@ The variable ``sp`` is a psyplot subproject of the current main project. @suppress In [4]: import psyplot.project as psy - ...: psy.close('all') + ...: + ...: psy.close("all") Hence, it would be completely equivalent if you type @@ -53,7 +58,7 @@ Hence, it would be completely equivalent if you type In [5]: import psyplot.project as psyplot - In [6]: sp = psy.plot.mapplot(ds, name='t2m', cmap='Reds') + In [6]: sp = psy.plot.mapplot(ds, name="t2m", cmap="Reds") Note that the :attr:`DatasetAccessor.plot` attribute has the same plotmethods as the :attr:`psyplot.project.plot` instance. @@ -88,7 +93,7 @@ Just use the :attr:`~psyplot.data.InteractiveBase.plot` attribute the accessor. In [1]: import psyplot - In [2]: ds = psyplot.open_dataset('demo.nc') + In [2]: ds = psyplot.open_dataset("demo.nc") In [3]: da = ds.t2m[0, 0] @@ -101,7 +106,8 @@ Just use the :attr:`~psyplot.data.InteractiveBase.plot` attribute the accessor. @suppress In [6]: import matplotlib.pyplot as plt - ...: plt.close('all') + ...: + ...: plt.close("all") The resulting plotter, an instance of the :class:`psyplot.plotter.Plotter` class, is the object that visualizes the data array. It can also @@ -120,17 +126,19 @@ multiple lines. 
Consider the following example: In [7]: ds0 = ds.isel(lev=0) # select a subset of the dataset # create a list of arrays at different longitudes - In [8]: l = psyplot.InteractiveList([ - ...: ds0.t2m.sel(lon=2.35, lat=48.86, method='nearest'), # Paris - ...: ds0.t2m.sel(lon=13.39, lat=52.52, method='nearest'), # Berlin - ...: ds0.t2m.sel(lon=-74.01, lat=40.71, method='nearest'), # NYC - ...: ]) + In [8]: l = psyplot.InteractiveList( + ...: [ + ...: ds0.t2m.sel(lon=2.35, lat=48.86, method="nearest"), # Paris + ...: ds0.t2m.sel(lon=13.39, lat=52.52, method="nearest"), # Berlin + ...: ds0.t2m.sel(lon=-74.01, lat=40.71, method="nearest"), # NYC + ...: ] + ...: ) - In [9]: l.arr_names = ['Paris', 'Berlin', 'NYC'] + In [9]: l.arr_names = ["Paris", "Berlin", "NYC"] # plot the list @savefig docs_dataarray_accessor_2.png width=4in - In [10]: plotter = l.psy.plot.lineplot(xticks='data', xticklabels='%B') + In [10]: plotter = l.psy.plot.lineplot(xticks="data", xticklabels="%B") @suppress In [10]: import matplotlib.pyplot as plt @@ -155,14 +163,14 @@ subset of a dataset, e.g. via .. ipython:: In [1]: da = ds.t2m[0, 0] - ...: print(da.time) # January 1979 + ...: print(da.time) # January 1979 You can change to a different slice using the :meth:`InteractiveArray.update` method. .. ipython:: - In [2]: da.psy.base = ds # tell psyplot the source of the dataarray + In [2]: da.psy.base = ds # tell psyplot the source of the dataarray In [3]: da.psy.update(time=2) ...: print(da.time) # changed to March 1979 @@ -178,7 +186,7 @@ xarray framework. .. 
ipython:: - In [4]: da = ds.psy.create_list(time=0, lev=0, name='t2m')[0] + In [4]: da = ds.psy.create_list(time=0, lev=0, name="t2m")[0] ...: print(da.psy.base is ds) If you plotted the data, you can also change the formatoptions using the @@ -188,25 +196,29 @@ If you plotted the data, you can also change the formatoptions using the # create plot @savefig docs_dataarray_accessor_3.png width=4in - In [5]: da.psy.plot.mapplot(); + In[5]: da.psy.plot.mapplot() .. ipython:: @savefig docs_dataarray_accessor_4.png width=4in - In [6]: da.psy.update(cmap='Reds') + In [6]: da.psy.update(cmap="Reds") @suppress In [6]: import matplotlib.pyplot as plt - ...: plt.close('all') + ...: + ...: plt.close("all") The same holds for the Interactive list .. ipython:: @suppress - In [6]: plotter = l.psy.plot.lineplot(xticks='data', xticklabels='%B') + In [6]: plotter = l.psy.plot.lineplot(xticks="data", xticklabels="%B") @savefig docs_dataarray_accessor_5.png width=4in - In [7]: l.update(time=slice(1, 4), # change the data by selecting a subset of the timeslice - ...: title='Subset', # change a formatoption, the title of the plot - ...: ) + In [7]: l.update( + ...: time=slice( + ...: 1, 4 + ...: ), # change the data by selecting a subset of the timeslice + ...: title="Subset", # change a formatoption, the title of the plot + ...: ) diff --git a/docs/api.rst b/docs/api.rst new file mode 100644 index 0000000..0971484 --- /dev/null +++ b/docs/api.rst @@ -0,0 +1,14 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 +.. SPDX-License-Identifier: CC-BY-4.0 + +.. _api: + +API Reference +============= + + +.. 
toctree:: + + api/psyplot diff --git a/docs/apigen.bash b/docs/apigen.bash deleted file mode 100755 index 2b8261d..0000000 --- a/docs/apigen.bash +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# script to automatically generate the psyplot api documentation using -# sphinx-apidoc and sed - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -sphinx-apidoc -f -M -e -T -o api ../psyplot/ -# replace chapter title in psyplot.rst -sed -i -e 1,1s/.*/'API Reference'/ api/psyplot.rst -# add imported members at the top level module -sed -i -e /Subpackages/'i\'$'\n'".. autosummary:: \\ -\ ~psyplot.config.rcsetup.rcParams \\ -\ ~psyplot.data.InteractiveArray \\ -\ ~psyplot.data.InteractiveList \\ - \\ - " api/psyplot.rst - -sphinx-autogen -o generated *.rst */*.rst - diff --git a/docs/apple.png b/docs/apple.png deleted file mode 100644 index 6fe0b98..0000000 Binary files a/docs/apple.png and /dev/null differ diff --git a/docs/changelog.rst b/docs/changelog.rst index c393d07..e981bbd 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. 
SPDX-License-Identifier: CC-BY-4.0 + .. _changelog: Changelog diff --git a/docs/command_line.rst b/docs/command_line.rst index 8a31397..530b4dd 100644 --- a/docs/command_line.rst +++ b/docs/command_line.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. highlight:: bash .. _command-line: diff --git a/docs/conf.py b/docs/conf.py index 345dd3f..c055b05 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,294 +1,269 @@ -# -*- coding: utf-8 -*- -# -# psyplot documentation build configuration file +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # +# SPDX-License-Identifier: LGPL-3.0-only -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# Configuration file for the Sphinx documentation builder. # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# This file only contains a selection of the most common options. 
For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html +# -- Path setup -------------------------------------------------------------- -import sphinx +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# import os -import os.path as osp import sys -import re -import six -import subprocess as spr -from itertools import product import warnings +from itertools import product +from pathlib import Path -if six.PY2: - from urllib import urlopen -else: - from urllib.request import urlopen +from sphinx.ext import apidoc # make sure, psyplot from parent directory is used -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) + +# isort: off + import psyplot + +# isort: on + from psyplot.plotter import Formatoption, Plotter # automatically import all plotter classes -psyplot.rcParams['project.auto_import'] = True +psyplot.rcParams["project.auto_import"] = True # include links to the formatoptions in the documentation of the # :attr:`psyplot.project.ProjectPlotter` methods Plotter.include_links(True) -warnings.filterwarnings('ignore', message="axes.color_cycle is deprecated") +warnings.filterwarnings("ignore", message="axes.color_cycle is deprecated") warnings.filterwarnings( - 'ignore', message=("This has been deprecated in mpl 1.5,")) -warnings.filterwarnings('ignore', message="invalid value encountered in ") -warnings.filterwarnings('ignore', message=r"\s*examples.directory") -warnings.filterwarnings('ignore', message='numpy.dtype size changed') + "ignore", message=("This has been deprecated in mpl 1.5,") +) +warnings.filterwarnings("ignore", message="invalid value encountered in ") +warnings.filterwarnings("ignore", message=r"\s*examples.directory") +warnings.filterwarnings("ignore", message="numpy.dtype 
size changed") warnings.filterwarnings( - 'ignore', message='Using an implicitly registered datetime converter') + "ignore", message="Using an implicitly registered datetime converter" +) warnings.filterwarnings( - 'ignore', message=r"\s*The on_mappable_changed function") + "ignore", message=r"\s*The on_mappable_changed function" +) warnings.filterwarnings( - 'ignore', message=r".+multi-part geometries is deprecated") + "ignore", message=r".+multi-part geometries is deprecated" +) warnings.filterwarnings( - 'ignore', message=r"\s*The array interface is deprecated") + "ignore", message=r"\s*The array interface is deprecated" +) -# -- General configuration ------------------------------------------------ -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.autosummary', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', - 'sphinx.ext.extlinks', - 'matplotlib.sphinxext.plot_directive', - 'IPython.sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_directive', - 'sphinxarg.ext', - 'psyplot.sphinxext.extended_napoleon', - 'autodocsumm', - 'sphinx.ext.imgconverter', -] +def generate_apidoc(app): + appdir = Path(app.__file__).parent + apidoc.main( + ["-fMEeTo", str(api), str(appdir), str(appdir / "migrations" / "*")] + ) -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] -linkcheck_anchors_ignore = ["^install$"] +api = Path("api") + +if not api.exists(): + generate_apidoc(psyplot) + +# -- Project information ----------------------------------------------------- + +project = "psyplot" +copyright = "2021-2024 Helmholtz-Zentrum hereon GmbH" +author = "Philipp S. Sommer" + linkcheck_ignore = [ + # we do not check link of the psyplot as the + # badges might not yet work everywhere. 
Once psyplot + # is settled, the following link should be removed + r"https://.*psyplot" # HACK: SNF seems to have a temporary problem r"https://p3.snf.ch/project-\d+", ] -# create the api documentation -if not osp.exists(osp.join(osp.dirname(__file__), 'api')): - spr.check_call(['bash', 'apigen.bash']) +linkcheck_anchors_ignore = ["^install$"] -napoleon_use_admonition_for_examples = True -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = '.rst' +# -- General configuration --------------------------------------------------- -# The master toctree document. -master_doc = 'index' +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "hereon_nc_sphinxext", + "sphinx.ext.doctest", + "sphinx.ext.intersphinx", + "sphinx_design", + "sphinx.ext.autosummary", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.extlinks", + "matplotlib.sphinxext.plot_directive", + "IPython.sphinxext.ipython_console_highlighting", + "IPython.sphinxext.ipython_directive", + "sphinxarg.ext", + "psyplot.sphinxext.extended_napoleon", + "autodocsumm", + "sphinx.ext.imgconverter", +] -autodoc_default_options = { - 'show_inheritance': True, - 'autosummary': True, -} -autoclass_content = 'both' +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] -not_document_data = ['psyplot.config.rcsetup.defaultParams', - 'psyplot.config.rcsetup.rcParams'] +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] -ipython_savefig_dir = os.path.join(os.path.dirname(__file__), '_static') +napoleon_use_admonition_for_examples = True -# General information about the project. 
-project = 'psyplot' -copyright = ", ".join( - psyplot.__copyright__.strip().replace("Copyright (C) ", "").splitlines() -) -author = psyplot.__author__ -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = re.match('\d+\.\d+\.\d+', psyplot.__version__).group() -# The full version, including alpha/beta/rc tags. -release = psyplot.__version__ +autodoc_default_options = { + "show_inheritance": True, + "members": True, + "autosummary": True, +} -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None +autoclass_content = "both" -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] +not_document_data = [ + "psyplot.config.rcsetup.defaultParams", + "psyplot.config.rcsetup.rcParams", +] -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +ipython_savefig_dir = "_static" -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True +# fontawesome icons +html_css_files = [ + "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/css/all.min.css" +] +sd_fontawesome_latex = True -# -- Options for HTML output ---------------------------------------------- +# -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" -# Add any paths that contain custom static files (such as style sheets) -# here, relative to this directory. 
They are copied after the builtin -# static files, so a file named "default.css" will overwrite the builtin -# "default.css". -html_static_path = ['_static'] +html_theme_options = { + "collapse_navigation": False, + "includehidden": False, +} -# Output file base name for HTML help builder. -htmlhelp_basename = 'psyplotdoc' +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = '_static/psyplot.png' +html_logo = "_static/psyplot.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '_static/psyplot.ico' - -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - 'index': ['sidebarlogo.html', 'sidebarusefullinks.html', 'searchbox.html'], - '**': ['sidebarlogo.html', 'relations.html', 'searchbox.html', - 'localtoc.html', 'sidebarusefullinks.html'] -} +html_favicon = "_static/psyplot.ico" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # Additional stuff for the LaTeX preamble. - 'preamble': '\setcounter{tocdepth}{10}' + "preamble": r"\setcounter{tocdepth}{10}" } -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'psyplot.tex', u'psyplot Documentation', - author, 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - (master_doc, 'psyplot', u'psyplot Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'psyplot', u'psyplot Documentation', - author, 'psyplot', 'Python framework for interactive data documentation', - 'Miscellaneous'), -] - - -# -- Options for Epub output ---------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = project -epub_author = author -epub_publisher = author -epub_copyright = copyright - -# A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] - -# Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None), - 'numpy': ('https://numpy.org/doc/stable/', None), - 'matplotlib': ('https://matplotlib.org/stable/', None), - 'seaborn': ('https://seaborn.pydata.org/', None), - 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), - 'xarray': ('https://xarray.pydata.org/en/stable/', None), - 'cartopy': ('https://scitools.org.uk/cartopy/docs/latest/', None), - 'mpl_toolkits': ('https://matplotlib.org/basemap/', None), - 'psy_maps': ('https://psyplot.github.io/psy-maps/', None), - 'psy_simple': ('https://psyplot.github.io/psy-simple/', None), - 'psy_reg': ('https://psyplot.github.io/psy-reg/', None), - 'psyplot_gui': ('https://psyplot.github.io/psyplot-gui/', None), - 'psy_view': ('https://psyplot.github.io/psy-view/', None), - "psyplot_examples": ('https://psyplot.github.io/examples/', None), - 'python': ('https://docs.python.org/3/', None), + "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "matplotlib": 
("https://matplotlib.org/stable/", None), + "seaborn": ("https://seaborn.pydata.org/", None), + "sphinx": ("https://www.sphinx-doc.org/en/master/", None), + "xarray": ("https://xarray.pydata.org/en/stable/", None), + "cartopy": ("https://scitools.org.uk/cartopy/docs/latest/", None), + "psy_maps": ("https://psyplot.github.io/psy-maps/", None), + "psy_simple": ("https://psyplot.github.io/psy-simple/", None), + "psy_reg": ("https://psyplot.github.io/psy-reg/", None), + "psyplot_gui": ("https://psyplot.github.io/psyplot-gui/", None), + "psy_view": ("https://psyplot.github.io/psy-view/", None), + "psyplot_examples": ("https://psyplot.github.io/examples/", None), + "python": ("https://docs.python.org/3/", None), } replacements = { - '`psyplot.rcParams`': '`~psyplot.config.rcsetup.rcParams`', - '`psyplot.InteractiveList`': '`~psyplot.data.InteractiveList`', - '`psyplot.InteractiveArray`': '`~psyplot.data.InteractiveArray`', - '`psyplot.open_dataset`': '`~psyplot.data.open_dataset`', - '`psyplot.open_mfdataset`': '`~psyplot.data.open_mfdataset`', - } + "`psyplot.rcParams`": "`~psyplot.config.rcsetup.rcParams`", + "`psyplot.InteractiveList`": "`~psyplot.data.InteractiveList`", + "`psyplot.InteractiveArray`": "`~psyplot.data.InteractiveArray`", + "`psyplot.open_dataset`": "`~psyplot.data.open_dataset`", + "`psyplot.open_mfdataset`": "`~psyplot.data.open_mfdataset`", +} def link_aliases(app, what, name, obj, options, lines): - for (key, val), (i, line) in product(six.iteritems(replacements), - enumerate(lines)): + for (key, val), (i, line) in product( + replacements.items(), enumerate(lines) + ): lines[i] = line.replace(key, val) fmt_attrs_map = { - 'Interface to other formatoptions': [ - 'children', 'dependencies', 'connections', 'parents', - 'shared', 'shared_by'], - 'Formatoption intrinsic': [ - 'value', 'value2share', 'value2pickle', 'default', 'validate'], - 'Interface for the plotter': [ - 'lock', 'diff', 'set_value', 'check_and_set', 'initialize_plot', - 'update', 
'share', 'finish_update', 'remove', 'changed', 'plotter', - 'priority', 'key', 'plot_fmt', 'update_after_plot', - 'requires_clearing', 'requires_replot'], - 'Interface to the data': ['data_dependent', 'index_in_list', 'project', - 'ax', 'raw_data', 'decoder', 'any_decoder', - 'data', 'iter_data', 'iter_raw_data', - 'set_data', 'set_decoder'], - 'Information attributes': ['group', 'name', 'groupname', 'default_key'], - 'Miscellaneous': ['init_kwargs', 'logger'], + "Interface to other formatoptions": [ + "children", + "dependencies", + "connections", + "parents", + "shared", + "shared_by", + ], + "Formatoption intrinsic": [ + "value", + "value2share", + "value2pickle", + "default", + "validate", + ], + "Interface for the plotter": [ + "lock", + "diff", + "set_value", + "check_and_set", + "initialize_plot", + "update", + "share", + "finish_update", + "remove", + "changed", + "plotter", + "priority", + "key", + "plot_fmt", + "update_after_plot", + "requires_clearing", + "requires_replot", + ], + "Interface to the data": [ + "data_dependent", + "index_in_list", + "project", + "ax", + "raw_data", + "decoder", + "any_decoder", + "data", + "iter_data", + "iter_raw_data", + "set_data", + "set_decoder", + ], + "Information attributes": ["group", "name", "groupname", "default_key"], + "Miscellaneous": ["init_kwargs", "logger"], } @@ -296,10 +271,10 @@ def group_fmt_attributes(app, what, name, obj, section, parent): if parent is Formatoption: return next( (group for group, val in fmt_attrs_map.items() if name in val), - None) + None, + ) def setup(app): - app.connect('autodoc-process-docstring', link_aliases) - app.connect('autodocsumm-grouper', group_fmt_attributes) - return {'version': sphinx.__display_version__, 'parallel_read_safe': True} + app.connect("autodoc-process-docstring", link_aliases) + app.connect("autodocsumm-grouper", group_fmt_attributes) diff --git a/docs/configuration.rst b/docs/configuration.rst index f9b356e..10beb63 100644 --- a/docs/configuration.rst 
+++ b/docs/configuration.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _configuration: Configuration @@ -27,6 +31,7 @@ object. Without any plugins, this looks like @suppress In [1]: # is not shown because we have to disable the plugins ...: from psyplot.config.rcsetup import RcParams, defaultParams_orig + ...: ...: rcParams = RcParams(defaultParams=defaultParams_orig) ...: rcParams.update_from_defaultParams() @@ -38,7 +43,7 @@ example, if you do not want, that the seaborn_ package is imported when the .. ipython:: - In [3]: rcParams['project.import_seaborn'] = False + In [3]: rcParams["project.import_seaborn"] = False Additionally, you can make these changes permanent. At every first import of the ``psyplot`` module, the rcParams are updated from a yaml configuration @@ -55,13 +60,15 @@ To make our changes from above permanent, we could just do: In [4]: import yaml ...: from psyplot.config.rcsetup import psyplot_fname - In [5]: with open(psyplot_fname(if_exists=False), 'w') as f: - ...: yaml.dump({'project.import_seaborn': False}, f) + In [5]: with open(psyplot_fname(if_exists=False), "w") as f: + ...: yaml.dump({"project.import_seaborn": False}, f) # or we use the dump method - In [6]: rcParams.dump(psyplot_fname(if_exists=False), - ...: overwrite=True, # update the existing file - ...: include_keys=['project.import_seaborn']) + In [6]: rcParams.dump( + ...: psyplot_fname(if_exists=False), + ...: overwrite=True, # update the existing file + ...: include_keys=["project.import_seaborn"], + ...: ) Default formatoptions --------------------- diff --git a/docs/contribute.rst b/docs/contribute.rst deleted file mode 100644 index 09ba60e..0000000 --- a/docs/contribute.rst +++ /dev/null @@ -1,252 +0,0 @@ -.. _how-to-contribute: - -Contributing to psyplot -======================= - -First off, thanks for taking the time to contribute! 
- -The following is a set of guidelines for contributing to psyplot and its -packages, which are hosted on GitHub. These are mostly guidelines, not -rules. Use your best judgment, and feel free to propose changes to this -document in a pull request. - -.. contents:: Table of Contents - -Code of Conduct ---------------- - -This project and everyone participating in it is governed by the -`psyplot Code of Conduct `__. -By participating, you are expected to uphold this code. - -What should I know before I get started? ----------------------------------------- - -The psyplot framework -~~~~~~~~~~~~~~~~~~~~~ - -``psyplot`` is just the framework that allows interactive data analysis -and visualization. Much of the functionality however is implemented by -other packages. What package is the correct one for your bug -report/feature request, can be determined by the following list - -- `psyplot-gui `__: - Everything specific to the graphical user interface -- `psy-view `__: - Everything specific to the psy-view graphical user interface -- `psy-simple `__: - Everything concerning, e.g. the ``lineplot``, ``plot2d``, ``density`` - or ``vector`` plot methods -- `psy-maps `__: Everything - concerning, e.g. the ``mapplot``, ``mapvector`` ``mapcombined`` plot - methods -- `psy-reg `__: Everything - concerning, e.g. the ``linreg`` or ``densityreg`` plot methods -- `psyplot `__: Everything - concerning the general framework, e.g. data handling, parallel - update, etc. - -Concerning plot methods, you can simply find out which module -implemented it via - -.. code:: python - - import psyplot.project as psy - print(psy.plot.name-of-your-plot-method._plugin) - -If you still don’t know, where to open the issue, just go for -`psyplot `__. - -How Can I Contribute? ---------------------- - -Reporting Bugs -~~~~~~~~~~~~~~ - -This section guides you through submitting a bug report for psyplot. 
-Following these guidelines helps maintainers and the community -understand your report, reproduce the behavior, and find related -reports. - -Before creating bug reports, please check existing issues and pull -requests as you might find out that you don’t need to create one. When -you are creating a bug report, please `include as many details as -possible <#how-do-i-submit-a-good-bug-report>`__. Fill out `the required -template `__, the information it asks for -helps us resolve issues faster. - - **Note:** If you find a **Closed** issue that seems like it is the - same thing that you’re experiencing, open a new issue and include a - link to the original issue in the body of your new one. - -How Do I Submit A (Good) Bug Report? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Bugs are tracked as `GitHub -issues `__. After you’ve -determined `which repository <#the-psyplot-framework>`__ your bug is -related to, create an issue on that repository and provide the following -information by filling in `the template `__. - -Explain the problem and include additional details to help maintainers -reproduce the problem: - -- **Use a clear and descriptive title** for the issue to identify the - problem. -- **Describe the exact steps which reproduce the problem** in as many - details as possible. For example, start by explaining how you started - psyplot, e.g. which command exactly you used in the terminal, or how - you started psyplot otherwise. When listing steps, **don’t just say - what you did, but explain how you did it**. For example, did you - update via GUI or console and what? -- **Provide specific examples to demonstrate the steps**. Include links - to files or GitHub projects, or copy/pasteable snippets, which you - use in those examples. If you’re providing snippets in the issue, use - `Markdown code blocks - `__. -- **Describe the behavior you observed after following the steps** and - point out what exactly is the problem with that behavior. 
-- **Explain which behavior you expected to see instead and why.** -- **Include screenshots and animated GIFs** which show you following - the described steps and clearly demonstrate the problem. -- **If the problem is related to your data structure**, include a small - example how a similar data structure can be generated - -Include details about your configuration and environment: - -- **Which version of psyplot are you using?** You can get the exact - version by running ``psyplot -aV`` in your terminal, or by starting - the psyplot-gui and open Help->Dependencies. -- **What’s the name and version of the OS you’re using**? - -Suggesting Enhancements -~~~~~~~~~~~~~~~~~~~~~~~ - -This section guides you through submitting an enhancement suggestion for -psyplot, including completely new features and minor improvements to -existing functionality. - -If you want to change an existing feature, use the `change feature -template `__, -otherwise fill in the `new feature -template `__. - -How Do I Submit A (Good) Enhancement Suggestion? -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Enhancement suggestions are tracked as `GitHub -issues `__. After you’ve -determined `which repository <#the-psyplot-framework>`__ your -enhancement suggestion is related to, create an issue on that repository -and provide the following information: - -- **Use a clear and descriptive title** for the issue to identify the - suggestion. -- **Provide a step-by-step description of the suggested enhancement** - in as many details as possible. -- **Provide specific examples to demonstrate the steps**. Include - copy/pasteable snippets which you use in those examples, as - `Markdown code blocks - `__. -- **Describe the current behavior** and **explain which behavior you - expected to see instead** and why. -- **Include screenshots and animated GIFs** which help you demonstrate - the steps or point out the part of psyplot which the suggestion is - related to. 
-- **Explain why this enhancement would be useful** to most psyplot - users. -- **List some other analysis software or applications where this - enhancement exists.** -- **Specify which version of psyplot you’re using.** You can get the - exact version by running ``psyplot -aV`` in your terminal, or by - starting the psyplot-gui and open Help->Dependencies. -- **Specify the name and version of the OS you’re using.** - -Pull Requests -~~~~~~~~~~~~~ - -- Fill in `the required template `__ -- Do not include issue numbers in the PR title -- Include screenshots and animated GIFs in your pull request whenever - possible. -- Document new code based on the `Documentation - Styleguide <#documentation-styleguide>`__ -- End all files with a newline and follow the - `PEP8 `__, e.g. by using - `flake8 `__ - -Adding new examples -~~~~~~~~~~~~~~~~~~~ - -You have new examples? Great! If you want to add them to the -documentation, please just fork the correct github repository and add a -jupyter notebook in the `examples repository on GitHub`_, together with -all the necessary data files. - -And we are always happy to help you finalizing incomplete pull requests. - -.. _examples repository on GitHub: https://github.com/psyplot/examples - -Styleguides ------------ - -Git Commit Messages -~~~~~~~~~~~~~~~~~~~ - -- Use the present tense (“Add feature” not “Added feature”) -- Use the imperative mood (“Move cursor to…” not “Moves cursor to…”) -- Limit the first line (summary) to 72 characters or less -- Reference issues and pull requests liberally after the first line -- When only changing documentation, include ``[ci skip]`` in the commit - title - -Documentation Styleguide -~~~~~~~~~~~~~~~~~~~~~~~~ - -- Follow the `numpy documentation - guidelines `__. -- Use - `reStructuredText `__. -- Try to not repeat yourself and make use of the - ``psyplot.docstring.docstrings`` - -Example -^^^^^^^ - -.. 
code:: python - - @docstrings.get_sections(base='new_function') - def new_function(a=1): - """Make some cool new feature - - This function implements a cool new feature - - Parameters - ---------- - a: int - First parameter - - Returns - ------- - something awesome - The result""" - ... - - @docstrings.dedent - def another_new_function(a=1, b=2): - """Make another cool new feature - - Parameters - ---------- - %(new_function.parameters)s - b: int - Another parameter - - Returns - ------- - Something even more awesome""" - ... - -.. note:: - - This document has been inspired by `the contribution guidelines of Atom `__ diff --git a/docs/contributing.rst b/docs/contributing.rst new file mode 100644 index 0000000..5448814 --- /dev/null +++ b/docs/contributing.rst @@ -0,0 +1,226 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 +.. SPDX-License-Identifier: CC-BY-4.0 + +.. _contributing: + +Contribution and development hints +================================== + +.. warning:: + + This page has been automatically generated as has not yet been reviewed by the + authors of psyplot! + +The psyplot project is developed by the +`Helmholtz-Zentrum Hereon`_. It is open-source +as we believe that this analysis can be helpful for reproducibility and +collaboration, and we are looking forward for your feedback, +questions and especially for your contributions. + +- If you want to ask a question, are missing a feature or have comments on the + docs, please `open an issue at the source code repository`_ +- If you have suggestions for improvement, please let us know in an issue, or + fork the repository and create a merge request. See also :ref:`development`. + + + +.. contents:: Table of Contents + +Code of Conduct +--------------- + +This project and everyone participating in it is governed by the +`psyplot Code of Conduct `__. +By participating, you are expected to uphold this code. 
+ +What should I know before I get started? +---------------------------------------- + +The psyplot framework +~~~~~~~~~~~~~~~~~~~~~ + +``psyplot`` is just the framework that allows interactive data analysis +and visualization. Much of the functionality however is implemented by +other packages. What package is the correct one for your bug +report/feature request, can be determined by the following list + +- `psyplot-gui `__: + Everything specific to the graphical user interface +- `psy-view `__: + Everything specific to the psy-view graphical user interface +- `psy-simple `__: + Everything concerning, e.g. the ``lineplot``, ``plot2d``, ``density`` + or ``vector`` plot methods +- `psy-maps `__: Everything + concerning, e.g. the ``mapplot``, ``mapvector`` ``mapcombined`` plot + methods +- `psy-reg `__: Everything + concerning, e.g. the ``linreg`` or ``densityreg`` plot methods +- `psyplot `__: Everything + concerning the general framework, e.g. data handling, parallel + update, etc. + +Concerning plot methods, you can simply find out which module +implemented it via + +.. code:: python + + import psyplot.project as psy + + print(psy.plot.name - of - your - plot - method._plugin) + +If you still don’t know, where to open the issue, just go for +`psyplot `__. + +.. _Helmholtz-Zentrum Hereon: https://www.hereon.de +.. _open an issue at the source code repository: https://codebase.helmholtz.cloud/psyplot/psyplot + +.. _development: + +Contributing in the development +------------------------------- + +.. note:: + + We use automated formatters to ensure a high quality and maintanability of + our source code. Getting familiar with these techniques can take quite some + time and you might get error messages that are hard to understand. + + We not slow down your development and we do our best to support you with + these techniques. 
If you have any troubles, just commit with + ``git commit --no-verify`` (see below) and the maintainers will take care + of the tests and continuous integration. + +Thanks for your wish to contribute to this project!! The source code of +the `psyplot` package is hosted at +https://codebase.helmholtz.cloud/psyplot/psyplot. + + +This is an open gitlab where you can register via the Helmholtz AAI. If your +home institution is not listed in the Helmholtz AAI, please use one of the +social login providers, such as Google, GitHub or OrcID. + + +Once you created an account in this gitlab, you can fork_ this +repository to your own user account and implement the changes. + +Afterwards, please make a merge request into the main repository. If you +have any questions, please do not hesitate to create an issue on gitlab +and contact the maintainers of this package. + +Once you created you fork, you can clone it via + +.. code-block:: bash + + git clone https://codebase.helmholtz.cloud//psyplot.git + +we recommend that you change into the directory and create a virtual +environment via:: + + cd psyplot + python -m venv venv + source venv/bin/activate # (or venv/Scripts/Activate.bat on windows) + +and install it in development mode with the ``[dev]`` option via:: + + pip install -e ./psyplot/[dev] + + +Helpers +------- + +Shortcuts with make +~~~~~~~~~~~~~~~~~~~ +There are several shortcuts available with the ``Makefile`` in the root of +the repository. On Linux, you can execute ``make help`` to get an overview. + +Annotating licenses +~~~~~~~~~~~~~~~~~~~ + +If you want to create new files, you need to set license and copyright +statements correctly. We use ``reuse`` to check that the licenses are +correctly encoded. As a helper script, you can use the script at +``.reuse/add_license.py`` that provides several shortcuts from +``.reuse/shortcuts.yaml``. 
Please select the correct shortcut, namely + +- If you create a new python file, you should run:: + + python .reuse/add_license.py code .py + +- If you created a new file for the docs, you should run:: + + python .reuse/add_license.py docs .py + +- If you created any other non-code file, you should run:: + + python .reuse/add_license.py supp .py + +If you have any questions on how licenses are handled, please do not hesitate +to contact the maintainers of `psyplot`. + + +Fixing the docs +--------------- +The documentation for this package is written in restructured Text and built +with sphinx_ and deployed on readthedocs_. + +If you found something in the docs that you want to fix, head over to the +``docs`` folder, install the necessary requirements via +``pip install -r requirements.txt ../[docs]`` and build the docs with +``make html`` (or ``make.bat`` on windows). + +The docs are then available in ``docs/_build/html/index.html`` that you can +open with your local browser. + +Implement your fixes in the corresponding ``.rst``-file and push them to your +fork on gitlab. + +Contributing to the code +------------------------ +We use automated formatters (see their config in ``pyproject.toml``), namely + +- `Black `__ for standardized + code formatting +- `blackdoc `__ for + standardized code formatting in documentation +- `Flake8 `__ for general code + quality +- `isort `__ for standardized order in + imports. +- `mypy `__ for static type checking on + `type hints `__ +- `reuse `__ for handling of licenses +- `cffconvert `__ + for validating the ``CITATION.cff`` file. + +We highly recommend that you setup +`pre-commit hooks `__ to automatically run all the +above tools every time you make a git commit. This can be done by running:: + + pre-commit install + +from the root of the repository. You can skip the pre-commit checks with +``git commit --no-verify`` but note that the CI will fail if it +encounters any formatting errors. 
+ +You can also run the ``pre-commit`` step manually by invoking:: + + pre-commit run --all-files + + +.. _fork: https://codebase.helmholtz.cloud/psyplot/psyplot/-/forks/new + +.. _sphinx: https://www.sphinx-doc.org +.. _readthedocs: https://readthedocs.org + + +Updating the skeleton for this package +-------------------------------------- + +This package has been generated from the template +`https://codebase.helmholtz.cloud/hcdc/software-templates/python-package-template.git`__. + +See the template repository for instructions on how to update the skeleton for +this package. diff --git a/docs/demo.nc.license b/docs/demo.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/docs/demo.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/docs/develop/framework.rst b/docs/develop/framework.rst index 5c16e95..c2bc01c 100644 --- a/docs/develop/framework.rst +++ b/docs/develop/framework.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _framework: The psyplot framework diff --git a/docs/develop/index.rst b/docs/develop/index.rst index 845f369..cb1b3da 100644 --- a/docs/develop/index.rst +++ b/docs/develop/index.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _developers-guide: Developers guide diff --git a/docs/develop/plugins_guide.rst b/docs/develop/plugins_guide.rst index c878ed9..c03938b 100644 --- a/docs/develop/plugins_guide.rst +++ b/docs/develop/plugins_guide.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _plugins_guide: How to implement your own plotters and plugins @@ -327,6 +331,11 @@ The advantages of this methodology are basically: Creating new plugins -------------------- + +.. 
todo:: + + The plugin generation needs to be revised + Now that you have created your plotter, you may want to include it in the plot methods of the :class:`~psyplot.project.Project` class such that you can do something like @@ -353,6 +362,7 @@ For our demonstration, let's create a plugin named my-plugin. This is simply done via .. ipython:: + :verbatim: In [1]: !psyplot-plugin my-plugin @@ -373,10 +383,6 @@ The following files are created in a directory named ``'my-plugin'``: If you want to see more, look into the comments in the created files. -.. ipython:: - - @suppress - In [4]: !rm -r my-plugin .. _psy-maps: https://psyplot.github.io/psy-maps/ .. _psy-simple: https://psyplot.github.io/psy-simple/ diff --git a/docs/develop/psyplot_framework.ai.license b/docs/develop/psyplot_framework.ai.license new file mode 100644 index 0000000..b21fae9 --- /dev/null +++ b/docs/develop/psyplot_framework.ai.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/develop/psyplot_framework.gif.license b/docs/develop/psyplot_framework.gif.license new file mode 100644 index 0000000..d46171c --- /dev/null +++ b/docs/develop/psyplot_framework.gif.license @@ -0,0 +1,4 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 +SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/develop/psyplot_framework.png.license b/docs/develop/psyplot_framework.png.license new file mode 100644 index 0000000..b21fae9 --- /dev/null +++ b/docs/develop/psyplot_framework.png.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 diff --git a/docs/environment.yml b/docs/environment.yml deleted file mode 100644 index e1c9d7b..0000000 --- a/docs/environment.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: psyplot_docs -channels: - - local - - psyplot/label/develop - - psyplot/label/master - - conda-forge 
-dependencies: - - python=3.8 - - dask - - netCDF4 - - seaborn - - sphinx_rtd_theme - - ipython - - sphinx - - psy-maps - - autodocsumm - - pip - - pip: - - cdo - - sphinx-argparse diff --git a/docs/getting_started.rst b/docs/getting_started.rst index e327586..d270966 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _getting-started: Getting started diff --git a/docs/index.rst b/docs/index.rst index 4fd85d4..08d318f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,8 @@ -.. psyplot documentation master file, created by - sphinx-quickstart on Mon Jul 20 18:01:33 2015. +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + +.. psyplot documentation master file You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. @@ -8,6 +11,15 @@ Interactive data visualization with python ========================================== +|CI| +|Code coverage| +|Latest Release| +|PyPI version| +|Code style: black| +|Imports: isort| +|PEP8| +|REUSE status| + .. image:: _static/psyplot.png :width: 50% :alt: psyplot logo @@ -32,88 +44,6 @@ graphical user interface (GUI) from the If you want more motivation: Have a look into the :ref:`about` section. -The package is very new and there are many features that will be included in -the future. So we are very pleased for feedback! Please simply raise an issue -on `GitHub `__. - - -.. start-badges - -.. only:: html and not epub - - .. list-table:: - :stub-columns: 1 - :widths: 10 90 - - * - docs - - |docs| |joss| |zenodo| - * - tests - - |circleci| |appveyor| |codecov| - * - package - - |version| |conda| |github| |zenodo| - * - implementations - - |supported-versions| |supported-implementations| - * - get in touch - - |mattermost| |mailing-list| |issues| - - .. 
|docs| image:: https://img.shields.io/github/deployments/psyplot/psyplot/github-pages - :alt: Documentation - :target: http://psyplot.github.io/psyplot/ - - .. |circleci| image:: https://circleci.com/gh/psyplot/psyplot/tree/master.svg?style=svg - :alt: CircleCI - :target: https://circleci.com/gh/psyplot/psyplot/tree/master - - .. |appveyor| image:: https://ci.appveyor.com/api/projects/status/4nt6qrw66iw65w33/branch/master?svg=true - :alt: AppVeyor - :target: https://ci.appveyor.com/project/psyplot/psyplot/branch/master - - .. |codecov| image:: https://codecov.io/gh/psyplot/psyplot/branch/master/graph/badge.svg - :alt: Coverage - :target: https://codecov.io/gh/psyplot/psyplot - - .. |version| image:: https://img.shields.io/pypi/v/psyplot.svg?style=flat - :alt: PyPI Package latest release - :target: https://pypi.python.org/pypi/psyplot - - .. |conda| image:: https://anaconda.org/conda-forge/psyplot/badges/version.svg - :alt: conda - :target: https://anaconda.org/conda-forge/psyplot - - .. |supported-versions| image:: https://img.shields.io/pypi/pyversions/psyplot.svg?style=flat - :alt: Supported versions - :target: https://pypi.python.org/pypi/psyplot - - .. |supported-implementations| image:: https://img.shields.io/pypi/implementation/psyplot.svg?style=flat - :alt: Supported implementations - :target: https://pypi.python.org/pypi/psyplot - - .. |joss| image:: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60/status.svg - :alt: Journal of Open Source Software - :target: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60 - - .. |zenodo| image:: https://zenodo.org/badge/87944102.svg - :alt: Zenodo - :target: https://zenodo.org/badge/latestdoi/87944102 - - .. |github| image:: https://img.shields.io/github/release/psyplot/psyplot.svg - :target: https://github.com/psyplot/psyplot/releases/latest - :alt: Latest github release - - .. 
|mattermost| image:: https://img.shields.io/badge/chat-on%20mattermost-success?logo=mattermost - :target: https://mattermost.hzdr.de/psyplot/ - :alt: Mattermost - - .. |mailing-list| image:: https://img.shields.io/badge/join-mailing%20list-brightgreen.svg?style=flat - :target: https://www.listserv.dfn.de/sympa/subscribe/psyplot - :alt: DFN mailing list - - .. |issues| image:: https://img.shields.io/github/issues-raw/psyplot/psyplot.svg?style=flat - :target: https://github.com/psyplot/psyplot/issues - :alt: GitHub issues - -.. end-badges - Documentation ------------- @@ -131,8 +61,8 @@ Documentation plugins command_line develop/index - contribute - api/psyplot + contributing + api todos changelog @@ -157,33 +87,45 @@ about good bug reports. .. _citation: -How to cite psyplot -------------------- +How to cite this software +------------------------- + +.. card:: Please do cite this software! -When using psyplot, you should at least cite the publication in -`the Journal of Open Source Software`_: + .. tab-set:: -.. only:: html and not epub + .. tab-item:: APA - .. image:: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60/status.svg - :alt: Journal of Open Source Software - :target: http://joss.theoj.org/papers/3535c28017003f0b5fb63b1b64118b60 + .. citation-info:: + :format: apalike -Sommer, P. S.: The psyplot interactive visualization framework, -*The Journal of Open Source Software*, 2, doi:10.21105/joss.00363, -https://doi.org/10.21105/joss.00363, 2017. + .. tab-item:: BibTex -:download:`BibTex ` - :download:`EndNote ` + .. citation-info:: + :format: bibtex + + .. tab-item:: RIS + + .. citation-info:: + :format: ris + + .. tab-item:: Endnote + + .. citation-info:: + :format: endnote + + .. tab-item:: CFF + + .. citation-info:: + :format: cff Furthermore, each release of psyplot and it's :ref:`subprojects ` is -associated with a DOI using zenodo.org_. If you want to cite a specific +associated with a DOI on zenodo_. 
If you want to cite a specific version or plugin, please refer to the `releases page of psyplot` or the releases page of the corresponding subproject. +.. _zenodo: https://zenodo.org -.. _the Journal of Open Source Software: http://joss.theoj.org/ -.. _zenodo.org: https://zenodo.org/ -.. _releases page of psyplot: https://github.com/psyplot/psyplot/releases/ Acknowledgment -------------- @@ -220,3 +162,22 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` + + +.. |CI| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/badges/master/pipeline.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot/-/pipelines?page=1&scope=all&ref=master +.. |Code coverage| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/badges/master/coverage.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot/-/graphs/develop/charts +.. |Latest Release| image:: https://codebase.helmholtz.cloud/psyplot/psyplot/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot +.. |PyPI version| image:: https://img.shields.io/pypi/v/psyplot.svg + :target: https://pypi.python.org/pypi/psyplot/ +.. |Code style: black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black +.. |Imports: isort| image:: https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336 + :target: https://pycqa.github.io/isort/ +.. |PEP8| image:: https://img.shields.io/badge/code%20style-pep8-orange.svg + :target: https://www.python.org/dev/peps/pep-0008/ +.. TODO: uncomment the following line when the package is registered at https://api.reuse.software +.. .. |REUSE status| image:: https://api.reuse.software/badge/codebase.helmholtz.cloud/psyplot/psyplot +.. 
:target: https://api.reuse.software/info/codebase.helmholtz.cloud/psyplot/psyplot diff --git a/docs/installing.rst b/docs/installing.rst index f5d2ae9..9c80a55 100644 --- a/docs/installing.rst +++ b/docs/installing.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _install: .. highlight:: bash diff --git a/docs/linux.png b/docs/linux.png deleted file mode 100644 index d5c2418..0000000 Binary files a/docs/linux.png and /dev/null differ diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..2e7d9b0 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,39 @@ +REM SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +REM +REM SPDX-License-Identifier: CC0-1.0 + +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/plugins.rst b/docs/plugins.rst index 1760b23..8b49db3 100644 --- a/docs/plugins.rst +++ b/docs/plugins.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. 
_plugins: Psyplot plugins diff --git a/docs/projects.rst b/docs/projects.rst index afaa509..3ee7f93 100644 --- a/docs/projects.rst +++ b/docs/projects.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. SPDX-License-Identifier: CC-BY-4.0 + .. _projects: Subprojects @@ -8,54 +12,43 @@ splitted into several subprojects. Each of them is accessible via ``https://psyplot.github.io/`` - the :ref:`psyplot_gui ` package: The GUI to psyplot - |psyplot-gui-github| |psyplot-gui-release| + :bdg-link-primary:`Source ` + |psyplot-gui-release| - the :ref:`psy_view ` package: An ncview-like interface for - psyplot |psy-view-github| |psy-view-release| + psyplot + :bdg-link-primary:`Source ` + |psy-view-release| - the :ref:`psy-simple ` package: A plugin for simple - visualization |psy-simple-github| |psy-simple-release| + visualization + :bdg-link-primary:`Source ` + |psy-simple-release| - the :ref:`psy-maps ` package: A psyplot plugin for - visualizing data on a map |psy-maps-github| |psy-maps-release| + visualizing data on a map + :bdg-link-primary:`Source ` + |psy-maps-release| - the :ref:`psy-reg ` package: A psyplot plugin for visualizing - and calculating regression fits |psy-reg-github| |psy-reg-release| + and calculating regression fits + :bdg-link-primary:`Source ` + |psy-reg-release| See :ref:`plugins` for more informations on the plugins. -.. |psyplot-gui-github| image:: GitHub-Mark-16px.png - :target: https://github.com/psyplot/psyplot-gui - :alt: github - -.. |psyplot-gui-release| image:: https://img.shields.io/github/v/release/psyplot/psyplot-gui.svg?style=flat-square - :target: https://github.com/psyplot/psyplot-gui/releases/latest +.. |psyplot-gui-release| image:: https://codebase.helmholtz.cloud/psyplot/psyplot-gui/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psyplot-gui/-/releases :alt: Latest release -.. 
|psy-view-github| image:: GitHub-Mark-16px.png - :target: https://github.com/psyplot/psy-view - :alt: github - -.. |psy-view-release| image:: https://img.shields.io/github/v/release/psyplot/psy-view.svg?style=flat-square - :target: https://github.com/psyplot/psy-view/releases/latest +.. |psy-view-release| image:: https://codebase.helmholtz.cloud/psyplot/psy-view/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psy-view/-/releases :alt: Latest release -.. |psy-simple-github| image:: GitHub-Mark-16px.png - :target: https://github.com/psyplot/psy-simple - :alt: github - -.. |psy-simple-release| image:: https://img.shields.io/github/v/release/psyplot/psy-simple.svg?style=flat-square - :target: https://github.com/psyplot/psy-simple/releases/latest +.. |psy-simple-release| image:: https://codebase.helmholtz.cloud/psyplot/psy-simple/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psy-simple/-/releases :alt: Latest release -.. |psy-maps-github| image:: GitHub-Mark-16px.png - :target: https://github.com/psyplot/psy-maps - :alt: github - -.. |psy-maps-release| image:: https://img.shields.io/github/v/release/psyplot/psy-maps.svg?style=flat-square - :target: https://github.com/psyplot/psy-maps/releases/latest +.. |psy-maps-release| image:: https://codebase.helmholtz.cloud/psyplot/psy-maps/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psy-maps/-/releases :alt: Latest release -.. |psy-reg-github| image:: GitHub-Mark-16px.png - :target: https://github.com/psyplot/psy-reg - :alt: github - -.. |psy-reg-release| image:: https://img.shields.io/github/v/release/psyplot/psy-reg.svg?style=flat-square - :target: https://github.com/psyplot/psy-reg/releases/latest +.. 
|psy-reg-release| image:: https://codebase.helmholtz.cloud/psyplot/psy-reg/-/badges/release.svg + :target: https://codebase.helmholtz.cloud/psyplot/psy-reg/-/releases :alt: Latest release diff --git a/docs/psyplot_entry.bib b/docs/psyplot_entry.bib deleted file mode 100755 index 7de3bab..0000000 --- a/docs/psyplot_entry.bib +++ /dev/null @@ -1,13 +0,0 @@ - -@Article{Sommer2017, - author = {Philipp S Sommer}, - title = {The psyplot interactive visualization framework}, - journal = {The Journal of Open Source Software}, - year = {2017}, - volume = {2}, - number = {16}, - month = {aug}, - doi = {10.21105/joss.00363}, - publisher = {The Open Journal}, - url = {https://doi.org/10.21105/joss.00363}, -} diff --git a/docs/psyplot_entry.enw b/docs/psyplot_entry.enw deleted file mode 100644 index 38718c7..0000000 --- a/docs/psyplot_entry.enw +++ /dev/null @@ -1,14 +0,0 @@ -%0 Journal Article -%A Sommer, Philipp S. -%D 2017 -%T The psyplot interactive visualization framework -%B The Journal of Open Source Software -%V 2 -%N 16 -%8 aug -%! The psyplot interactive visualization framework -%F Sommer2017 -%U https://doi.org/10.21105/joss.00363 - - - diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..b6c239f --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +git+https://codebase.helmholtz.cloud/hcdc/hereon-netcdf/sphinxext.git +git+https://codebase.helmholtz.cloud/psyplot/psy-simple.git@fix-ci +git+https://codebase.helmholtz.cloud/psyplot/psy-maps.git@fix-ci diff --git a/docs/todos.rst b/docs/todos.rst index 5b6c5dc..f37b1ff 100644 --- a/docs/todos.rst +++ b/docs/todos.rst @@ -1,3 +1,7 @@ +.. SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +.. +.. 
SPDX-License-Identifier: CC-BY-4.0 + ToDos ===== diff --git a/docs/windows.png b/docs/windows.png deleted file mode 100644 index 51ca682..0000000 Binary files a/docs/windows.png and /dev/null differ diff --git a/icon/CreateICNS.sh b/icon/CreateICNS.sh index b355a4b..cac708a 100644 --- a/icon/CreateICNS.sh +++ b/icon/CreateICNS.sh @@ -1,27 +1,11 @@ # Create the iconset file for the psyplot icon. -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: CC-BY-4.0 mkdir main.iconset sips -z 16 16 icon1024.png --out main.iconset/icon_16x16.png diff --git a/icon/CreateICO.sh b/icon/CreateICO.sh index 18b7819..06f45c2 100644 --- a/icon/CreateICO.sh +++ b/icon/CreateICO.sh @@ -1,26 +1,10 @@ # Create the psyplot.ico file for the psyplot icon -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: CC-BY-4.0 convert icon1024.png -define icon:auto-resize=64,48,32,16 psyplot.ico diff --git a/icon/icon.py b/icon/icon.py index a484622..99fdc8c 100644 --- a/icon/icon.py +++ b/icon/icon.py @@ -3,53 +3,43 @@ This script creates the psyplot icon with a dpi of 128 and a width and height of 8 inches. 
The file is saved it to ``'icon1024.pkl'``""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import matplotlib.pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cf +import matplotlib.pyplot as plt from matplotlib.text import FontProperties # The path to the font -fontpath = '/Library/Fonts/FreeSansBoldOblique.ttf' +fontpath = "/Library/Fonts/FreeSansBoldOblique.ttf" fig = plt.figure(figsize=(8, 8), dpi=128) -ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], projection=ccrs.Orthographic( - central_latitude=5)) +ax = fig.add_axes( + [0.0, 0.0, 1.0, 1.0], projection=ccrs.Orthographic(central_latitude=5) +) -land = ax.add_feature(cf.LAND, facecolor='0.975') -ocean = ax.add_feature(cf.OCEAN, facecolor=plt.get_cmap('Blues')(0.5)) +land = ax.add_feature(cf.LAND, facecolor="0.975") +ocean = ax.add_feature(cf.OCEAN, facecolor=plt.get_cmap("Blues")(0.5)) text = ax.text( - 0.47, 0.5, 'Psy', + 0.47, + 0.5, + "Psy", transform=fig.transFigure, - name='FreeSans', + name="FreeSans", fontproperties=FontProperties(fname=fontpath), - size=256, ha='center', va='center', - weight=400) + size=256, + ha="center", + va="center", + weight=400, +) -ax.outline_patch.set_edgecolor('none') +ax.outline_patch.set_edgecolor("none") -plt.savefig('icon1024.png', transparent=True) +plt.savefig("icon1024.png", transparent=True) diff --git a/icon/icon1024.png.license b/icon/icon1024.png.license new file mode 100644 index 0000000..b21fae9 --- /dev/null +++ b/icon/icon1024.png.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 diff --git a/paper.bib b/paper.bib index adfa0e4..0c7a5d1 100644 --- a/paper.bib +++ b/paper.bib @@ -1,3 +1,9 @@ +@Comment{ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC-BY-4.0 +} + @article{Hunter2007, author={J. D. 
Hunter}, journal={Computing in Science Engineering}, diff --git a/paper.md b/paper.md index bb35d11..88375c2 100644 --- a/paper.md +++ b/paper.md @@ -1,3 +1,9 @@ + + --- title: 'The psyplot interactive visualization framework' tags: diff --git a/postBuild b/postBuild deleted file mode 100644 index 79f0e84..0000000 --- a/postBuild +++ /dev/null @@ -1,2 +0,0 @@ -conda uninstall --force psyplot -python -m pip install . --no-deps --ignore-installed -vvv diff --git a/psyplot/__init__.py b/psyplot/__init__.py index c129bd5..8c5d556 100755 --- a/psyplot/__init__.py +++ b/psyplot/__init__.py @@ -1,47 +1,37 @@ """psyplot visualization framework.""" -# Disclaimer -# ---------- +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import sys import datetime as dt import logging as _logging -from psyplot.warning import warn, critical, disable_warnings -from psyplot.config.rcsetup import rcParams +import sys + import psyplot.config as config -from psyplot.data import ( - ArrayList, InteractiveArray, InteractiveList, open_dataset, open_mfdataset) +from psyplot.config.rcsetup import rcParams +from psyplot.data import ( # noqa: F401 + ArrayList, + InteractiveArray, + InteractiveList, + open_dataset, + open_mfdataset, +) +from psyplot.warning import critical, disable_warnings, warn # noqa: F401 from ._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions __author__ = "Philipp S. Sommer" __copyright__ = """ -Copyright (C) 2021 Helmholtz-Zentrum Hereon -Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -Copyright (C) 2016-2021 University of Lausanne +2016-2024 University of Lausanne +2020-2021 Helmholtz-Zentrum Geesthacht +2021-2024 Helmholtz-Zentrum hereon GmbH """ __credits__ = ["Philipp S. Sommer"] __license__ = "LGPL-3.0-only" @@ -55,7 +45,9 @@ logger = _logging.getLogger(__name__) logger.debug( "%s: Initializing psyplot, version %s", - dt.datetime.now().isoformat(), __version__) + dt.datetime.now().isoformat(), + __version__, +) logger.debug("Logging configuration file: %s", config.logcfg_path) logger.debug("Configuration file: %s", config.config_path) @@ -101,35 +93,33 @@ def get_versions(requirements=True, key=None): .. 
code-block:: python import json + print(json.dumps(psyplot.get_versions(), indent=4)) { - "psy_simple.plugin": { - "version": "1.0.0.dev0" - }, + "psy_simple.plugin": {"version": "1.0.0.dev0"}, "psyplot": { "version": "1.0.0.dev0", "requirements": { "matplotlib": "1.5.3", "numpy": "1.11.3", "pandas": "0.19.2", - "xarray": "0.9.1" - } + "xarray": "0.9.1", + }, }, "psy_maps.plugin": { "version": "1.0.0.dev0", - "requirements": { - "cartopy": "0.15.0" - } - } + "requirements": {"cartopy": "0.15.0"}, + }, } """ from psyplot.utils import plugin_entrypoints + eps = plugin_entrypoints("psyplot", "plugin") - ret = {'psyplot': _get_versions(requirements)} + ret = {"psyplot": _get_versions(requirements)} for ep in eps: if str(ep) in rcParams._plugins: - logger.debug('Loading entrypoint %s', ep) + logger.debug("Loading entrypoint %s", ep) try: ep.module @@ -140,39 +130,46 @@ def get_versions(requirements=True, key=None): continue try: mod = ep.load() - except (ImportError, ModuleNotFoundError) as e: - logger.debug("Could not import %s" % (ep, ), exc_info=True) - logger.warning("Could not import %s" % (ep, ), exc_info=True) + except (ImportError, ModuleNotFoundError): + logger.debug("Could not import %s" % (ep,), exc_info=True) + logger.warning("Could not import %s" % (ep,), exc_info=True) else: try: ret[str(ep.module)] = mod.get_versions(requirements) except AttributeError: ret[str(ep.module)] = { - 'version': getattr( - mod, 'plugin_version', - getattr(mod, '__version__', '')) - } + "version": getattr( + mod, + "plugin_version", + getattr(mod, "__version__", ""), + ) + } if key is None: try: import psyplot_gui except ImportError: pass else: - ret['psyplot_gui'] = psyplot_gui.get_versions(requirements) + ret["psyplot_gui"] = psyplot_gui.get_versions(requirements) return ret def _get_versions(requirements=True): if requirements: import matplotlib as mpl - import xarray as xr - import pandas as pd import numpy as np - return {'version': __version__, - 'requirements': 
{'matplotlib': mpl.__version__, - 'xarray': xr.__version__, - 'pandas': pd.__version__, - 'numpy': np.__version__, - 'python': ' '.join(sys.version.splitlines())}} + import pandas as pd + import xarray as xr + + return { + "version": __version__, + "requirements": { + "matplotlib": mpl.__version__, + "xarray": xr.__version__, + "pandas": pd.__version__, + "numpy": np.__version__, + "python": " ".join(sys.version.splitlines()), + }, + } else: - return {'version': __version__} + return {"version": __version__} diff --git a/psyplot/__main__.py b/psyplot/__main__.py index 796e754..425ff8a 100644 --- a/psyplot/__main__.py +++ b/psyplot/__main__.py @@ -1,46 +1,31 @@ # -*- coding: utf-8 -*- """Main commandline entrypoint for psyplot.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import os -import os.path as osp -import sys import argparse -import pickle -import six import glob -from itertools import chain +import logging +import os.path as osp +import pickle +import sys from collections import defaultdict -import yaml +from itertools import chain + +import six import xarray as xr +import yaml +from funcargparse import FuncArgParser + import psyplot from psyplot.docstring import docstrings +from psyplot.utils import get_default_value from psyplot.warning import warn -from psyplot.compat.pycompat import get_default_value -from funcargparse import FuncArgParser -import logging rcParams = psyplot.rcParams @@ -61,10 +46,10 @@ def main(args=None): The parser that has been used from the command line""" try: from psyplot_gui import get_parser as _get_parser - except (ImportError, ModuleNotFoundError) as e: - logger.debug('Failed to import gui', exc_info=True) + except (ImportError, ModuleNotFoundError): + logger.debug("Failed to import gui", exc_info=True) parser = get_parser(create=False) - parser.update_arg('output', required=True) + parser.update_arg("output", required=True) parser.create_arguments() parser.parse2func(args) else: @@ -73,14 +58,27 @@ def main(args=None): parser.parse_known2func(args) -@docstrings.get_sections(base='make_plot') +@docstrings.get_sections(base="make_plot") @docstrings.dedent -def make_plot(fnames=[], name=[], dims=None, plot_method=None, - output=None, project=None, engine=None, formatoptions=None, - tight=False, rc_file=None, encoding=None, enable_post=False, - seaborn_style=None, output_project=None, - concat_dim=get_default_value(xr.open_mfdataset, 'concat_dim'), - chname={}, preset=None): +def make_plot( + fnames=[], + name=[], + dims=None, + plot_method=None, + output=None, + project=None, + engine=None, + formatoptions=None, + tight=False, + rc_file=None, + encoding=None, + enable_post=False, + seaborn_style=None, + output_project=None, + 
concat_dim=get_default_value(xr.open_mfdataset, "concat_dim"), + chname={}, + preset=None, +): """ Eventually start the QApplication or only make a plot @@ -141,8 +139,10 @@ def make_plot(fnames=[], name=[], dims=None, plot_method=None, :func:`~psyplot.config.rcsetup.get_configdir`). """ if project is not None and (name != [] or dims is not None): - warn('The `name` and `dims` parameter are ignored if the `project`' - ' parameter is set!') + warn( + "The `name` and `dims` parameter are ignored if the `project`" + " parameter is set!" + ) if rc_file is not None: rcParams.load_from_file(rc_file) @@ -154,25 +154,35 @@ def make_plot(fnames=[], name=[], dims=None, plot_method=None, if not fnames and not project: raise ValueError( "Either a filename or a project file must be provided if " - "the output parameter is set!") + "the output parameter is set!" + ) elif project is None and plot_method is None: raise ValueError( "A plotting method must be provided if the output parameter " - "is set and not the project!") + "is set and not the project!" 
+ ) if seaborn_style is not None: import seaborn as sns + sns.set_style(seaborn_style) import psyplot.project as psy + if project is not None: - fnames = [s.split(',') for s in fnames] + fnames = [s.split(",") for s in fnames] chname = dict(chname) - single_files = (l[0] for l in fnames if len(l) == 1) + single_files = (fn_list[0] for fn_list in fnames if len(fn_list) == 1) alternative_paths = defaultdict(lambda: next(single_files, None)) - alternative_paths.update([l for l in fnames if len(l) == 2]) + alternative_paths.update( + (fn_list for fn_list in fnames if len(fn_list) == 2) + ) p = psy.Project.load_project( - project, alternative_paths=alternative_paths, - engine=engine, encoding=encoding, enable_post=enable_post, - chname=chname) + project, + alternative_paths=alternative_paths, + engine=engine, + encoding=encoding, + enable_post=enable_post, + chname=chname, + ) if preset: p.load_preset(preset) if formatoptions is not None: @@ -182,11 +192,17 @@ def make_plot(fnames=[], name=[], dims=None, plot_method=None, pm = getattr(psy.plot, plot_method, None) if pm is None: raise ValueError("Unknown plot method %s!" 
% plot_method) - kwargs = {'name': name} if name else {} + kwargs = {"name": name} if name else {} p = pm( - fnames, dims=dims or {}, engine=engine, preset=preset, - fmt=formatoptions or {}, mf_mode=True, concat_dim=concat_dim, - **kwargs) + fnames, + dims=dims or {}, + engine=engine, + preset=preset, + fmt=formatoptions or {}, + mf_mode=True, + concat_dim=concat_dim, + **kwargs, + ) p.export(output, tight=tight) if output_project is not None: p.save_project(output_project) @@ -202,7 +218,9 @@ def get_parser(create=True): psyplot.parser.FuncArgParser The :class:`argparse.ArgumentParser` instance""" #: The parse that is used to parse arguments from the command line - epilog = docstrings.get_sections(docstrings.dedent(""" + epilog = docstrings.get_sections( + docstrings.dedent( + """ Examples -------- @@ -237,95 +255,157 @@ def get_parser(create=True): $ echo 'title: my title' > fmt.yaml $ psyplot myfile.nc -n t2m -pm mapplot -fmt fmt.yaml -o test.pdf - """), 'parser', ['Examples']) + """ + ), + "parser", + ["Examples"], + ) - epilog = '.. rubric:: Examples\n' + '\n'.join(epilog.splitlines()[2:]) + epilog = ".. 
rubric:: Examples\n" + "\n".join(epilog.splitlines()[2:]) parser = FuncArgParser( description=""" Load a dataset, make the plot and save the result to a file""", epilog=epilog, - formatter_class=argparse.RawDescriptionHelpFormatter) + formatter_class=argparse.RawDescriptionHelpFormatter, + ) info_grp = parser.add_argument_group( - 'Info options', - 'Options that print informations and quit afterwards') + "Info options", "Options that print informations and quit afterwards" + ) - parser.update_arg('version', short='V', long='version', action='version', - version=psyplot.__version__, if_existent=False, - group=info_grp) + parser.update_arg( + "version", + short="V", + long="version", + action="version", + version=psyplot.__version__, + if_existent=False, + group=info_grp, + ) - parser.update_arg('all_versions', short='aV', long='all-versions', - action=AllVersionsAction, if_existent=False, - group=info_grp) + parser.update_arg( + "all_versions", + short="aV", + long="all-versions", + action=AllVersionsAction, + if_existent=False, + group=info_grp, + ) - parser.update_arg('list_plugins', short='lp', long='list-plugins', - action=ListPluginsAction, if_existent=False, - group=info_grp) parser.update_arg( - 'list_plot_methods', short='lpm', long='list-plot-methods', - action=ListPlotMethodsAction, if_existent=False, group=info_grp) + "list_plugins", + short="lp", + long="list-plugins", + action=ListPluginsAction, + if_existent=False, + group=info_grp, + ) + parser.update_arg( + "list_plot_methods", + short="lpm", + long="list-plot-methods", + action=ListPlotMethodsAction, + if_existent=False, + group=info_grp, + ) parser.update_arg( - 'list_datasets', short='lds', long='list-datasets', - action=ListDsNamesAction, if_existent=False, group=info_grp, - help="""List the used dataset names in the given `project`.""") + "list_datasets", + short="lds", + long="list-datasets", + action=ListDsNamesAction, + if_existent=False, + group=info_grp, + help="""List the used dataset 
names in the given `project`.""", + ) parser.update_arg( - 'list_presets', short='lps', long='list-presets', - action=ListPresetsAction, if_existent=False, group=info_grp) + "list_presets", + short="lps", + long="list-presets", + action=ListPresetsAction, + if_existent=False, + group=info_grp, + ) parser.setup_args(make_plot) output_grp = parser.add_argument_group( - 'Output options', - 'Options that only have an effect if the `-o` option is set.') + "Output options", + "Options that only have an effect if the `-o` option is set.", + ) - parser.update_arg('fnames', positional=True, nargs='*') + parser.update_arg("fnames", positional=True, nargs="*") - parser.update_arg('name', short='n', nargs='*', metavar='variable_name', - const=None) - - parser.update_arg('dims', short='d', nargs='+', type=_load_dims, - metavar='dim,val1[,val2[,...]]') + parser.update_arg( + "name", short="n", nargs="*", metavar="variable_name", const=None + ) - pm_choices = {pm for pm, d in filter( - lambda t: t[1].get('plot_func', True), - six.iteritems(rcParams['project.plotters']))} + parser.update_arg( + "dims", + short="d", + nargs="+", + type=_load_dims, + metavar="dim,val1[,val2[,...]]", + ) + + pm_choices = { + pm + for pm, d in filter( + lambda t: t[1].get("plot_func", True), + six.iteritems(rcParams["project.plotters"]), + ) + } if psyplot._project_imported: import psyplot.project as psy + pm_choices.update(set(psy.plot._plot_methods)) - parser.update_arg('plot_method', short='pm', choices=pm_choices, - metavar='{%s}' % ', '.join(map(repr, pm_choices))) + parser.update_arg( + "plot_method", + short="pm", + choices=pm_choices, + metavar="{%s}" % ", ".join(map(repr, pm_choices)), + ) - parser.update_arg('output', short='o', group=output_grp) - parser.update_arg('output_project', short='op', group=output_grp) + parser.update_arg("output", short="o", group=output_grp) + parser.update_arg("output_project", short="op", group=output_grp) - parser.update_arg('project', short='p') + 
parser.update_arg("project", short="p") parser.update_arg( - 'formatoptions', short='fmt', type=_load_dict, help=""" + "formatoptions", + short="fmt", + type=_load_dict, + help=""" The path to a yaml (``'.yml'`` or ``'.yaml'``) or pickle file defining a dictionary of formatoption that is applied to the data - visualized by the chosen `plot_method`""", metavar='FILENAME') + visualized by the chosen `plot_method`""", + metavar="FILENAME", + ) parser.update_arg( - 'chname', type=lambda s: s.split(','), nargs='*', help=""" + "chname", + type=lambda s: s.split(","), + nargs="*", + help=""" A mapping from variable names in the project to variable names in the datasets that should be used instead. Variable names should be - separated by a comma.""", metavar='project-variable,variable-to-use') + separated by a comma.""", + metavar="project-variable,variable-to-use", + ) - parser.update_arg('tight', short='t', group=output_grp) + parser.update_arg("tight", short="t", group=output_grp) - parser.update_arg('rc_file', short='rc') - parser.pop_key('rc_file', 'metavar') + parser.update_arg("rc_file", short="rc") + parser.pop_key("rc_file", "metavar") - parser.update_arg('encoding', short='e') + parser.update_arg("encoding", short="e") - parser.pop_key('enable_post', 'short') + parser.pop_key("enable_post", "short") - parser.update_arg('seaborn_style', short='sns') + parser.update_arg("seaborn_style", short="sns") - parser.update_arg('concat_dim', short='cd') + parser.update_arg("concat_dim", short="cd") if create: parser.create_arguments() @@ -335,30 +415,36 @@ def get_parser(create=True): def _load_dict(fname): with open(fname) as f: - if fname.endswith('.yml') or fname.endswith('.yaml'): + if fname.endswith(".yml") or fname.endswith(".yaml"): return yaml.load(f, Loader=yaml.SafeLoader) return pickle.load(f) def _load_dims(s): - s = s.split(',') + s = s.split(",") if len(s) > 1: return {s[0]: list(map(int, s[1:]))} return {} class AllVersionsAction(argparse.Action): - - def 
__init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None, - default=argparse.SUPPRESS, **kwargs): + def __init__( + self, + option_strings, + dest=argparse.SUPPRESS, + nargs=None, + default=argparse.SUPPRESS, + **kwargs, + ): if nargs is not None: raise ValueError("nargs not allowed") - kwargs['help'] = ("Print the versions of all plugins and requirements " - "and exit") - kwargs['default'] = default + kwargs["help"] = ( + "Print the versions of all plugins and requirements " "and exit" + ) + kwargs["default"] = default super(AllVersionsAction, self).__init__( - option_strings, nargs=0, dest=dest, - **kwargs) + option_strings, nargs=0, dest=dest, **kwargs + ) def __call__(self, parser, namespace, values, option_string=None): print(yaml.dump(psyplot.get_versions(), default_flow_style=False)) @@ -366,36 +452,51 @@ def __call__(self, parser, namespace, values, option_string=None): class ListPresetsAction(argparse.Action): - - def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None, - default=argparse.SUPPRESS, **kwargs): + def __init__( + self, + option_strings, + dest=argparse.SUPPRESS, + nargs=None, + default=argparse.SUPPRESS, + **kwargs, + ): if nargs is not None: raise ValueError("nargs not allowed") - kwargs['help'] = ("Print available presets and exit") - kwargs['default'] = default + kwargs["help"] = "Print available presets and exit" + kwargs["default"] = default super().__init__(option_strings, nargs=0, dest=dest, **kwargs) def __call__(self, parser, namespace, values, option_string=None): from psyplot.config.rcsetup import get_configdir - presets_dir = osp.join(get_configdir(), 'presets') + + presets_dir = osp.join(get_configdir(), "presets") if not osp.exists(presets_dir): sys.exit(0) else: - presets = {osp.splitext(osp.basename(fname))[0]: fname - for fname in glob.glob(osp.join(presets_dir, '*.yml'))} - print('\n'.join(map(': '.join, presets.items()))) + presets = { + osp.splitext(osp.basename(fname))[0]: fname + for fname in 
glob.glob(osp.join(presets_dir, "*.yml")) + } + print("\n".join(map(": ".join, presets.items()))) sys.exit(0) -class ListPluginsAction(argparse.Action): - def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None, - default=argparse.SUPPRESS, **kwargs): +class ListPluginsAction(argparse.Action): + def __init__( + self, + option_strings, + dest=argparse.SUPPRESS, + nargs=None, + default=argparse.SUPPRESS, + **kwargs, + ): if nargs is not None: raise ValueError("nargs not allowed") - kwargs['help'] = ("Print the names of the plugins and exit") - kwargs['default'] = default + kwargs["help"] = "Print the names of the plugins and exit" + kwargs["default"] = default super(ListPluginsAction, self).__init__( - option_strings, nargs=0, dest=dest, **kwargs) + option_strings, nargs=0, dest=dest, **kwargs + ) def __call__(self, parser, namespace, values, option_string=None): print(yaml.dump(psyplot.rcParams._plugins, default_flow_style=False)) @@ -403,25 +504,35 @@ def __call__(self, parser, namespace, values, option_string=None): class ListPlotMethodsAction(argparse.Action): - - def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None, - default=argparse.SUPPRESS, **kwargs): + def __init__( + self, + option_strings, + dest=argparse.SUPPRESS, + nargs=None, + default=argparse.SUPPRESS, + **kwargs, + ): if nargs is not None: raise ValueError("nargs not allowed") - kwargs['help'] = "List the available plot methods and what they do" - kwargs['default'] = default + kwargs["help"] = "List the available plot methods and what they do" + kwargs["default"] = default super(ListPlotMethodsAction, self).__init__( - option_strings, nargs=0, dest=dest, **kwargs) + option_strings, nargs=0, dest=dest, **kwargs + ) def __call__(self, parser, namespace, values, option_string=None): pm_choices = {} - for pm, d in filter(lambda t: t[1].get('plot_func', True), - six.iteritems(rcParams['project.plotters'])): - pm_choices[pm] = d.get('summary') or ( - 'Open and plot data via 
:class:`%s.%s` plotters' % ( - d['module'], d['plotter_name'])) + for pm, d in filter( + lambda t: t[1].get("plot_func", True), + six.iteritems(rcParams["project.plotters"]), + ): + pm_choices[pm] = d.get("summary") or ( + "Open and plot data via :class:`%s.%s` plotters" + % (d["module"], d["plotter_name"]) + ) if psyplot._project_imported: import psyplot.project as psy + pm_choices.update(psy.plot._plot_methods) print(yaml.dump(pm_choices, default_flow_style=False)) sys.exit(0) @@ -430,29 +541,41 @@ def __call__(self, parser, namespace, values, option_string=None): class ListDsNamesAction(argparse.Action): """An action to list the used file names in a project""" - def __init__(self, option_strings, dest=argparse.SUPPRESS, nargs=None, - default=argparse.SUPPRESS, **kwargs): + def __init__( + self, + option_strings, + dest=argparse.SUPPRESS, + nargs=None, + default=argparse.SUPPRESS, + **kwargs, + ): if nargs is not None: raise ValueError("nargs not allowed") - kwargs['default'] = default + kwargs["default"] = default super(ListDsNamesAction, self).__init__( - option_strings, nargs=0, dest=dest, **kwargs) + option_strings, nargs=0, dest=dest, **kwargs + ) def __call__(self, parser, namespace, values, option_string=None): if namespace.project is None: - print('A project is required before this argument! Call syntax:\n' - '%s -p .pkl %s' % (parser.prog, option_string)) + print( + "A project is required before this argument! 
Call syntax:\n" + "%s -p .pkl %s" % (parser.prog, option_string) + ) sys.exit(1) - import psyplot.data as psyd import pickle - with open(namespace.project, 'rb') as f: - d = pickle.load(f)['arrays'] - names = list(filter(None, ( - t[0] for t in psyd.ArrayList._get_dsnames(d)))) + + import psyplot.data as psyd + + with open(namespace.project, "rb") as f: + d = pickle.load(f)["arrays"] + names = list( + filter(None, (t[0] for t in psyd.ArrayList._get_dsnames(d))) + ) if names: print(yaml.dump(names, default_flow_style=False)) sys.exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/psyplot/_version.py b/psyplot/_version.py index a0debd9..cc7e4a0 100644 --- a/psyplot/_version.py +++ b/psyplot/_version.py @@ -1,35 +1,15 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: LGPL-3.0-only # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. -# -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . -# -# This file is originally released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) + +# This file is released into the public domain. Generated by +# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer) """Git implementation of _version.py.""" @@ -38,6 +18,7 @@ import re import subprocess import sys +from typing import Callable, Dict def get_keywords(): @@ -75,36 +56,42 @@ class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands, args, cwd=None, verbose=False, hide_stderr=False, env=None +): """Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen( + [command] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break - except EnvironmentError: + except OSError: e = 
sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -116,15 +103,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): @@ -136,19 +121,24 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -161,22 +151,21 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @@ -184,10 +173,14 @@ def git_get_keywords(versionfile_abs): @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. 
However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -200,11 +193,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -213,7 +206,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r"\d", r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -221,23 +214,35 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r"\d", r): + continue if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* @@ -245,11 +250,14 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] + TAG_PREFIX_REGEX = "*" if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] + TAG_PREFIX_REGEX = r"\*" - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + _, rc = runner( + GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True + ) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -257,15 +265,24 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = runner( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s%s" % (tag_prefix, TAG_PREFIX_REGEX), + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() @@ -275,6 +292,40 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner( + GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root + ) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. 
+ branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -283,17 +334,18 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + # unparsable. Maybe git-describe is misbehaving? 
+ pieces["error"] = ( + "unable to parse git-describe output: '%s'" % describe_out + ) return pieces # tag @@ -302,10 +354,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -316,13 +370,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -353,26 +410,77 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. 
Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post( + pieces["closest-tag"] + ) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % ( + post_version + 1, + pieces["distance"], + ) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = "0.post0.dev%d" % pieces["distance"] return rendered @@ -403,12 +511,41 @@ def render_pep440_post(pieces): return rendered +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . 
+ + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - Eexceptions: + Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: @@ -468,21 +605,27 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -492,9 +635,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": 
pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } def get_versions(): @@ -508,8 +655,9 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords( + get_keywords(), cfg.tag_prefix, verbose + ) except NotThisMethod: pass @@ -518,13 +666,16 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): + for _ in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -538,6 +689,10 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/psyplot/compat/__init__.py b/psyplot/compat/__init__.py deleted file mode 100755 index 83b0766..0000000 --- a/psyplot/compat/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Compatibility module for psyplot.""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released 
under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . diff --git a/psyplot/compat/pycompat.py b/psyplot/compat/pycompat.py deleted file mode 100755 index f248053..0000000 --- a/psyplot/compat/pycompat.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Compatibility module for different python versions""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
- -import os -import six -import inspect - -if six.PY3: - - class DictMethods(object): - - @staticmethod - def iteritems(d): - return iter(dict.items(d)) - - @staticmethod - def itervalues(d): - return iter(dict.values(d)) - - @staticmethod - def iterkeys(d): - return iter(dict.keys(d)) - - def getcwd(*args, **kwargs): - return os.getcwd(*args, **kwargs) - - def get_default_value(func, arg): - argspec = inspect.getfullargspec(func) - return next(default for a, default in zip(reversed(argspec[0]), - reversed(argspec.defaults)) - if a == arg) - - basestring = str - unicode_type = str - bytes_type = bytes - range = range - zip = zip - filter = filter - map = map - from functools import reduce - from itertools import filterfalse - import builtins - from queue import Queue - -elif six.PY2: - # Python 2 - - class DictMethods(object): - """okay""" - @staticmethod - def iteritems(d): - "checked" - return dict.iteritems(d) - - @staticmethod - def itervalues(d): - return dict.itervalues(d) - - @staticmethod - def iterkeys(d): - return dict.iterkeys(d) - - def getcwd(*args, **kwargs): - return os.getcwdu(*args, **kwargs) - - def get_default_value(func, arg): - argspec = inspect.getargspec(func) - return next(default for a, default in zip(reversed(argspec[0]), - reversed(argspec.defaults)) - if a == arg) - - basestring = basestring - unicode_type = unicode - bytes_type = str - range = xrange - from itertools import (izip as zip, imap as map, ifilter as filter, - ifilterfalse as filterfalse) - reduce = reduce - import __builtin__ as builtins - from Queue import Queue - -try: - from cyordereddict import OrderedDict -except ImportError: - try: - from collections import OrderedDict - except ImportError: - from ordereddict import OrderedDict - -try: - from collections import UserDict -except ImportError: - from UserDict import IterableUserDict as UserDict - - -def isstring(s): - return isinstance(s, six.string_types) diff --git a/psyplot/config/__init__.py 
b/psyplot/config/__init__.py index 33b6ae4..1b2a52d 100755 --- a/psyplot/config/__init__.py +++ b/psyplot/config/__init__.py @@ -5,38 +5,20 @@ dictionary, however you can set up your own configuration in a yaml file (see :func:`psyplot.load_rc_from_file`)""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only from .logsetup import setup_logging +from .rcsetup import psyplot_fname #: :class:`str`. Path to the yaml logging configuration file logcfg_path = setup_logging() -from .rcsetup import psyplot_fname - - #: class:`str` or ``None``. Path to the yaml configuration file (if found). 
#: See :func:`~psyplot.config.rcsetup.psyplot_fname` for further information config_path = psyplot_fname() diff --git a/psyplot/config/logging.yml b/psyplot/config/logging.yml index cbf8ba9..050ffe0 100755 --- a/psyplot/config/logging.yml +++ b/psyplot/config/logging.yml @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + --- # logging settings for the nc2map module @@ -79,4 +83,4 @@ loggers: propagate: False level: WARNING -... \ No newline at end of file +... diff --git a/psyplot/config/logging_debug.yml b/psyplot/config/logging_debug.yml index fa1e53e..4dc6ec3 100755 --- a/psyplot/config/logging_debug.yml +++ b/psyplot/config/logging_debug.yml @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + --- # debug logging settings (sets the level of the nc2map logger to DEBUG) @@ -79,4 +83,4 @@ loggers: propagate: False level: WARNING -... \ No newline at end of file +... diff --git a/psyplot/config/logsetup.py b/psyplot/config/logsetup.py index 34de24b..9d12a26 100755 --- a/psyplot/config/logsetup.py +++ b/psyplot/config/logsetup.py @@ -3,35 +3,21 @@ This module defines the essential functions for setting up the :class:`logging.Logger` instances that are used by the psyplot package.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only -import os -import six -import sys import logging import logging.config +import os +import sys + +import six import yaml + from psyplot.docstring import dedent @@ -44,7 +30,7 @@ def _get_home(): This function is copied from matplotlib version 1.4.3, Jan 2016 """ try: - if six.PY2 and sys.platform == 'win32': + if six.PY2 and sys.platform == "win32": path = os.path.expanduser(b"~").decode(sys.getfilesystemencoding()) else: path = os.path.expanduser("~") @@ -54,7 +40,7 @@ def _get_home(): else: if os.path.isdir(path): return path - for evar in ('HOME', 'USERPROFILE', 'TMP'): + for evar in ("HOME", "USERPROFILE", "TMP"): path = os.environ.get(evar) if path is not None and os.path.isdir(path): return path @@ -62,8 +48,9 @@ def _get_home(): @dedent -def setup_logging(default_path=None, default_level=logging.INFO, - env_key='LOG_PSYPLOT'): +def setup_logging( + default_path=None, default_level=logging.INFO, env_key="LOG_PSYPLOT" +): """ Setup logging configuration @@ -89,18 +76,18 @@ def setup_logging(default_path=None, default_level=logging.INFO, Function taken from http://victorlin.me/posts/2012/08/26/good-logging-practice-in-python""" path = default_path or os.path.join( - os.path.dirname(__file__), 'logging.yml') + os.path.dirname(__file__), "logging.yml" + ) value = os.getenv(env_key, None) home = _get_home() if value: path = value if os.path.exists(path): - with open(path, 'rt') as f: + 
with open(path, "rt") as f: config = yaml.load(f.read(), Loader=yaml.SafeLoader) - for handler in config.get('handlers', {}).values(): - if '~' in handler.get('filename', ''): - handler['filename'] = handler['filename'].replace( - '~', home) + for handler in config.get("handlers", {}).values(): + if "~" in handler.get("filename", ""): + handler["filename"] = handler["filename"].replace("~", home) logging.config.dictConfig(config) else: path = None diff --git a/psyplot/config/rcsetup.py b/psyplot/config/rcsetup.py index 8124674..d05fe08 100755 --- a/psyplot/config/rcsetup.py +++ b/psyplot/config/rcsetup.py @@ -7,54 +7,39 @@ .. _matplotlib: http://matplotlib.org/api/""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only +import contextlib +import inspect +import logging import os +import re import sys +from collections import UserDict, defaultdict +from itertools import chain + import six -import logging -import re -import inspect import yaml -import contextlib -from itertools import chain -from collections import defaultdict -from psyplot.warning import warn -from psyplot.compat.pycompat import ( - UserDict, DictMethods, getcwd, zip, isstring, map) -from psyplot.docstring import docstrings, dedent, safe_modulo + from psyplot.config.logsetup import _get_home +from psyplot.docstring import dedent, docstrings, safe_modulo +from psyplot.utils import isstring +from psyplot.warning import warn -@docstrings.get_sections(base='safe_list') +@docstrings.get_sections(base="safe_list") @dedent -def safe_list(l): +def safe_list(iterable): """Function to create a list Parameters ---------- - l: iterable or anything else + iterable: iterable or anything else Parameter that shall be converted to a list. 
- If string or any non-iterable, it will be put into a list @@ -64,20 +49,20 @@ def safe_list(l): ------- list `l` put (or converted) into a list""" - if isstring(l): - return [l] + if isstring(iterable): + return [iterable] try: - return list(l) + return list(iterable) except TypeError: - return [l] + return [iterable] -class SubDict(UserDict, dict): +class SubDict(UserDict, dict): # type: ignore """Class that keeps week reference to the base dictionary -This class is used by the :meth:`RcParams.find_and_replace` method -to provide an easy handable instance that keeps reference to the -base rcParams dictionary.""" + This class is used by the :meth:`RcParams.find_and_replace` method + to provide an easy handable instance that keeps reference to the + base rcParams dictionary.""" @property def data(self): @@ -87,7 +72,7 @@ def data(self): -------- iteritems """ - return dict(self.iteritems()) + return dict(list(self.iteritems())) @property def replace(self): @@ -100,22 +85,23 @@ def replace(self, value): def replace_base(key): for pattern in self.patterns: try: - return pattern.match(key).group('key') + return pattern.match(key).group("key") except AttributeError: # if match is None pass raise KeyError( "Could not find any matching key for %s in the base " - "dictionary!" % key) + "dictionary!" % key + ) value = bool(value) - if hasattr(self, '_replace') and value == self._replace: + if hasattr(self, "_replace") and value == self._replace: return - if not hasattr(self, '_replace'): + if not hasattr(self, "_replace"): self._replace = value return # if the value has changed, we change the key in the SubDict instance # to match the ones in the base dictionary (if they exist) - for key, val in DictMethods.iteritems(self): + for key, val in iter(dict.items(self)): try: if value: new_key = replace_base(key) @@ -141,11 +127,12 @@ def replace_base(key): #: :class:`bool`. 
If True, changes are traced back to the :attr:`base` dict trace = False - @docstrings.get_sections(base='SubDict.add_base_str') + @docstrings.get_sections(base="SubDict.add_base_str") @dedent - def add_base_str(self, base_str, pattern='.+', pattern_base=None, - append=True): - """ + def add_base_str( + self, base_str, pattern=".+", pattern_base=None, append=True + ): + r""" Add further base string to this instance Parameters @@ -176,25 +163,40 @@ def add_base_str(self, base_str, pattern='.+', pattern_base=None, base_str = safe_list(base_str) pattern_base = safe_list(pattern_base or []) for i, s in enumerate(base_str): - if '%(key)s' not in s: - base_str[i] += '%(key)s' + if "%(key)s" not in s: + base_str[i] += "%(key)s" if pattern_base: for i, s in enumerate(pattern_base): - if '%(key)s' not in s: - pattern_base[i] += '%(key)s' + if "%(key)s" not in s: + pattern_base[i] += "%(key)s" else: pattern_base = base_str self.base_str = base_str + self.base_str - self.patterns = list(map(lambda s: re.compile(s.replace( - '%(key)s', '(?P%s)' % pattern)), pattern_base)) + \ - self.patterns - - docstrings.delete_params('SubDict.add_base_str.parameters', 'append') - - @docstrings.get_sections(base='SubDict') + self.patterns = ( + list( + map( + lambda s: re.compile( + s.replace("%(key)s", "(?P%s)" % pattern) + ), + pattern_base, + ) + ) + + self.patterns + ) + + docstrings.delete_params("SubDict.add_base_str.parameters", "append") + + @docstrings.get_sections(base="SubDict") @docstrings.dedent - def __init__(self, base, base_str, pattern='.+', pattern_base=None, - trace=False, replace=True): + def __init__( + self, + base, + base_str, + pattern=".+", + pattern_base=None, + trace=False, + replace=True, + ): """ Parameters ---------- @@ -271,11 +273,12 @@ def __init__(self, base, base_str, pattern='.+', pattern_base=None, self.patterns = [] self.replace = bool(replace) self.trace = bool(trace) - self.add_base_str(base_str, pattern=pattern, pattern_base=pattern_base, - 
append=False) + self.add_base_str( + base_str, pattern=pattern, pattern_base=pattern_base, append=False + ) def __getitem__(self, key): - if key in DictMethods.iterkeys(self): + if key in iter(dict.keys(self)): return dict.__getitem__(self, key) if not self.replace: return self.base[key] @@ -312,20 +315,20 @@ def _get_val_and_base(self, key): return m.group(), self.base[m.group()] else: raise KeyError( - "{0} does not match the specified pattern!".format( - s)) - except KeyError as e: + "{0} does not match the specified pattern!".format(s) + ) + except KeyError: pass if not found: if e is not None: raise - raise KeyError("{0} does not match the specified pattern!".format( - key)) + raise KeyError("{0} does not match the specified pattern!".format(key)) def _iter_base_and_pattern(self, key): return zip( - map(lambda s: safe_modulo(s, {'key': key}), self.base_str), - self.patterns) + map(lambda s: safe_modulo(s, {"key": key}), self.base_str), + self.patterns, + ) def iterkeys(self): """Unsorted iterator over keys""" @@ -336,12 +339,12 @@ def iterkeys(self): for pattern in patterns: m = pattern.match(key) if m: - ret = m.group('key') if replace else m.group() + ret = m.group("key") if replace else m.group() if ret not in seen: seen.add(ret) yield ret break - for key in DictMethods.iterkeys(self): + for key in iter(dict.keys(self)): if key not in seen: yield key @@ -359,7 +362,7 @@ def update(self, *args, **kwargs): self[k] = v -docstrings.delete_params('SubDict.parameters', 'base') +docstrings.delete_params("SubDict.parameters", "base") class RcParams(dict): @@ -376,15 +379,20 @@ class RcParams(dict): def validate(self): """Dictionary with validation methods as values""" depr = self._all_deprecated - return dict((key, val[1]) for key, val in - six.iteritems(self.defaultParams) - if key not in depr) + return dict( + (key, val[1]) + for key, val in six.iteritems(self.defaultParams) + if key not in depr + ) @property def descriptions(self): """The description of each 
keyword in the rcParams dictionary""" - return {key: val[2] for key, val in six.iteritems(self.defaultParams) - if len(val) >= 3} + return { + key: val[2] + for key, val in six.iteritems(self.defaultParams) + if len(val) >= 3 + } HEADER = """Configuration parameters of the psyplot module @@ -408,7 +416,7 @@ def _all_deprecated(self): @property def defaultParams(self): - return getattr(self, '_defaultParams', defaultParams) + return getattr(self, "_defaultParams", defaultParams) @defaultParams.setter def defaultParams(self, value): @@ -433,7 +441,7 @@ def __init__(self, *args, **kwargs): *args, **kwargs Any key-value pair for the initialization of the dictionary """ - defaultParams = kwargs.pop('defaultParams', None) + defaultParams = kwargs.pop("defaultParams", None) if defaultParams is not None: self.defaultParams = defaultParams self._deprecated_map = {} @@ -443,8 +451,11 @@ def __init__(self, *args, **kwargs): self[k] = v except (ValueError, RuntimeError): # force the issue - warn(_rcparam_warn_str.format(key=repr(k), value=repr(v), - func='__init__')) + warn( + _rcparam_warn_str.format( + key=repr(k), value=repr(v), func="__init__" + ) + ) dict.__setitem__(self, k, v) def __setitem__(self, key, val): @@ -471,8 +482,9 @@ def _get_depreceated(self, key, *args): return None, None elif key not in self.defaultParams: raise KeyError( - '%s is not a valid rc parameter. See rcParams.keys() for a ' - 'list of valid parameters.' % (key,)) + "%s is not a valid rc parameter. See rcParams.keys() for a " + "list of valid parameters." 
% (key,) + ) return key, args[0] if args else None def __getitem__(self, key): @@ -532,12 +544,14 @@ def update(self, *args, **kwargs): self[k] = v except (ValueError, RuntimeError): # force the issue - warn(_rcparam_warn_str.format(key=repr(k), value=repr(v), - func='update')) + warn( + _rcparam_warn_str.format( + key=repr(k), value=repr(v), func="update" + ) + ) dict.__setitem__(self, k, v) - def update_from_defaultParams(self, defaultParams=None, - plotters=True): + def update_from_defaultParams(self, defaultParams=None, plotters=True): """Update from the a dictionary like the :attr:`defaultParams` Parameters @@ -549,21 +563,29 @@ def update_from_defaultParams(self, defaultParams=None, If True, ``'project.plotters'`` will be updated too""" if defaultParams is None: defaultParams = self.defaultParams - self.update({key: val[0] for key, val in defaultParams.items() - if plotters or key != 'project.plotters'}) + self.update( + { + key: val[0] + for key, val in defaultParams.items() + if plotters or key != "project.plotters" + } + ) def __repr__(self): import pprint + class_name = self.__class__.__name__ indent = len(class_name) + 1 - repr_split = pprint.pformat(dict(self), indent=1, - width=80 - indent).split('\n') - repr_indented = ('\n' + ' ' * indent).join(repr_split) - return '{0}({1})'.format(class_name, repr_indented) + repr_split = pprint.pformat( + dict(self), indent=1, width=80 - indent + ).split("\n") + repr_indented = ("\n" + " " * indent).join(repr_split) + return "{0}({1})".format(class_name, repr_indented) def __str__(self): - return '\n'.join('{0}: {1}'.format(k, v) - for k, v in sorted(self.items())) + return "\n".join( + "{0}: {1}".format(k, v) for k, v in sorted(self.items()) + ) def keys(self): """ @@ -606,8 +628,11 @@ def find_all(self, pattern): pattern_re = re.compile(pattern) ret = RcParams() ret.defaultParams = self.defaultParams - ret.update((key, value) for key, value in self.items() - if pattern_re.search(key)) + ret.update( + (key, 
value) + for key, value in self.items() + if pattern_re.search(key) + ) return ret @docstrings.dedent @@ -672,13 +697,21 @@ def load_from_file(self, fname=None): with open(fname) as f: d = yaml.load(f, Loader=yaml.SafeLoader) self.update(d) - if (d.get('project.plotters.user') and - 'project.plotters' in self): - self['project.plotters'].update(d['project.plotters.user']) - - def dump(self, fname=None, overwrite=True, include_keys=None, - exclude_keys=['project.plotters'], include_descriptions=True, - **kwargs): + if ( + d.get("project.plotters.user") + and "project.plotters" in self + ): + self["project.plotters"].update(d["project.plotters.user"]) + + def dump( + self, + fname=None, + overwrite=True, + include_keys=None, + exclude_keys=["project.plotters"], + include_descriptions=True, + **kwargs, + ): """Dump this instance to a yaml file Parameters @@ -715,38 +748,45 @@ def dump(self, fname=None, overwrite=True, include_keys=None, load_from_file""" if fname is not None and not overwrite and os.path.exists(fname): raise IOError( - '%s already exists! Set overwrite=True to overwrite it!' % ( - fname)) + "%s already exists! Set overwrite=True to overwrite it!" 
+ % (fname) + ) if six.PY2: - kwargs.setdefault('encoding', 'utf-8') - d = {key: val for key, val in six.iteritems(self) if ( - include_keys is None or key in include_keys) and - key not in exclude_keys} - kwargs['default_flow_style'] = False + kwargs.setdefault("encoding", "utf-8") + d = { + key: val + for key, val in six.iteritems(self) + if (include_keys is None or key in include_keys) + and key not in exclude_keys + } + kwargs["default_flow_style"] = False if include_descriptions: s = yaml.dump(d, **kwargs) desc = self.descriptions i = 2 - header = self.HEADER.splitlines() + [ - '', 'Created with python', ''] + sys.version.splitlines() + [ - '', ''] - lines = ['# ' + l for l in header] + s.splitlines() - for l in lines[2:]: - key = l.split(':')[0] + header = ( + self.HEADER.splitlines() + + ["", "Created with python", ""] + + sys.version.splitlines() + + ["", ""] + ) + lines = ["# " + line for line in header] + s.splitlines() + for line in lines[2:]: + key = line.split(":")[0] if key in desc: - lines.insert(i, '# ' + '\n# '.join(desc[key].splitlines())) + lines.insert(i, "# " + "\n# ".join(desc[key].splitlines())) i += 1 i += 1 - s = '\n'.join(lines) + s = "\n".join(lines) if fname is None: return s else: - with open(fname, 'w') as f: + with open(fname, "w") as f: f.write(s) else: if fname is None: return yaml.dump(d, **kwargs) - with open(fname, 'w') as f: + with open(fname, "w") as f: yaml.dump(d, f, **kwargs) return None @@ -760,16 +800,15 @@ def _load_plugin_entrypoints(self): from psyplot.utils import plugin_entrypoints def load_plugin(ep): - try: ep.module except AttributeError: # python<3.10 try: ep.module = ep.pattern.match(ep.value).group("module") - except AttributeError: # python<3.8 + except AttributeError: # python<3.8 ep.module = ep.module_name - if plugins_env == ['no']: + if plugins_env == ["no"]: return False elif ep.module in exclude_plugins: return False @@ -779,19 +818,19 @@ def load_plugin(ep): self._plugins = self._plugins or [] - 
plugins_env = os.getenv('PSYPLOT_PLUGINS', '').split('::') - include_plugins = [s[4:] for s in plugins_env if s.startswith('yes:')] - exclude_plugins = [s[3:] for s in plugins_env if s.startswith('no:')] + plugins_env = os.getenv("PSYPLOT_PLUGINS", "").split("::") + include_plugins = [s[4:] for s in plugins_env if s.startswith("yes:")] + exclude_plugins = [s[3:] for s in plugins_env if s.startswith("no:")] logger = logging.getLogger(__name__) eps = plugin_entrypoints("psyplot", "plugin") for ep in eps: if not load_plugin(ep): - logger.debug('Skipping entrypoint %s', ep) + logger.debug("Skipping entrypoint %s", ep) continue self._plugins.append(str(ep)) - logger.debug('Loading entrypoint %s', ep) + logger.debug("Loading entrypoint %s", ep) yield ep def load_plugins(self, raise_error=False): @@ -809,83 +848,108 @@ def load_plugins(self, raise_error=False): If True, an error is raised when multiple plugins define the same plotter or rcParams key. Otherwise only a warning is raised""" - pm_env = os.getenv('PSYPLOT_PLOTMETHODS', '').split('::') - include_pms = [s[4:] for s in pm_env if s.startswith('yes:')] - exclude_pms = [s[3:] for s in pm_env if s.startswith('no:')] + pm_env = os.getenv("PSYPLOT_PLOTMETHODS", "").split("::") + include_pms = [s[4:] for s in pm_env if s.startswith("yes:")] + exclude_pms = [s[3:] for s in pm_env if s.startswith("no:")] logger = logging.getLogger(__name__) - plotters = self['project.plotters'] - def_plots = {'default': list(plotters)} + plotters = self["project.plotters"] + def_plots = {"default": list(plotters)} defaultParams = self.defaultParams - def_keys = {'default': defaultParams} + def_keys = {"default": defaultParams} def register_pm(ep, name): - full_name = '%s:%s' % (ep.module, name) + full_name = "%s:%s" % (ep.module, name) ret = True - if pm_env == ['no']: + if pm_env == ["no"]: ret = False elif name in exclude_pms or full_name in exclude_pms: ret = False - elif include_pms and (name not in include_pms and - full_name not in 
include_pms): + elif include_pms and ( + name not in include_pms and full_name not in include_pms + ): ret = False if not ret: - logger.debug('Skipping plot method %s', full_name) + logger.debug("Skipping plot method %s", full_name) return ret for ep in self._load_plugin_entrypoints(): try: plugin_mod = ep.load() except (ModuleNotFoundError, ImportError): - logger.debug("Failed to import %s!" % (ep, ), exc_info=True) - logger.warning("Failed to import %s!" % (ep, )) + logger.debug("Failed to import %s!" % (ep,), exc_info=True) + logger.warning("Failed to import %s!" % (ep,)) continue rc = plugin_mod.rcParams # load the plotters plugin_plotters = { - key: val for key, val in rc.get('project.plotters', {}).items() - if register_pm(ep, key)} + key: val + for key, val in rc.get("project.plotters", {}).items() + if register_pm(ep, key) + } already_defined = set(plotters).intersection(plugin_plotters) if already_defined: - msg = ("Error while loading psyplot plugin %s! The " - "following plotters have already been " - "defined") % ep - msg += 'and will be overwritten:' if not raise_error else ':' - msg += '\n' + '\n'.join(chain.from_iterable( - (('%s by %s' % (key, plugin) - for plugin, keys in def_plots.items() if key in keys) - for key in already_defined))) + msg = ( + "Error while loading psyplot plugin %s! 
The " + "following plotters have already been " + "defined" + ) % ep + msg += "and will be overwritten:" if not raise_error else ":" + msg += "\n" + "\n".join( + chain.from_iterable( + ( + ( + "%s by %s" % (key, plugin) + for plugin, keys in def_plots.items() + if key in keys + ) + for key in already_defined + ) + ) + ) if raise_error: raise ImportError(msg) else: warn(msg) for d in plugin_plotters.values(): - d['plugin'] = ep.module + d["plugin"] = ep.module plotters.update(plugin_plotters) def_plots[ep] = list(plugin_plotters) # load the defaultParams keys plugin_defaultParams = rc.defaultParams already_defined = set(defaultParams).intersection( - plugin_defaultParams) - {'project.plotters'} + plugin_defaultParams + ) - {"project.plotters"} if already_defined: - msg = ("Error while loading psyplot plugin %s! The " - "following default keys have already been " - "defined:") % ep - msg += '\n' + '\n'.join(chain.from_iterable( - (('%s by %s' % (key, plugin) - for plugin, keys in def_keys.items() if key in keys) - for key in already_defined))) + msg = ( + "Error while loading psyplot plugin %s! 
The " + "following default keys have already been " + "defined:" + ) % ep + msg += "\n" + "\n".join( + chain.from_iterable( + ( + ( + "%s by %s" % (key, plugin) + for plugin, keys in def_keys.items() + if key in keys + ) + for key in already_defined + ) + ) + ) if raise_error: raise ImportError(msg) else: warn(msg) - update_keys = set(plugin_defaultParams) - {'project.plotters'} + update_keys = set(plugin_defaultParams) - {"project.plotters"} def_keys[ep] = update_keys self.defaultParams.update( - {key: plugin_defaultParams[key] for key in update_keys}) + {key: plugin_defaultParams[key] for key in update_keys} + ) # load the rcParams (without validation) super(RcParams, self).update({key: rc[key] for key in update_keys}) @@ -915,8 +979,7 @@ def catch(self): super().update(save) # reset settings -def psyplot_fname(env_key='PSYPLOTRC', fname='psyplotrc.yml', - if_exists=True): +def psyplot_fname(env_key="PSYPLOTRC", fname="psyplotrc.yml", if_exists=True): """ Get the location of the config file. @@ -960,7 +1023,7 @@ def psyplot_fname(env_key='PSYPLOTRC', fname='psyplotrc.yml', ----- This function is motivated by the :func:`matplotlib.matplotlib_fname` function""" - cwd = getcwd() + cwd = os.getcwd() full_fname = os.path.join(cwd, fname) if os.path.exists(full_fname): return full_fname @@ -984,7 +1047,7 @@ def psyplot_fname(env_key='PSYPLOTRC', fname='psyplotrc.yml', return None -def get_configdir(name='psyplot', env_key='PSYPLOTCONFIGDIR'): +def get_configdir(name="psyplot", env_key="PSYPLOTCONFIGDIR"): """ Return the string representing the configuration directory. 
@@ -1018,14 +1081,15 @@ def get_configdir(name='psyplot', env_key='PSYPLOTCONFIGDIR'): p = None h = _get_home() - if ((sys.platform.startswith('linux') or sys.platform == 'darwin') and - h is not None): - p = os.path.join(h, '.config/' + name) + if ( + sys.platform.startswith("linux") or sys.platform == "darwin" + ) and h is not None: + p = os.path.join(h, ".config/" + name) elif h is not None: - p = os.path.join(h, '.' + name) + p = os.path.join(h, "." + name) if not os.path.exists(p): - os.makedirs(p) + os.makedirs(p, exist_ok=True) return p @@ -1039,10 +1103,9 @@ def validate_path_exists(s): raise ValueError('"%s" should be a path but it does not exist' % s) -def validate_files_exist(l): +def validate_files_exist(files): """Validate if all pathnames in a given list exists""" - return [validate_str(s) and validate_path_exists(s) - for s in l] + return [validate_str(fn) and validate_path_exists(fn) for fn in files] def validate_dict(d): @@ -1072,10 +1135,10 @@ def validate_dict(d): def validate_bool_maybe_none(b): - 'Convert b to a boolean or raise' + "Convert b to a boolean or raise" if isinstance(b, six.string_types): b = b.lower() - if b is None or b == 'none': + if b is None or b == "none": return None return validate_bool(b) @@ -1084,9 +1147,9 @@ def validate_bool(b): """Convert b to a boolean or raise""" if isinstance(b, six.string_types): b = b.lower() - if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): + if b in ("t", "y", "yes", "on", "true", "1", 1, True): return True - elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): + elif b in ("f", "n", "no", "off", "false", "0", 0, False): return False else: raise ValueError('Could not convert "%s" to boolean' % b) @@ -1127,7 +1190,7 @@ def validate_stringlist(s): ------ ValueError""" if isinstance(s, six.string_types): - return [six.text_type(v.strip()) for v in s.split(',') if v.strip()] + return [six.text_type(v.strip()) for v in s.split(",") if v.strip()] else: try: return list(map(validate_str, 
s)) @@ -1156,95 +1219,136 @@ def validate_stringset(*args, **kwargs): #: :class:`dict` with default values and validation functions defaultParams = { # user defined plotter keys - 'plotter.user': [ - {}, validate_dict, - inspect.cleandoc(""" + "plotter.user": [ + {}, + validate_dict, + inspect.cleandoc( + """ formatoption keys and values that are defined by the user to be used by the specified plotters. For example to modify the title of all :class:`psyplot.plotter.maps.FieldPlotter` instances, set - ``{'plotter.fieldplotter.title': 'my title'}``""")], - - 'gridweights.use_cdo': [ - None, validate_bool_maybe_none, - 'Boolean flag to control whether CDOs (Climate Data Operators) should ' - 'be used to calculate grid weights. If None, they are tried to be ' - 'used.'], - + ``{'plotter.fieldplotter.title': 'my title'}``""" + ), + ], + "gridweights.use_cdo": [ + None, + validate_bool_maybe_none, + "Boolean flag to control whether CDOs (Climate Data Operators) should " + "be used to calculate grid weights. 
If None, they are tried to be " + "used.", + ], # decoder - 'decoder.x': [set(), validate_stringset, - 'names that shall be interpreted as the longitudinal x dim'], - 'decoder.y': [set(), validate_stringset, - 'names that shall be interpreted as the latitudinal y dim'], - 'decoder.z': [set(), validate_stringset, - 'names that shall be interpreted as the vertical z dim'], - 'decoder.t': [{'time'}, validate_stringset, - 'names that shall be interpreted as the time dimension'], - 'decoder.interp_kind': [ - 'linear', validate_str, - 'interpolation method to calculate 2D-bounds (see the `kind` parameter' - 'in the :meth:`psyplot.data.CFDecoder.get_plotbounds` method)'], - + "decoder.x": [ + set(), + validate_stringset, + "names that shall be interpreted as the longitudinal x dim", + ], + "decoder.y": [ + set(), + validate_stringset, + "names that shall be interpreted as the latitudinal y dim", + ], + "decoder.z": [ + set(), + validate_stringset, + "names that shall be interpreted as the vertical z dim", + ], + "decoder.t": [ + {"time"}, + validate_stringset, + "names that shall be interpreted as the time dimension", + ], + "decoder.interp_kind": [ + "linear", + validate_str, + "interpolation method to calculate 2D-bounds (see the `kind` parameter" + "in the :meth:`psyplot.data.CFDecoder.get_plotbounds` method)", + ], # specify automatic drawing and showing of figures - 'auto_draw': [True, validate_bool, - ('Automatically draw the figures if the draw keyword in the ' - 'update and start_update methods is None')], - 'auto_show': [False, validate_bool, - ('Automatically show the figures after the update and' - 'start_update methods')], - + "auto_draw": [ + True, + validate_bool, + ( + "Automatically draw the figures if the draw keyword in the " + "update and start_update methods is None" + ), + ], + "auto_show": [ + False, + validate_bool, + ( + "Automatically show the figures after the update and" + "start_update methods" + ), + ], # data - 'datapath': [None, 
validate_path_exists, 'path for supplementary data'], - + "datapath": [None, validate_path_exists, "path for supplementary data"], # list settings - 'lists.auto_update': [True, validate_bool, - 'default value (boolean) for the auto_update ' - 'parameter in the initialization of Plotter, ' - 'Project, etc. instances'], - + "lists.auto_update": [ + True, + validate_bool, + "default value (boolean) for the auto_update " + "parameter in the initialization of Plotter, " + "Project, etc. instances", + ], # project settings # auto_import: If True the plotters in project,plotters are automatically # imported - 'project.auto_import': [False, validate_bool, - 'boolean controlling whether all plotters ' - 'specified in the project.plotters item will be ' - 'automatically imported when importing the ' - 'psyplot.project module'], - 'project.import_seaborn': [ - None, validate_bool_maybe_none, - 'boolean controlling whether the seaborn module shall be imported ' - 'when importing the project module. If None, it is only tried to ' - 'import the module.'], - 'project.plotters': [ - {}, validate_dict, - 'mapping from identifier to plotter definitions for the Project class.' - ' See the :func:`psyplot.project.register_plotter` function for ' - 'possible keywords and values. See ' - ':attr:`psyplot.project.registered_plotters` for examples.'], - - 'project.plotters.user': [ - {}, validate_dict, + "project.auto_import": [ + False, + validate_bool, + "boolean controlling whether all plotters " + "specified in the project.plotters item will be " + "automatically imported when importing the " + "psyplot.project module", + ], + "project.import_seaborn": [ + None, + validate_bool_maybe_none, + "boolean controlling whether the seaborn module shall be imported " + "when importing the project module. If None, it is only tried to " + "import the module.", + ], + "project.plotters": [ + {}, + validate_dict, + "mapping from identifier to plotter definitions for the Project class." 
+ " See the :func:`psyplot.project.register_plotter` function for " + "possible keywords and values. See " + ":attr:`psyplot.project.registered_plotters` for examples.", + ], + "project.plotters.user": [ + {}, + validate_dict, "Plot methods that are defined by the user and overwrite those in the" "``'project.plotters'`` key. Use this if you want to define your own " - "plotters without writing a plugin"], - + "plotters without writing a plugin", + ], # presets - 'presets.trusted': [ - [], validate_files_exist, - "A list of filenames with trusted presets"] - } + "presets.trusted": [ + [], + validate_files_exist, + "A list of filenames with trusted presets", + ], +} -_rcparam_warn_str = ("Trying to set {key} to {value} via the {func} " - "method of RcParams which does not validate cleanly. ") +_rcparam_warn_str = ( + "Trying to set {key} to {value} via the {func} " + "method of RcParams which does not validate cleanly. " +) -_seq_err_msg = ('You must supply exactly {n:d} values, you provided ' - '{num:d} values: {s}') +_seq_err_msg = ( + "You must supply exactly {n:d} values, you provided " "{num:d} values: {s}" +) -_str_err_msg = ('You must supply exactly {n:d} comma-separated values, ' - 'you provided ' - '{num:d} comma-separated values: {s}') +_str_err_msg = ( + "You must supply exactly {n:d} comma-separated values, " + "you provided " + "{num:d} comma-separated values: {s}" +) #: :class:`~psyplot.config.rcsetup.RcParams` instance that stores default #: formatoptions and configuration settings. diff --git a/psyplot/data.py b/psyplot/data.py index 2aacebd..61f5571 100755 --- a/psyplot/data.py +++ b/psyplot/data.py @@ -1,60 +1,46 @@ """Data management core routines of psyplot.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. 
-# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only from __future__ import division + +import datetime as dt +import inspect +import logging import os import os.path as osp -import inspect -from threading import Thread +import re +from collections import defaultdict from functools import partial from glob import glob from importlib import import_module -import re +from itertools import chain, count, cycle, islice, product, repeat, starmap +from queue import Queue +from threading import Thread +from warnings import warn + +import numpy as np import six -from collections import defaultdict -from itertools import chain, product, repeat, starmap, count, cycle, islice import xarray as xr -from xarray.core.utils import NDArrayMixin -from xarray.core.formatting import first_n_items, format_item - import xarray.backends.api as xarray_api from pandas import to_datetime -import numpy as np -import datetime as dt -import logging +from xarray.core.formatting import first_n_items, format_item +from xarray.core.utils import NDArrayMixin + +import psyplot.utils as utils from psyplot.config.rcsetup import rcParams, safe_list from psyplot.docstring import dedent, 
docstrings -from psyplot.compat.pycompat import ( - zip, map, isstring, OrderedDict, filter, range, getcwd, - Queue) +from psyplot.utils import isstring from psyplot.warning import PsyPlotRuntimeWarning -from warnings import warn -import psyplot.utils as utils try: - import dask + import dask # noqa: F401 + with_dask = True except ImportError: with_dask = False @@ -70,7 +56,7 @@ _NODATA = object -VARIABLELABEL = 'variable' +VARIABLELABEL = "variable" logger = logging.getLogger(__name__) @@ -78,7 +64,7 @@ _ds_counter = count(1) -xr_version = tuple(map(int, xr.__version__.split('.')[:2])) +xr_version = tuple(map(int, xr.__version__.split(".")[:2])) def _no_auto_update_getter(self): @@ -99,7 +85,7 @@ def _no_auto_update_getter(self): >>> data.no_auto_update = True >>> data.update(time=1) >>> data.no_auto_update = False # reenable automatical update""" - if getattr(self, '_no_auto_update', None) is not None: + if getattr(self, "_no_auto_update", None) is not None: return self._no_auto_update else: self._no_auto_update = utils._TempBool() @@ -148,7 +134,7 @@ def _fix_times(dims): dims[key] = to_datetime([val])[0] -@docstrings.get_sections(base='setup_coords') +@docstrings.get_sections(base="setup_coords") @dedent def setup_coords(arr_names=None, sort=[], dims={}, **kwargs): """ @@ -166,12 +152,12 @@ def setup_coords(arr_names=None, sort=[], dims={}, **kwargs): number of dictionaries in the return depend in this case on the `dims` and ``**furtherdims`` - dictionary: - Then nothing happens and an :class:`OrderedDict` version of + Then nothing happens and an :class:`dict` version of `arr_names` is returned. sort: list of strings This parameter defines how the dictionaries are ordered. It has no effect if `arr_names` is a dictionary (use a - :class:`~collections.OrderedDict` for that). It can be a list of + :class:`dict` for that). It can be a list of dimension strings matching to the dimensions in `dims` for the variable. 
dims: dict @@ -188,23 +174,23 @@ def setup_coords(arr_names=None, sort=[], dims={}, **kwargs): Returns ------- - ~collections.OrderedDict + dict A mapping from the keys in `arr_names` and to dictionaries. Each dictionary corresponds defines the coordinates of one data array to load""" try: - return OrderedDict(arr_names) + return dict(arr_names) except (ValueError, TypeError): - # ValueError for cyordereddict, TypeError for collections.OrderedDict + # ValueError for cydict, TypeError for dic pass if arr_names is None: - arr_names = repeat('arr{0}') + arr_names = repeat("arr{0}") elif isstring(arr_names): arr_names = repeat(arr_names) - dims = OrderedDict(dims) + dims = dict(dims) for key, val in six.iteritems(kwargs): dims.setdefault(key, val) - sorted_dims = OrderedDict() + sorted_dims = dict() if sort: for key in sort: sorted_dims[key] = dims.pop(key) @@ -212,19 +198,22 @@ def setup_coords(arr_names=None, sort=[], dims={}, **kwargs): sorted_dims[key] = val else: # make sure, it is first sorted for the variable names - if 'name' in dims: - sorted_dims['name'] = None + if "name" in dims: + sorted_dims["name"] = None for key, val in sorted(dims.items()): sorted_dims[key] = val for key, val in six.iteritems(kwargs): sorted_dims.setdefault(key, val) for key, val in six.iteritems(sorted_dims): sorted_dims[key] = iter(safe_list(val)) - return OrderedDict([ - (arr_name.format(i), dict(zip(sorted_dims.keys(), dim_tuple))) - for i, (arr_name, dim_tuple) in enumerate(zip( - arr_names, product( - *map(list, sorted_dims.values()))))]) + return dict( + [ + (arr_name.format(i), dict(zip(sorted_dims.keys(), dim_tuple))) + for i, (arr_name, dim_tuple) in enumerate( + zip(arr_names, product(*map(list, sorted_dims.values()))) + ) + ] + ) def to_slice(arr): @@ -287,16 +276,16 @@ def get_index_from_coord(coord, base_index): #: mapping that translates datetime format strings to regex patterns t_patterns = { - '%Y': '[0-9]{4}', - '%m': '[0-9]{1,2}', - '%d': '[0-9]{1,2}', - '%H': 
'[0-9]{1,2}', - '%M': '[0-9]{1,2}', - '%S': '[0-9]{1,2}', - } + "%Y": "[0-9]{4}", + "%m": "[0-9]{1,2}", + "%d": "[0-9]{1,2}", + "%H": "[0-9]{1,2}", + "%M": "[0-9]{1,2}", + "%S": "[0-9]{1,2}", +} -@docstrings.get_sections(base='get_tdata') +@docstrings.get_sections(base="get_tdata") @dedent def get_tdata(t_format, files): """ @@ -322,27 +311,41 @@ def get_tdata(t_format, files): References ---------- .. [1] https://docs.python.org/2/library/datetime.html""" + def median(arr): - return arr.min() + (arr.max() - arr.min())/2 + return arr.min() + (arr.max() - arr.min()) / 2 + import re + from pandas import Index + t_pattern = t_format for fmt, patt in t_patterns.items(): t_pattern = t_pattern.replace(fmt, patt) t_pattern = re.compile(t_pattern) time = list(range(len(files))) for i, f in enumerate(files): - time[i] = median(np.array(list(map( - lambda s: np.datetime64(dt.datetime.strptime(s, t_format)), - t_pattern.findall(f))))) + time[i] = median( + np.array( + list( + map( + lambda s: np.datetime64( + dt.datetime.strptime(s, t_format) + ), + t_pattern.findall(f), + ) + ) + ) + ) ind = np.argsort(time) # sort according to time files = np.array(files)[ind] time = np.array(time)[ind] - return to_datetime(Index(time, name='time')), files + return to_datetime(Index(time, name="time")), files -docstrings.get_sections(xr.Dataset.to_netcdf.__doc__, - 'xarray.Dataset.to_netcdf') +docstrings.get_sections( + xr.Dataset.to_netcdf.__doc__, "xarray.Dataset.to_netcdf" +) @docstrings.dedent @@ -362,13 +365,17 @@ def to_netcdf(ds, *args, **kwargs): """ to_update = {} for v, obj in six.iteritems(ds.variables): - units = obj.attrs.get('units', obj.encoding.get('units', None)) - if units == 'day as %Y%m%d.%f' and np.issubdtype( - obj.dtype, np.datetime64): + units = obj.attrs.get("units", obj.encoding.get("units", None)) + if units == "day as %Y%m%d.%f" and np.issubdtype( + obj.dtype, np.datetime64 + ): to_update[v] = xr.Variable( - obj.dims, AbsoluteTimeEncoder(obj), 
attrs=obj.attrs.copy(), - encoding=obj.encoding) - to_update[v].attrs['units'] = units + obj.dims, + AbsoluteTimeEncoder(obj), + attrs=obj.attrs.copy(), + encoding=obj.encoding, + ) + to_update[v].attrs["units"] = units if to_update: ds = ds.copy() ds.update(to_update) @@ -377,7 +384,7 @@ def to_netcdf(ds, *args, **kwargs): def _get_fname_netCDF4(store): """Try to get the file name from the NetCDF4DataStore store""" - return getattr(store, '_filename', None) + return getattr(store, "_filename", None) def _get_fname_scipy(store): @@ -420,11 +427,12 @@ def connect(self, func): self._connections.append(func) def emit(self, *args, **kwargs): - if (not getattr(self.owner, 'block_signals', False) and - not getattr(self.instance, 'block_signals', False)): - logger.debug('Emitting signal %s', self.name) + if not getattr(self.owner, "block_signals", False) and not getattr( + self.instance, "block_signals", False + ): + logger.debug("Emitting signal %s", self.name) for func in self._connections[:]: - logger.debug('Calling %s', func) + logger.debug("Calling %s", func) func(*args, **kwargs) def disconnect(self, func=None): @@ -451,7 +459,7 @@ def __get__(self, instance, owner): get_fname_funcs = [_get_fname_netCDF4, _get_fname_scipy, _get_fname_nio] -@docstrings.get_sections(base='get_filename_ds') +@docstrings.get_sections(base="get_filename_ds") @docstrings.dedent def get_filename_ds(ds, dump=True, paths=None, **kwargs): """ @@ -502,12 +510,12 @@ def dump_nc(): # make sure that the data store is not closed by providing a # write argument if xr_version < (0, 11): - kwargs.setdefault('writer', xarray_api.ArrayWriter()) + kwargs.setdefault("writer", xarray_api.ArrayWriter()) store = to_netcdf(ds, fname, **kwargs) else: # `writer` parameter was removed by # https://github.com/pydata/xarray/pull/2261 - kwargs.setdefault('multifile', True) + kwargs.setdefault("multifile", True) store = to_netcdf(ds, fname, **kwargs)[1] store_mod = store.__module__ store_cls = 
store.__class__.__name__ @@ -516,39 +524,7 @@ def dump_nc(): def tmp_it(): while True: - yield NamedTemporaryFile(suffix='.nc').name - - def _legacy_get_filename_ds(ds): - # try to get the filename from the data store of the obj - # - # Outdated possibility since the backend plugin methodology of - # xarray 0.18 - if store_mod is not None: - store = ds._file_obj - # try several engines - if hasattr(store, 'file_objs'): - fname = [] - store_mod = [] - store_cls = [] - for obj in store.file_objs: # mfdataset - _fname = None - for func in get_fname_funcs: - if _fname is None: - _fname = func(obj) - if _fname is not None: - fname.append(_fname) - store_mod.append(obj.__module__) - store_cls.append(obj.__class__.__name__) - fname = tuple(fname) - store_mod = tuple(store_mod) - store_cls = tuple(store_cls) - else: - for func in get_fname_funcs: - fname = func(store) - if fname is not None: - break - - return fname, store_mod, store_cls + yield NamedTemporaryFile(suffix=".nc").name fname = None if paths is True or (dump and paths is None): @@ -559,14 +535,11 @@ def _legacy_get_filename_ds(ds): else: paths = iter(paths) store_mod, store_cls = ds.psy.data_store - if xr_plugins is None: - fname, store_mod, store_cls = _legacy_get_filename_ds(ds) - elif "source" in ds.encoding: + if "source" in ds.encoding: fname = ds.encoding["source"] store_mod = None store_cls = None - # check if paths is provided and if yes, save the file if fname is None and paths is not None: fname = next(paths, None) @@ -592,9 +565,9 @@ def logger(self): try: return self._logger except AttributeError: - name = '%s.%s' % (self.__module__, self.__class__.__name__) + name = "%s.%s" % (self.__module__, self.__class__.__name__) self._logger = logging.getLogger(name) - self.logger.debug('Initializing...') + self.logger.debug("Initializing...") return self._logger @logger.setter @@ -603,10 +576,10 @@ def logger(self, value): def __init__(self, ds=None, x=None, y=None, z=None, t=None): self.ds = ds - self.x = 
rcParams['decoder.x'].copy() if x is None else set(x) - self.y = rcParams['decoder.y'].copy() if y is None else set(y) - self.z = rcParams['decoder.z'].copy() if z is None else set(z) - self.t = rcParams['decoder.t'].copy() if t is None else set(t) + self.x = rcParams["decoder.x"].copy() if x is None else set(x) + self.y = rcParams["decoder.y"].copy() if y is None else set(y) + self.z = rcParams["decoder.z"].copy() if z is None else set(z) + self.t = rcParams["decoder.t"].copy() if t is None else set(t) @staticmethod def register_decoder(decoder_class, pos=0): @@ -624,8 +597,9 @@ def register_decoder(decoder_class, pos=0): CFDecoder._registry.insert(pos, decoder_class) @classmethod - @docstrings.get_sections(base='CFDecoder.can_decode', sections=['Parameters', - 'Returns']) + @docstrings.get_sections( + base="CFDecoder.can_decode", sections=["Parameters", "Returns"] + ) def can_decode(cls, ds, var): """ Class method to determine whether the object can be decoded by this @@ -673,8 +647,9 @@ def get_decoder(cls, ds, var, *args, **kwargs): return CFDecoder(ds, *args, **kwargs) @staticmethod - @docstrings.get_sections(base='CFDecoder.decode_coords', sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.decode_coords", sections=["Parameters", "Returns"] + ) def decode_coords(ds, gridfile=None): """ Sets the coordinates and bounds in a dataset @@ -696,14 +671,16 @@ def decode_coords(ds, gridfile=None): ------- xarray.Dataset `ds` with additional coordinates""" + def add_attrs(obj): - if 'coordinates' in obj.attrs: - extra_coords.update(obj.attrs['coordinates'].split()) - obj.encoding['coordinates'] = obj.attrs.pop('coordinates') - if 'grid_mapping' in obj.attrs: - extra_coords.add(obj.attrs['grid_mapping']) - if 'bounds' in obj.attrs: - extra_coords.add(obj.attrs['bounds']) + if "coordinates" in obj.attrs: + extra_coords.update(obj.attrs["coordinates"].split()) + obj.encoding["coordinates"] = obj.attrs.pop("coordinates") + if "grid_mapping" 
in obj.attrs: + extra_coords.add(obj.attrs["grid_mapping"]) + if "bounds" in obj.attrs: + extra_coords.add(obj.attrs["bounds"]) + if gridfile is not None and not isinstance(gridfile, xr.Dataset): gridfile = open_dataset(gridfile) extra_coords = set(ds.coords) @@ -711,23 +688,30 @@ def add_attrs(obj): add_attrs(v) add_attrs(ds) if gridfile is not None: - ds.update({k: v for k, v in six.iteritems(gridfile.variables) - if k in extra_coords}) + ds.update( + { + k: v + for k, v in six.iteritems(gridfile.variables) + if k in extra_coords + } + ) if xr_version < (0, 11): - ds.set_coords(extra_coords.intersection(ds.variables), - inplace=True) + ds.set_coords( + extra_coords.intersection(ds.variables), inplace=True + ) else: ds._coord_names.update(extra_coords.intersection(ds.variables)) return ds - @docstrings.get_sections(base='CFDecoder.is_unstructured', sections=[ - 'Parameters', 'Returns']) - - @docstrings.get_sections(base= - 'CFDecoder.get_cell_node_coord', - sections=['Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.is_unstructured", sections=["Parameters", "Returns"] + ) + @docstrings.get_sections( + base="CFDecoder.get_cell_node_coord", + sections=["Parameters", "Returns"], + ) @dedent - def get_cell_node_coord(self, var, coords=None, axis='x', nans=None): + def get_cell_node_coord(self, var, coords=None, axis="x", nans=None): """ Checks whether the bounds in the variable attribute are triangular @@ -751,65 +735,82 @@ def get_cell_node_coord(self, var, coords=None, axis='x', nans=None): if coords is None: coords = self.ds.coords axis = axis.lower() - get_coord = self.get_x if axis == 'x' else self.get_y + get_coord = self.get_x if axis == "x" else self.get_y coord = get_coord(var, coords=coords) if coord is not None: - bounds = self._get_coord_cell_node_coord(coord, coords, nans, - var=var) + bounds = self._get_coord_cell_node_coord( + coord, coords, nans, var=var + ) if bounds is None: bounds = self.get_plotbounds(coord) if bounds.ndim == 
1: dim0 = coord.dims[-1] bounds = xr.DataArray( np.dstack([bounds[:-1], bounds[1:]])[0], - dims=(dim0, '_bnds'), attrs=coord.attrs.copy(), - name=coord.name + '_bnds') + dims=(dim0, "_bnds"), + attrs=coord.attrs.copy(), + name=coord.name + "_bnds", + ) elif bounds.ndim == 2: warn("2D bounds are not yet sufficiently tested!") bounds = xr.DataArray( - np.dstack([bounds[1:, 1:].ravel(), - bounds[1:, :-1].ravel(), - bounds[:-1, :-1].ravel(), - bounds[:-1, 1:].ravel()])[0], - dims=(''.join(var.dims[-2:]), '_bnds'), + np.dstack( + [ + bounds[1:, 1:].ravel(), + bounds[1:, :-1].ravel(), + bounds[:-1, :-1].ravel(), + bounds[:-1, 1:].ravel(), + ] + )[0], + dims=("".join(var.dims[-2:]), "_bnds"), attrs=coord.attrs.copy(), - name=coord.name + '_bnds') + name=coord.name + "_bnds", + ) else: raise NotImplementedError( - "More than 2D-bounds are not supported") + "More than 2D-bounds are not supported" + ) if bounds is not None and bounds.shape[-1] == 2: # normal CF-Conventions for rectangular grids arr = bounds.values - if axis == 'y': - stacked = np.c_[arr[..., :1], arr[..., :1], - arr[..., 1:], arr[..., 1:]] + if axis == "y": + stacked = np.c_[ + arr[..., :1], arr[..., :1], arr[..., 1:], arr[..., 1:] + ] if bounds.ndim == 2: stacked = np.repeat( stacked.reshape((-1, 4)), - len(self.get_x(var, coords)), axis=0) + len(self.get_x(var, coords)), + axis=0, + ) else: stacked = stacked.reshape((-1, 4)) else: stacked = np.c_[arr, arr[..., ::-1]] if bounds.ndim == 2: stacked = np.tile( - stacked, (len(self.get_y(var, coords)), 1)) + stacked, (len(self.get_y(var, coords)), 1) + ) else: stacked = stacked.reshape((-1, 4)) bounds = xr.DataArray( stacked, - dims=('cell', bounds.dims[1]), name=bounds.name, - attrs=bounds.attrs) + dims=("cell", bounds.dims[1]), + name=bounds.name, + attrs=bounds.attrs, + ) return bounds return None - docstrings.delete_params('CFDecoder.get_cell_node_coord.parameters', - 'var', 'axis') + docstrings.delete_params( + "CFDecoder.get_cell_node_coord.parameters", 
"var", "axis" + ) @docstrings.dedent - def _get_coord_cell_node_coord(self, coord, coords=None, nans=None, - var=None): + def _get_coord_cell_node_coord( + self, coord, coords=None, nans=None, var=None + ): """ Get the boundaries of an unstructed coordinate @@ -823,39 +824,47 @@ def _get_coord_cell_node_coord(self, coord, coords=None, nans=None, ------- %(CFDecoder.get_cell_node_coord.returns)s """ - bounds = coord.attrs.get('bounds') + bounds = coord.attrs.get("bounds") if bounds is not None: bounds = self.ds.coords.get(bounds) if bounds is not None: if coords is not None: - bounds = bounds.sel(**{ - key: coords[key] - for key in set(coords).intersection(bounds.dims)}) + bounds = bounds.sel( + **{ + key: coords[key] + for key in set(coords).intersection(bounds.dims) + } + ) if nans is not None and var is None: raise ValueError("Need the variable to deal with NaN!") elif nans is None: pass - elif nans == 'skip': + elif nans == "skip": dims = [dim for dim in set(var.dims) - set(bounds.dims)] mask = var.notnull().all(list(dims)) if dims else var.notnull() try: bounds = bounds[mask.values] except IndexError: # 3D bounds bounds = bounds.where(mask) - elif nans == 'only': + elif nans == "only": dims = [dim for dim in set(var.dims) - set(bounds.dims)] mask = var.isnull().all(list(dims)) if dims else var.isnull() bounds = bounds[mask.values] else: raise ValueError( "`nans` must be either None, 'skip', or 'only'! 
" - "Not {0}!".format(str(nans))) + "Not {0}!".format(str(nans)) + ) return bounds - @docstrings.get_sections(base='CFDecoder._check_unstructured_bounds', sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder._check_unstructured_bounds", + sections=["Parameters", "Returns"], + ) @docstrings.dedent - def _check_unstructured_bounds(self, var, coords=None, axis='x', nans=None): + def _check_unstructured_bounds( + self, var, coords=None, axis="x", nans=None + ): """ Checks whether the bounds in the variable attribute are triangular @@ -893,12 +902,16 @@ def is_unstructured(self, var): ----- Currently this is the same as :meth:`is_unstructured` method, but may change in the future to support hexagonal grids""" - if str(var.attrs.get('grid_type')) == 'unstructured': + if str(var.attrs.get("grid_type")) == "unstructured": return True xcoord = self.get_x(var) if xcoord is not None: bounds = self._get_coord_cell_node_coord(xcoord) - if bounds is not None and bounds.ndim == 2 and bounds.shape[-1] > 2: + if ( + bounds is not None + and bounds.ndim == 2 + and bounds.shape[-1] > 2 + ): return True @docstrings.dedent @@ -973,39 +986,48 @@ def get_coord(cname, raise_error=True): idims = var.psy.idims except AttributeError: # got xarray.Variable idims = {} - return ret.isel(**{d: sl for d, sl in idims.items() - if d in ret.dims}) + return ret.isel( + **{d: sl for d, sl in idims.items() if d in ret.dims} + ) axis = axis.lower() - if axis not in list('xyzt'): - raise ValueError("Axis must be one of X, Y, Z, T, not {0}".format( - axis)) + if axis not in list("xyzt"): + raise ValueError( + "Axis must be one of X, Y, Z, T, not {0}".format(axis) + ) # we first check for the dimensions and then for the coordinates # attribute coords = coords or self.ds.coords - coord_names = var.attrs.get('coordinates', var.encoding.get( - 'coordinates', '')).split() + coord_names = var.attrs.get( + "coordinates", var.encoding.get("coordinates", "") + ).split() if not 
coord_names: return ret = [] matched = [] - for coord in map(lambda dim: coords[dim], filter( - lambda dim: dim in coords, chain( - coord_names, var.dims))): + for coord in map( + lambda dim: coords[dim], + filter(lambda dim: dim in coords, chain(coord_names, var.dims)), + ): # check for the axis attribute or whether the coordinate is in the # list of possible coordinate names if coord.name not in (c.name for c in ret): if coord.name in getattr(self, axis): matched.append(coord) - elif coord.attrs.get('axis', '').lower() == axis: + elif coord.attrs.get("axis", "").lower() == axis: ret.append(coord) if matched: if len(set([c.name for c in matched])) > 1: - warn("Found multiple matches for %s coordinate in the " - "coordinates: %s. I use %s" % ( - axis, ', '.join([c.name for c in matched]), - matched[0].name), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for %s coordinate in the " + "coordinates: %s. I use %s" + % ( + axis, + ", ".join([c.name for c in matched]), + matched[0].name, + ), + PsyPlotRuntimeWarning, + ) return matched[0] elif ret: return None if len(ret) > 1 else ret[0] @@ -1017,29 +1039,37 @@ def get_coord(cname, raise_error=True): # for latitude and longitude. 
This is not very nice, hence it is # better to specify the :attr:`x` and :attr:`y` attribute tnames = self.t.intersection(coord_names) - if axis == 'x': - for cname in filter(lambda cname: re.search('lon', cname), - coord_names): + if axis == "x": + for cname in filter( + lambda cname: re.search("lon", cname), coord_names + ): return get_coord(cname) return get_coord(coord_names[-1], raise_error=False) - elif axis == 'y' and len(coord_names) >= 2: - for cname in filter(lambda cname: re.search('lat', cname), - coord_names): + elif axis == "y" and len(coord_names) >= 2: + for cname in filter( + lambda cname: re.search("lat", cname), coord_names + ): return get_coord(cname) return get_coord(coord_names[-2], raise_error=False) - elif (axis == 'z' and len(coord_names) >= 3 and - coord_names[-3] not in tnames): + elif ( + axis == "z" + and len(coord_names) >= 3 + and coord_names[-3] not in tnames + ): return get_coord(coord_names[-3], raise_error=False) - elif axis == 't' and tnames: + elif axis == "t" and tnames: tname = next(iter(tnames)) if len(tnames) > 1: - warn("Found multiple matches for time coordinate in the " - "coordinates: %s. I use %s" % (', '.join(tnames), tname), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for time coordinate in the " + "coordinates: %s. 
I use %s" % (", ".join(tnames), tname), + PsyPlotRuntimeWarning, + ) return get_coord(tname, raise_error=False) - @docstrings.get_sections(base="CFDecoder.get_x", sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_x", sections=["Parameters", "Returns"] + ) @dedent def get_x(self, var, coords=None): """ @@ -1064,7 +1094,7 @@ def get_x(self, var, coords=None): xarray.Coordinate or None The y-coordinate or None if it could be found""" coords = coords or self.ds.coords - coord = self.get_variable_by_axis(var, 'x', coords) + coord = self.get_variable_by_axis(var, "x", coords) if coord is not None: return coord return coords.get(self.get_xname(var)) @@ -1092,22 +1122,25 @@ def get_xname(self, var, coords=None): -------- get_x""" if coords is not None: - coord = self.get_variable_by_axis(var, 'x', coords) + coord = self.get_variable_by_axis(var, "x", coords) if coord is not None and coord.name in var.dims: return coord.name dimlist = list(self.x.intersection(var.dims)) if dimlist: if len(dimlist) > 1: - warn("Found multiple matches for x coordinate in the variable:" - "%s. I use %s" % (', '.join(dimlist), dimlist[0]), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for x coordinate in the variable:" + "%s. 
I use %s" % (", ".join(dimlist), dimlist[0]), + PsyPlotRuntimeWarning, + ) return dimlist[0] # otherwise we return the coordinate in the last position if var.dims: return var.dims[-1] - @docstrings.get_sections(base="CFDecoder.get_y", sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_y", sections=["Parameters", "Returns"] + ) @dedent def get_y(self, var, coords=None): """ @@ -1133,7 +1166,7 @@ def get_y(self, var, coords=None): xarray.Coordinate or None The y-coordinate or None if it could be found""" coords = coords or self.ds.coords - coord = self.get_variable_by_axis(var, 'y', coords) + coord = self.get_variable_by_axis(var, "y", coords) if coord is not None: return coord return coords.get(self.get_yname(var)) @@ -1161,15 +1194,17 @@ def get_yname(self, var, coords=None): -------- get_y""" if coords is not None: - coord = self.get_variable_by_axis(var, 'y', coords) + coord = self.get_variable_by_axis(var, "y", coords) if coord is not None and coord.name in var.dims: return coord.name dimlist = list(self.y.intersection(var.dims)) if dimlist: if len(dimlist) > 1: - warn("Found multiple matches for y coordinate in the variable:" - "%s. I use %s" % (', '.join(dimlist), dimlist[0]), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for y coordinate in the variable:" + "%s. 
I use %s" % (", ".join(dimlist), dimlist[0]), + PsyPlotRuntimeWarning, + ) return dimlist[0] # otherwise we return the coordinate in the last or second last # position @@ -1178,8 +1213,9 @@ def get_yname(self, var, coords=None): return var.dims[-1] return var.dims[-2 if var.ndim > 1 else -1] - @docstrings.get_sections(base="CFDecoder.get_z", sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_z", sections=["Parameters", "Returns"] + ) @dedent def get_z(self, var, coords=None): """ @@ -1205,7 +1241,7 @@ def get_z(self, var, coords=None): xarray.Coordinate or None The z-coordinate or None if no z coordinate could be found""" coords = coords or self.ds.coords - coord = self.get_variable_by_axis(var, 'z', coords) + coord = self.get_variable_by_axis(var, "z", coords) if coord is not None: return coord zname = self.get_zname(var) @@ -1237,28 +1273,34 @@ def get_zname(self, var, coords=None): -------- get_z""" if coords is not None: - coord = self.get_variable_by_axis(var, 'z', coords) + coord = self.get_variable_by_axis(var, "z", coords) if coord is not None and coord.name in var.dims: return coord.name dimlist = list(self.z.intersection(var.dims)) if dimlist: if len(dimlist) > 1: - warn("Found multiple matches for z coordinate in the variable:" - "%s. I use %s" % (', '.join(dimlist), dimlist[0]), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for z coordinate in the variable:" + "%s. 
I use %s" % (", ".join(dimlist), dimlist[0]), + PsyPlotRuntimeWarning, + ) return dimlist[0] # otherwise we return the coordinate in the third last position if var.dims: is_unstructured = self.is_unstructured(var) icheck = -2 if is_unstructured else -3 - min_dim = abs(icheck) if 'variable' not in var.dims else abs(icheck-1) + min_dim = ( + abs(icheck) if "variable" not in var.dims else abs(icheck - 1) + ) if var.ndim >= min_dim and var.dims[icheck] != self.get_tname( - var, coords): + var, coords + ): return var.dims[icheck] return None - @docstrings.get_sections(base="CFDecoder.get_t", sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_t", sections=["Parameters", "Returns"] + ) @dedent def get_t(self, var, coords=None): """ @@ -1283,16 +1325,18 @@ def get_t(self, var, coords=None): xarray.Coordinate or None The time coordinate or None if no time coordinate could be found""" coords = coords or self.ds.coords - coord = self.get_variable_by_axis(var, 't', coords) + coord = self.get_variable_by_axis(var, "t", coords) if coord is not None: return coord dimlist = list(self.t.intersection(var.dims).intersection(coords)) if dimlist: if len(dimlist) > 1: - warn("Found multiple matches for time coordinate in the " - "variable: %s. I use %s" % ( - ', '.join(dimlist), dimlist[0]), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for time coordinate in the " + "variable: %s. 
I use %s" + % (", ".join(dimlist), dimlist[0]), + PsyPlotRuntimeWarning, + ) return coords[dimlist[0]] tname = self.get_tname(var) if tname is not None: @@ -1321,15 +1365,17 @@ def get_tname(self, var, coords=None): -------- get_t""" if coords is not None: - coord = self.get_variable_by_axis(var, 't', coords) + coord = self.get_variable_by_axis(var, "t", coords) if coord is not None and coord.name in var.dims: return coord.name dimlist = list(self.t.intersection(var.dims)) if dimlist: if len(dimlist) > 1: - warn("Found multiple matches for t coordinate in the variable:" - "%s. I use %s" % (', '.join(dimlist), dimlist[0]), - PsyPlotRuntimeWarning) + warn( + "Found multiple matches for t coordinate in the variable:" + "%s. I use %s" % (", ".join(dimlist), dimlist[0]), + PsyPlotRuntimeWarning, + ) return dimlist[0] # otherwise we return None return None @@ -1363,14 +1409,19 @@ def get_idims(self, arr, coords=None): coords = arr.coords else: coords = { - label: coord for label, coord in six.iteritems(arr.coords) - if label in coords} + label: coord + for label, coord in six.iteritems(arr.coords) + if label in coords + } ret = self.get_coord_idims(coords) # handle the coordinates that are not in the dataset missing = set(arr.dims).difference(ret) if missing: - warn('Could not get slices for the following dimensions: %r' % ( - missing, ), PsyPlotRuntimeWarning) + warn( + "Could not get slices for the following dimensions: %r" + % (missing,), + PsyPlotRuntimeWarning, + ) return ret def get_coord_idims(self, coords): @@ -1393,11 +1444,13 @@ def get_coord_idims(self, coords): ret = dict( (label, get_index_from_coord(coord, self.ds.indexes[label])) for label, coord in six.iteritems(coords) - if label in self.ds.indexes) + if label in self.ds.indexes + ) return ret - @docstrings.get_sections(base='CFDecoder.get_plotbounds', sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_plotbounds", sections=["Parameters", "Returns"] + ) @dedent def 
get_plotbounds(self, coord, kind=None, ignore_shape=False): """ @@ -1426,8 +1479,8 @@ def get_plotbounds(self, coord, kind=None, ignore_shape=False): additional array (i.e. if `coord` has shape (4, ), `bounds` will have shape (5, ) and if `coord` has shape (4, 5), `bounds` will have shape (5, 6)""" - if 'bounds' in coord.attrs: - bounds = self.ds.coords[coord.attrs['bounds']] + if "bounds" in coord.attrs: + bounds = self.ds.coords[coord.attrs["bounds"]] if ignore_shape: return bounds.values.ravel() if not bounds.shape[:-1] == coord.shape: @@ -1435,8 +1488,10 @@ def get_plotbounds(self, coord, kind=None, ignore_shape=False): try: return self._get_plotbounds_from_cf(coord, bounds) except ValueError as e: - warn((e.message if six.PY2 else str(e)) + - " Bounds are calculated automatically!") + warn( + (e.message if six.PY2 else str(e)) + + " Bounds are calculated automatically!" + ) return self._infer_interval_breaks(coord, kind=kind) @staticmethod @@ -1464,22 +1519,34 @@ def _get_plotbounds_from_cf(coord, bounds): raise ValueError( "Cannot interprete bounds with shape {0} for {1} " "coordinate with shape {2}.".format( - bounds.shape, coord.name, coord.shape)) - ret = np.zeros(tuple(map(lambda i: i+1, coord.shape))) + bounds.shape, coord.name, coord.shape + ) + ) + ret = np.zeros(tuple(map(lambda i: i + 1, coord.shape))) ret[tuple(map(slice, coord.shape))] = bounds[..., 0] last_slices = tuple(slice(-1, None) for _ in coord.shape) ret[last_slices] = bounds[tuple(chain(last_slices, [1]))] return ret - docstrings.keep_params('CFDecoder._check_unstructured_bounds.parameters', - 'nans') + docstrings.keep_params( + "CFDecoder._check_unstructured_bounds.parameters", "nans" + ) - @docstrings.get_sections(base='CFDecoder.get_triangles', sections=[ - 'Parameters', 'Returns']) + @docstrings.get_sections( + base="CFDecoder.get_triangles", sections=["Parameters", "Returns"] + ) @docstrings.dedent - def get_triangles(self, var, coords=None, convert_radian=True, - copy=False, 
src_crs=None, target_crs=None, - nans=None, stacklevel=1): + def get_triangles( + self, + var, + coords=None, + convert_radian=True, + copy=False, + src_crs=None, + target_crs=None, + nans=None, + stacklevel=1, + ): """ Get the triangles for the variable @@ -1512,33 +1579,38 @@ def get_triangles(self, var, coords=None, convert_radian=True, ------ ValueError If `src_crs` is not None and `target_crs` is None""" - warn("The 'get_triangles' method is depreceated and will be removed " - "soon! Use the 'get_cell_node_coord' method!", - DeprecationWarning, stacklevel=stacklevel) + warn( + "The 'get_triangles' method is depreceated and will be removed " + "soon! Use the 'get_cell_node_coord' method!", + DeprecationWarning, + stacklevel=stacklevel, + ) from matplotlib.tri import Triangulation def get_vertices(axis): - bounds = self._check_unstructured_bounds(var, coords=coords, - axis=axis, nans=nans)[1] + bounds = self._check_unstructured_bounds( + var, coords=coords, axis=axis, nans=nans + )[1] if coords is not None: bounds = coords.get(bounds.name, bounds) vertices = bounds.values.ravel() if convert_radian: - coord = getattr(self, 'get_' + axis)(var) - if coord.attrs.get('units') == 'radian': - vertices = vertices * 180. / np.pi + coord = getattr(self, "get_" + axis)(var) + if coord.attrs.get("units") == "radian": + vertices = vertices * 180.0 / np.pi return vertices if not copy else vertices.copy() if coords is None: coords = self.ds.coords - xvert = get_vertices('x') - yvert = get_vertices('y') + xvert = get_vertices("x") + yvert = get_vertices("y") if src_crs is not None and src_crs != target_crs: if target_crs is None: raise ValueError( "Found %s for the source crs but got None for the " - "target_crs!" % (src_crs, )) + "target_crs!" 
% (src_crs,) + ) arr = target_crs.transform_points(src_crs, xvert, yvert) xvert = arr[:, 0] yvert = arr[:, 1] @@ -1546,7 +1618,8 @@ def get_vertices(axis): return Triangulation(xvert, yvert, triangles) docstrings.delete_params( - 'CFDecoder.get_plotbounds.parameters', 'ignore_shape') + "CFDecoder.get_plotbounds.parameters", "ignore_shape" + ) @staticmethod def _infer_interval_breaks(coord, kind=None): @@ -1568,17 +1641,19 @@ def _infer_interval_breaks(coord, kind=None): return _infer_interval_breaks(coord) elif coord.ndim == 2: from scipy.interpolate import interp2d - kind = kind or rcParams['decoder.interp_kind'] + + kind = kind or rcParams["decoder.interp_kind"] y, x = map(np.arange, coord.shape) new_x, new_y = map(_infer_interval_breaks, [x, y]) coord = np.asarray(coord) return interp2d(x, y, coord, kind=kind, copy=False)(new_x, new_y) @classmethod - @docstrings.get_sections(base='CFDecoder._decode_ds') + @docstrings.get_sections(base="CFDecoder._decode_ds") @docstrings.dedent - def _decode_ds(cls, ds, gridfile=None, decode_coords=True, - decode_times=True): + def _decode_ds( + cls, ds, gridfile=None, decode_coords=True, decode_times=True + ): """ Static method to decode coordinates and time informations @@ -1601,11 +1676,15 @@ def _decode_ds(cls, ds, gridfile=None, decode_coords=True, for k, v in six.iteritems(ds.variables): # check for absolute time units and make sure the data is not # already decoded via dtype check - if v.attrs.get('units', '') == 'day as %Y%m%d.%f' and ( - np.issubdtype(v.dtype, np.float64)): + if v.attrs.get("units", "") == "day as %Y%m%d.%f" and ( + np.issubdtype(v.dtype, np.float64) + ): decoded = xr.Variable( - v.dims, AbsoluteTimeDecoder(v), attrs=v.attrs, - encoding=v.encoding) + v.dims, + AbsoluteTimeDecoder(v), + attrs=v.attrs, + encoding=v.encoding, + ) ds.update({k: decoded}) return ds @@ -1642,13 +1721,16 @@ def correct_dims(self, var, dims={}, remove=True): remove: bool If True, dimensions in `dims` that are not in the 
dimensions of `var` are removed""" - method_mapping = {'x': self.get_xname, - 'z': self.get_zname, 't': self.get_tname} + method_mapping = { + "x": self.get_xname, + "z": self.get_zname, + "t": self.get_tname, + } dims = dict(dims) if self.is_unstructured(var): # we assume a one-dimensional grid - method_mapping['y'] = self.get_xname + method_mapping["y"] = self.get_xname else: - method_mapping['y'] = self.get_yname + method_mapping["y"] = self.get_yname for key in six.iterkeys(dims.copy()): if key in method_mapping and key not in var.dims: dim_name = method_mapping[key](var, self.ds.coords) @@ -1664,7 +1746,9 @@ def correct_dims(self, var, dims={}, remove=True): dims.pop(key) self.logger.debug( "Could not find a dimensions matching %s in variable %s!", - key, var) + key, + var, + ) return dims def standardize_dims(self, var, dims={}): @@ -1682,10 +1766,12 @@ def standardize_dims(self, var, dims={}): dict The dictionary with replaced dimensions""" dims = dict(dims) - name_map = {self.get_xname(var, self.ds.coords): 'x', - self.get_yname(var, self.ds.coords): 'y', - self.get_zname(var, self.ds.coords): 'z', - self.get_tname(var, self.ds.coords): 't'} + name_map = { + self.get_xname(var, self.ds.coords): "x", + self.get_yname(var, self.ds.coords): "y", + self.get_zname(var, self.ds.coords): "z", + self.get_tname(var, self.ds.coords): "t", + } dims = dict(dims) for dim in set(dims).intersection(name_map): dims[name_map[dim]] = dims.pop(dim) @@ -1720,7 +1806,7 @@ def get_mesh(self, var, coords=None): ------- xarray.Coordinate The mesh coordinate""" - mesh = var.attrs.get('mesh') + mesh = var.attrs.get("mesh") if mesh is None: return None if coords is None: @@ -1746,8 +1832,17 @@ def can_decode(cls, ds, var): return cls(ds).get_mesh(var) is not None @docstrings.dedent - def get_triangles(self, var, coords=None, convert_radian=True, copy=False, - src_crs=None, target_crs=None, nans=None, stacklevel=1): + def get_triangles( + self, + var, + coords=None, + 
convert_radian=True, + copy=False, + src_crs=None, + target_crs=None, + nans=None, + stacklevel=1, + ): """ Get the of the given coordinate. @@ -1768,9 +1863,12 @@ def get_triangles(self, var, coords=None, convert_radian=True, copy=False, .. todo:: Implement the visualization for UGrid data shown on the edge of the triangles""" - warn("The 'get_triangles' method is depreceated and will be removed " - "soon! Use the 'get_cell_node_coord' method!", - DeprecationWarning, stacklevel=stacklevel) + warn( + "The 'get_triangles' method is depreceated and will be removed " + "soon! Use the 'get_cell_node_coord' method!", + DeprecationWarning, + stacklevel=stacklevel, + ) from matplotlib.tri import Triangulation if coords is None: @@ -1786,42 +1884,45 @@ def get_coord(coord): xvert, yvert = nodes xvert = xvert.values yvert = yvert.values - loc = var.attrs.get('location', 'face') - if loc == 'face': + loc = var.attrs.get("location", "face") + if loc == "face": triangles = get_coord( - mesh.attrs.get('face_node_connectivity', '')).values + mesh.attrs.get("face_node_connectivity", "") + ).values if triangles is None: raise ValueError( - "Could not find the connectivity information!") - elif loc == 'node': + "Could not find the connectivity information!" + ) + elif loc == "node": triangles = None else: raise ValueError( "Could not interprete location attribute (%s) of mesh " - "variable %s!" % (loc, mesh.name)) + "variable %s!" % (loc, mesh.name) + ) if convert_radian: for coord in nodes: - if coord.attrs.get('units') == 'radian': - coord = coord * 180. / np.pi + if coord.attrs.get("units") == "radian": + coord = coord * 180.0 / np.pi if src_crs is not None and src_crs != target_crs: if target_crs is None: raise ValueError( "Found %s for the source crs but got None for the " - "target_crs!" % (src_crs, )) + "target_crs!" 
% (src_crs,) + ) xvert = xvert[triangles].ravel() yvert = yvert[triangles].ravel() arr = target_crs.transform_points(src_crs, xvert, yvert) xvert = arr[:, 0] yvert = arr[:, 1] - if loc == 'face': - triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, - 3)) + if loc == "face": + triangles = np.reshape(range(len(xvert)), (len(xvert) // 3, 3)) return Triangulation(xvert, yvert, triangles) @docstrings.dedent - def get_cell_node_coord(self, var, coords=None, axis='x', nans=None): + def get_cell_node_coord(self, var, coords=None, axis="x", nans=None): """ Checks whether the bounds in the variable attribute are triangular @@ -1839,51 +1940,68 @@ def get_cell_node_coord(self, var, coords=None, axis='x', nans=None): def get_coord(coord): coord = coords.get(coord, self.ds.coords.get(coord)) - return coord.isel(**{d: sl for d, sl in idims.items() - if d in coord.dims}) + return coord.isel( + **{d: sl for d, sl in idims.items() if d in coord.dims} + ) mesh = self.get_mesh(var, coords) if mesh is None: return nodes = self.get_nodes(mesh, coords) if not len(nodes): - raise ValueError("Could not find the nodes variables for the %s " - "coordinate!" % axis) - vert = nodes[0 if axis == 'x' else 1] + raise ValueError( + "Could not find the nodes variables for the %s " + "coordinate!" % axis + ) + vert = nodes[0 if axis == "x" else 1] if vert is None: - raise ValueError("Could not find the nodes variables for the %s " - "coordinate!" % axis) - loc = var.attrs.get('location', 'face') - if loc == 'node': + raise ValueError( + "Could not find the nodes variables for the %s " + "coordinate!" 
% axis + ) + loc = var.attrs.get("location", "face") + if loc == "node": # we assume a triangular grid and use matplotlibs triangulation from matplotlib.tri import Triangulation + xvert, yvert = nodes triangles = Triangulation(xvert, yvert) - if axis == 'x': + if axis == "x": bounds = triangles.x[triangles.triangles] else: bounds = triangles.y[triangles.triangles] - elif loc in ['edge', 'face']: + elif loc in ["edge", "face"]: connectivity = get_coord( - mesh.attrs.get('%s_node_connectivity' % loc, '')) + mesh.attrs.get("%s_node_connectivity" % loc, "") + ) if connectivity is None: raise ValueError( - "Could not find the connectivity information!") + "Could not find the connectivity information!" + ) connectivity = connectivity.values bounds = vert.values[ - np.where(np.isnan(connectivity), connectivity[:, :1], - connectivity).astype(int)] + np.where( + np.isnan(connectivity), connectivity[:, :1], connectivity + ).astype(int) + ] else: raise ValueError( "Could not interprete location attribute (%s) of mesh " - "variable %s!" % (loc, mesh.name)) - dim0 = '__face' if loc == 'node' else var.dims[-1] + "variable %s!" 
% (loc, mesh.name) + ) + dim0 = "__face" if loc == "node" else var.dims[-1] return xr.DataArray( bounds, - coords={key: val for key, val in coords.items() - if (dim0, ) == val.dims}, - dims=(dim0, '__bnds', ), - name=vert.name + '_bnds', attrs=vert.attrs.copy()) + coords={ + key: val for key, val in coords.items() if (dim0,) == val.dims + }, + dims=( + dim0, + "__bnds", + ), + name=vert.name + "_bnds", + attrs=vert.attrs.copy(), + ) @staticmethod @docstrings.dedent @@ -1900,28 +2018,36 @@ def decode_coords(ds, gridfile=None): %(CFDecoder.decode_coords.returns)s""" extra_coords = set(ds.coords) for var in six.itervalues(ds.variables): - if 'mesh' in var.attrs: - mesh = var.attrs['mesh'] + if "mesh" in var.attrs: + mesh = var.attrs["mesh"] if mesh not in extra_coords: extra_coords.add(mesh) try: mesh_var = ds.variables[mesh] except KeyError: - warn('Could not find mesh variable %s' % mesh) + warn("Could not find mesh variable %s" % mesh) continue - if 'node_coordinates' in mesh_var.attrs: + if "node_coordinates" in mesh_var.attrs: extra_coords.update( - mesh_var.attrs['node_coordinates'].split()) - if 'face_node_connectivity' in mesh_var.attrs: + mesh_var.attrs["node_coordinates"].split() + ) + if "face_node_connectivity" in mesh_var.attrs: extra_coords.add( - mesh_var.attrs['face_node_connectivity']) + mesh_var.attrs["face_node_connectivity"] + ) if gridfile is not None and not isinstance(gridfile, xr.Dataset): gridfile = open_dataset(gridfile) - ds.update({k: v for k, v in six.iteritems(gridfile.variables) - if k in extra_coords}) + ds.update( + { + k: v + for k, v in six.iteritems(gridfile.variables) + if k in extra_coords + } + ) if xr_version < (0, 11): - ds.set_coords(extra_coords.intersection(ds.variables), - inplace=True) + ds.set_coords( + extra_coords.intersection(ds.variables), inplace=True + ) else: ds._coord_names.update(extra_coords.intersection(ds.variables)) return ds @@ -1935,10 +2061,13 @@ def get_nodes(self, coord, coords): The mesh variable 
coords: dict The coordinates to use to get node coordinates""" + def get_coord(coord): return coords.get(coord, self.ds.coords.get(coord)) - return list(map(get_coord, - coord.attrs.get('node_coordinates', '').split()[:2])) + + return list( + map(get_coord, coord.attrs.get("node_coordinates", "").split()[:2]) + ) @docstrings.dedent def get_x(self, var, coords=None): @@ -1958,9 +2087,12 @@ def get_x(self, var, coords=None): ret = super(UGridDecoder, self).get_x(var, coords) # but if that doesn't work because we get the variable name in the # dimension of `var`, we use the means of the triangles - if ret is None or ret.name in var.dims or (hasattr(var, 'mesh') and - ret.name == var.mesh): - bounds = self.get_cell_node_coord(var, axis='x', coords=coords) + if ( + ret is None + or ret.name in var.dims + or (hasattr(var, "mesh") and ret.name == var.mesh) + ): + bounds = self.get_cell_node_coord(var, axis="x", coords=coords) if bounds is not None: centers = bounds.mean(axis=-1) x = self.get_nodes(self.get_mesh(var, coords), coords)[0] @@ -1990,9 +2122,12 @@ def get_y(self, var, coords=None): ret = super(UGridDecoder, self).get_y(var, coords) # but if that doesn't work because we get the variable name in the # dimension of `var`, we use the means of the triangles - if ret is None or ret.name in var.dims or (hasattr(var, 'mesh') and - ret.name == var.mesh): - bounds = self.get_cell_node_coord(var, axis='y', coords=coords) + if ( + ret is None + or ret.name in var.dims + or (hasattr(var, "mesh") and ret.name == var.mesh) + ): + bounds = self.get_cell_node_coord(var, axis="y", coords=coords) if bounds is not None: centers = bounds.mean(axis=-1) y = self.get_nodes(self.get_mesh(var, coords), coords)[1] @@ -2008,17 +2143,25 @@ def get_y(self, var, coords=None): # register the UGridDecoder CFDecoder.register_decoder(UGridDecoder) -docstrings.keep_params('CFDecoder.decode_coords.parameters', 'gridfile') -docstrings.get_sections(inspect.cleandoc( - 
xr.open_dataset.__doc__.split('\n', 1)[1]), - 'xarray.open_dataset') -docstrings.delete_params('xarray.open_dataset.parameters', 'engine') +docstrings.keep_params("CFDecoder.decode_coords.parameters", "gridfile") +docstrings.get_sections( + inspect.cleandoc(xr.open_dataset.__doc__.split("\n", 1)[1]), + "xarray.open_dataset", +) +docstrings.delete_params("xarray.open_dataset.parameters", "engine") -@docstrings.get_sections(base='open_dataset') +@docstrings.get_sections(base="open_dataset") @docstrings.dedent -def open_dataset(filename_or_obj, decode_cf=True, decode_times=True, - decode_coords=True, engine=None, gridfile=None, **kwargs): +def open_dataset( + filename_or_obj, + decode_cf=True, + decode_times=True, + decode_coords=True, + engine=None, + gridfile=None, + **kwargs, +): """ Open an instance of :class:`xarray.Dataset`. @@ -2043,41 +2186,59 @@ def open_dataset(filename_or_obj, decode_cf=True, decode_times=True, # use the absolute path name (is saver when saving the project) if isstring(filename_or_obj) and osp.exists(filename_or_obj): filename_or_obj = osp.abspath(filename_or_obj) - if engine == 'gdal': + if engine == "gdal": from psyplot.gdal_store import GdalStore + filename_or_obj = GdalStore(filename_or_obj) engine = None - ds = xr.open_dataset(filename_or_obj, decode_cf=decode_cf, - decode_coords=False, engine=engine, - decode_times=decode_times, **kwargs) + ds = xr.open_dataset( + filename_or_obj, + decode_cf=decode_cf, + decode_coords=False, + engine=engine, + decode_times=decode_times, + **kwargs, + ) if isstring(filename_or_obj): ds.psy.filename = filename_or_obj if decode_cf: ds = CFDecoder.decode_ds( - ds, decode_coords=decode_coords, decode_times=decode_times, - gridfile=gridfile) + ds, + decode_coords=decode_coords, + decode_times=decode_times, + gridfile=gridfile, + ) return ds docstrings.get_sections( - inspect.cleandoc(xr.open_mfdataset.__doc__.split('\n', 1)[1]), - 'xarray.open_mfdataset') 
-docstrings.delete_params('xarray.open_mfdataset.parameters', 'engine') -docstrings.keep_params('get_tdata.parameters', 't_format') + inspect.cleandoc(xr.open_mfdataset.__doc__.split("\n", 1)[1]), + "xarray.open_mfdataset", +) +docstrings.delete_params("xarray.open_mfdataset.parameters", "engine") +docstrings.keep_params("get_tdata.parameters", "t_format") -docstrings.params['xarray.open_mfdataset.parameters.no_engine'] = \ - docstrings.params['xarray.open_mfdataset.parameters.no_engine'].replace( - '**kwargs', '``**kwargs``').replace('"path/to/my/files/*.nc"', - '``"path/to/my/files/*.nc"``') +docstrings.params["xarray.open_mfdataset.parameters.no_engine"] = ( + docstrings.params["xarray.open_mfdataset.parameters.no_engine"] + .replace("**kwargs", "``**kwargs``") + .replace('"path/to/my/files/*.nc"', '``"path/to/my/files/*.nc"``') +) -docstrings.keep_params('open_dataset.parameters', 'engine') +docstrings.keep_params("open_dataset.parameters", "engine") @docstrings.dedent -def open_mfdataset(paths, decode_cf=True, decode_times=True, - decode_coords=True, engine=None, gridfile=None, - t_format=None, **kwargs): +def open_mfdataset( + paths, + decode_cf=True, + decode_times=True, + decode_coords=True, + engine=None, + gridfile=None, + t_format=None, + **kwargs, +): """ Open multiple files as a single dataset. 
@@ -2098,39 +2259,48 @@ def open_mfdataset(paths, decode_cf=True, decode_times=True, ------- xarray.Dataset The dataset that contains the variables from `filename_or_obj`""" - if t_format is not None or engine == 'gdal': + if t_format is not None or engine == "gdal": if isinstance(paths, six.string_types): paths = sorted(glob(paths)) if not paths: - raise IOError('no files to open') + raise IOError("no files to open") if t_format is not None: time, paths = get_tdata(t_format, paths) - kwargs['concat_dim'] = 'time' + kwargs["concat_dim"] = "time" if xr_version > (0, 11): - kwargs['combine'] = 'nested' + kwargs["combine"] = "nested" if all(map(isstring, paths)): filenames = list(paths) else: filenames = None - if engine == 'gdal': + if engine == "gdal": from psyplot.gdal_store import GdalStore + paths = list(map(GdalStore, paths)) engine = None if xr_version < (0, 18): - kwargs['lock'] = False + kwargs["lock"] = False ds = xr.open_mfdataset( - paths, decode_cf=decode_cf, decode_times=decode_times, engine=engine, - decode_coords=False, **kwargs) + paths, + decode_cf=decode_cf, + decode_times=decode_times, + engine=engine, + decode_coords=False, + **kwargs, + ) ds.psy.filename = filenames if decode_cf: - ds = CFDecoder.decode_ds(ds, gridfile=gridfile, - decode_coords=decode_coords, - decode_times=decode_times) - ds.psy._concat_dim = kwargs.get('concat_dim') - ds.psy._combine = kwargs.get('combine') + ds = CFDecoder.decode_ds( + ds, + gridfile=gridfile, + decode_coords=decode_coords, + decode_times=decode_times, + ) + ds.psy._concat_dim = kwargs.get("concat_dim") + ds.psy._combine = kwargs.get("combine") if t_format is not None: - ds['time'] = time + ds["time"] = time return ds @@ -2159,8 +2329,9 @@ def plotter(self, value): def plotter(self): self._plotter = None - no_auto_update = property(_no_auto_update_getter, - doc=_no_auto_update_getter.__doc__) + no_auto_update = property( + _no_auto_update_getter, doc=_no_auto_update_getter.__doc__ + ) @property def plot(self): 
@@ -2181,6 +2352,7 @@ def plot(self): psyplot.project.DataArrayPlotter: for the different plot methods""" if self._plot is None: import psyplot.project as psy + self._plot = psy.DataArrayPlotter(self) return self._plot @@ -2196,10 +2368,13 @@ def logger(self): try: return self._logger except AttributeError: - name = '%s.%s.%s' % (self.__module__, self.__class__.__name__, - self.arr_name) + name = "%s.%s.%s" % ( + self.__module__, + self.__class__.__name__, + self.arr_name, + ) self._logger = logging.getLogger(name) - self.logger.debug('Initializing...') + self.logger.debug("Initializing...") return self._logger @logger.setter @@ -2215,24 +2390,26 @@ def ax(self): def ax(self, value): if self.plotter is None: raise ValueError( - 'Cannot set the axes because the plotter attribute is None!') + "Cannot set the axes because the plotter attribute is None!" + ) self.plotter.ax = value block_signals = utils._temp_bool_prop( - 'block_signals', "Block the emitting of signals of this instance") + "block_signals", "Block the emitting of signals of this instance" + ) # ------------------------------------------------------------------------- # -------------------------------- SIGNALS -------------------------------- # ------------------------------------------------------------------------- #: :class:`Signal` to be emitted when the object has been updated - onupdate = Signal('_onupdate') + onupdate = Signal("_onupdate") _onupdate = None _plotter = None @property - @docstrings.get_docstring(base='InteractiveBase._njobs') + @docstrings.get_docstring(base="InteractiveBase._njobs") @dedent def _njobs(self): """ @@ -2264,9 +2441,9 @@ def arr_name(self, value): _no_auto_update = None - @docstrings.get_sections(base='InteractiveBase') + @docstrings.get_sections(base="InteractiveBase") @dedent - def __init__(self, plotter=None, arr_name='arr0', auto_update=None): + def __init__(self, plotter=None, arr_name="arr0", auto_update=None): """ Parameters ---------- @@ -2284,7 +2461,7 @@ def 
__init__(self, plotter=None, arr_name='arr0', auto_update=None): self.plotter = plotter self.arr_name = arr_name if auto_update is None: - auto_update = rcParams['lists.auto_update'] + auto_update = rcParams["lists.auto_update"] self.no_auto_update = not bool(auto_update) self.replot = False @@ -2294,10 +2471,11 @@ def _finish_all(self, queues): for i in range(n): queue.task_done() - @docstrings.get_sections(base='InteractiveBase._register_update') + @docstrings.get_sections(base="InteractiveBase._register_update") @dedent - def _register_update(self, replot=False, fmt={}, force=False, - todefault=False): + def _register_update( + self, replot=False, fmt={}, force=False, todefault=False + ): """ Register new formatoptions for updating @@ -2326,11 +2504,13 @@ def _register_update(self, replot=False, fmt={}, force=False, start_update""" self.replot = self.replot or replot if self.plotter is not None: - self.plotter._register_update(replot=self.replot, fmt=fmt, - force=force, todefault=todefault) + self.plotter._register_update( + replot=self.replot, fmt=fmt, force=force, todefault=todefault + ) - @docstrings.get_sections(base='InteractiveBase.start_update', - sections=['Parameters', 'Returns']) + @docstrings.get_sections( + base="InteractiveBase.start_update", sections=["Parameters", "Returns"] + ) @dedent def start_update(self, draw=None, queues=None): """ @@ -2368,13 +2548,22 @@ def start_update(self, draw=None, queues=None): if self.plotter is not None: return self.plotter.start_update(draw=draw, queues=queues) - docstrings.keep_params('InteractiveBase.start_update.parameters', 'draw') + docstrings.keep_params("InteractiveBase.start_update.parameters", "draw") - @docstrings.get_sections(base='InteractiveBase.update', - sections=['Parameters', 'Notes']) + @docstrings.get_sections( + base="InteractiveBase.update", sections=["Parameters", "Notes"] + ) @docstrings.dedent - def update(self, fmt={}, replot=False, draw=None, auto_update=False, - force=False, 
todefault=False, **kwargs): + def update( + self, + fmt={}, + replot=False, + draw=None, + auto_update=False, + force=False, + todefault=False, + **kwargs, + ): """ Update the coordinates and the plot @@ -2404,19 +2593,21 @@ def update(self, fmt={}, replot=False, draw=None, auto_update=False, fmt = dict(fmt) fmt.update(kwargs) - self._register_update(replot=replot, fmt=fmt, force=force, - todefault=todefault) + self._register_update( + replot=replot, fmt=fmt, force=force, todefault=todefault + ) if not self.no_auto_update or auto_update: self.start_update(draw=draw) def to_interactive_list(self): """Return a :class:`InteractiveList` that contains this object""" - raise NotImplementedError('Not implemented for the %s class' % ( - self.__class__.__name__, )) + raise NotImplementedError( + "Not implemented for the %s class" % (self.__class__.__name__,) + ) -@xr.register_dataarray_accessor('psy') +@xr.register_dataarray_accessor("psy") class InteractiveArray(InteractiveBase): """Interactive psyplot accessor for the data array @@ -2430,23 +2621,27 @@ class InteractiveArray(InteractiveBase): def base(self): """Base dataset this instance gets its data from""" if self._base is None: - if 'variable' in self.arr.dims: + if "variable" in self.arr.dims: + def to_dataset(i): ret = self.isel(variable=i).to_dataset( - name=self.arr.coords['variable'].values[i]) + name=self.arr.coords["variable"].values[i] + ) try: - return ret.drop_vars('variable') + return ret.drop_vars("variable") except ValueError: # 'variable' Variable not defined pass return ret + ds = to_dataset(0) - if len(self.arr.coords['variable']) > 1: - for i in range(1, len(self.arr.coords['variable'])): + if len(self.arr.coords["variable"]) > 1: + for i in range(1, len(self.arr.coords["variable"])): ds.update(ds.merge(to_dataset(i))) self._base = ds else: self._base = self.arr.to_dataset( - name=self.arr.name or self.arr_name) + name=self.arr.name or self.arr_name + ) self.onbasechange.emit() return self._base @@ 
-2497,7 +2692,7 @@ def _njobs(self): # -------------- SIGNALS -------------------------------------------------- #: :class:`Signal` to be emiited when the base of the object changes - onbasechange = Signal('_onbasechange') + onbasechange = Signal("_onbasechange") _onbasechange = None @docstrings.dedent @@ -2528,8 +2723,9 @@ def __init__(self, xarray_obj, *args, **kwargs): self._new_dims = {} self.method = None - def init_accessor(self, base=None, idims=None, decoder=None, - *args, **kwargs): + def init_accessor( + self, base=None, idims=None, decoder=None, *args, **kwargs + ): """ Initialize the accessor instance @@ -2557,8 +2753,12 @@ def init_accessor(self, base=None, idims=None, decoder=None, def iter_base_variables(self): """An iterator over the base variables in the :attr:`base` dataset""" if VARIABLELABEL in self.arr.coords: - return (self._get_base_var(name) for name in safe_list( - self.arr.coords[VARIABLELABEL].values.tolist())) + return ( + self._get_base_var(name) + for name in safe_list( + self.arr.coords[VARIABLELABEL].values.tolist() + ) + ) name = self.arr.name if name is None: return iter([self.arr._variable]) @@ -2575,21 +2775,33 @@ def base_variables(self): """A mapping from the variable name to the variablein the :attr:`base` dataset.""" if VARIABLELABEL in self.arr.coords: - return OrderedDict([ - (name, self._get_base_var(name)) for name in safe_list( - self.arr.coords[VARIABLELABEL].values.tolist())]) + return dict( + [ + (name, self._get_base_var(name)) + for name in safe_list( + self.arr.coords[VARIABLELABEL].values.tolist() + ) + ] + ) name = self.arr.name if name is None: return {name: self.arr._variable} else: return {self.arr.name: self.base.variables[self.arr.name]} - docstrings.keep_params('setup_coords.parameters', 'dims') + docstrings.keep_params("setup_coords.parameters", "dims") - @docstrings.get_sections(base='InteractiveArray._register_update') + @docstrings.get_sections(base="InteractiveArray._register_update") 
@docstrings.dedent - def _register_update(self, method='isel', replot=False, dims={}, fmt={}, - force=False, todefault=False): + def _register_update( + self, + method="isel", + replot=False, + dims={}, + fmt={}, + force=False, + todefault=False, + ): """ Register new dimensions and formatoptions for updating @@ -2610,22 +2822,27 @@ def _register_update(self, method='isel', replot=False, dims={}, fmt={}, if self._new_dims and self.method != method: raise ValueError( "New dimensions were already specified for with the %s method!" - " I can not choose a new method %s" % (self.method, method)) + " I can not choose a new method %s" % (self.method, method) + ) else: self.method = method - if 'name' in dims: - self._new_dims['name'] = dims.pop('name') - if 'name' in self._new_dims: - name = self._new_dims['name'] + if "name" in dims: + self._new_dims["name"] = dims.pop("name") + if "name" in self._new_dims: + name = self._new_dims["name"] if not isstring(name): name = name[0] # concatenated array arr = self.base[name] else: - arr= next(six.itervalues(self.base_variables)) + arr = next(six.itervalues(self.base_variables)) self._new_dims.update(self.decoder.correct_dims(arr, dims)) InteractiveBase._register_update( - self, fmt=fmt, replot=replot or bool(self._new_dims), force=force, - todefault=todefault) + self, + fmt=fmt, + replot=replot or bool(self._new_dims), + force=force, + todefault=todefault, + ) def _update_concatenated(self, dims, method): """Updates a concatenated array to new dimensions""" @@ -2641,17 +2858,19 @@ def is_unequal(v1, v2): def filter_attrs(item): """Checks whether the attribute is from the base variable""" - return (item[0] not in self.base.attrs or - is_unequal(item[1], self.base.attrs[item[0]])) + return item[0] not in self.base.attrs or is_unequal( + item[1], self.base.attrs[item[0]] + ) + saved_attrs = list(filter(filter_attrs, six.iteritems(self.arr.attrs))) saved_name = self.arr.name - self.arr.name = 'None' - if 'name' in dims: - name = 
dims.pop('name') + self.arr.name = "None" + if "name" in dims: + name = dims.pop("name") else: - name = list(self.arr.coords['variable'].values) + name = list(self.arr.coords["variable"].values) base_dims = self.base[name].dims - if method == 'isel': + if method == "isel": self.idims.update(dims) dims = self.idims for dim in set(base_dims) - set(dims): @@ -2662,26 +2881,28 @@ def filter_attrs(item): else: self._idims = None for key, val in six.iteritems(self.arr.coords): - if key in base_dims and key != 'variable': + if key in base_dims and key != "variable": dims.setdefault(key, val) kws = dims.copy() # the sel method does not work with slice objects if not any(isinstance(idx, slice) for idx in dims.values()): - kws['method'] = method + kws["method"] = method try: res = self.base[name].sel(**kws) except KeyError: _fix_times(kws) res = self.base[name].sel(**kws) res = res.to_array() - if 'coordinates' in self.base[name[0]].encoding: - res.encoding['coordinates'] = self.base[name[0]].encoding[ - 'coordinates'] + if "coordinates" in self.base[name[0]].encoding: + res.encoding["coordinates"] = self.base[name[0]].encoding[ + "coordinates" + ] self.arr._variable = res._variable self.arr._coords = res._coords try: self.arr._indexes = ( - res._indexes.copy() if res._indexes is not None else None) + res._indexes.copy() if res._indexes is not None else None + ) except AttributeError: # res.indexes not existent for xr<0.12 pass self.arr.name = saved_name @@ -2702,18 +2923,19 @@ def is_unequal(v1, v2): def filter_attrs(item): """Checks whether the attribute is from the base variable""" - return (item[0] not in base_var.attrs or - is_unequal(item[1], base_var.attrs[item[0]])) + return item[0] not in base_var.attrs or is_unequal( + item[1], base_var.attrs[item[0]] + ) base_var = self.base.variables[self.arr.name] - if 'name' in dims: - name = dims.pop('name') + if "name" in dims: + name = dims.pop("name") self.arr.name = name else: name = self.arr.name # save attributes that 
have been changed by the user saved_attrs = list(filter(filter_attrs, six.iteritems(self.arr.attrs))) - if method == 'isel': + if method == "isel": self.idims.update(dims) dims = self.idims for dim in set(self.base[name].dims) - set(dims): @@ -2730,21 +2952,26 @@ def filter_attrs(item): kws = dims.copy() # the sel method does not work with slice objects if not any(isinstance(idx, slice) for idx in dims.values()): - kws['method'] = method + kws["method"] = method try: res = self.base[name].sel(**kws) except KeyError: _fix_times(kws) res = self.base[name].sel(**kws) # squeeze the 0-dimensional dimensions - res = res.isel(**{ - dim: 0 for i, dim in enumerate(res.dims) if ( - res.shape[i] == 1 and dim not in old_dims)}) + res = res.isel( + **{ + dim: 0 + for i, dim in enumerate(res.dims) + if (res.shape[i] == 1 and dim not in old_dims) + } + ) self.arr._variable = res._variable self.arr._coords = res._coords try: self.arr._indexes = ( - res._indexes.copy() if res._indexes is not None else None) + res._indexes.copy() if res._indexes is not None else None + ) except AttributeError: # res.indexes not existent for xr<0.12 pass # update to old attributes @@ -2771,28 +2998,33 @@ def shiftlon(self, central_longitude): """ if xr_version < (0, 10): raise NotImplementedError( - "xarray>=0.10 is required for the shiftlon method!") + "xarray>=0.10 is required for the shiftlon method!" 
+ ) arr = self.arr ret = arr.copy(True, arr.values.copy()) if arr.ndim > 2: - xname = self.get_dim('x') - yname = self.get_dim('y') - shapes = OrderedDict( - [(dim, range(i)) for dim, i in zip(arr.dims, arr.shape) - if dim not in [xname, yname]]) + xname = self.get_dim("x") + yname = self.get_dim("y") + shapes = dict( + [ + (dim, range(i)) + for dim, i in zip(arr.dims, arr.shape) + if dim not in [xname, yname] + ] + ) dims = list(shapes) for indexes in product(*shapes.values()): d = dict(zip(dims, indexes)) shifted = ret[d].psy.shiftlon(central_longitude) ret[d] = shifted.values - x = shifted.psy.get_coord('x') + x = shifted.psy.get_coord("x") ret[x.name] = shifted[x.name].variable return ret - lon = self.get_coord('x').variable - xname = self.get_dim('x') + lon = self.get_coord("x").variable + xname = self.get_dim("x") ix = arr.dims.index(xname) lon = lon.copy(True, lon.values.copy()) lonsin = lon.values @@ -2801,21 +3033,23 @@ def shiftlon(self, central_longitude): clon = np.asarray(central_longitude) if lonsin.ndim not in [1]: - raise ValueError('1D longitudes required') + raise ValueError("1D longitudes required") elif clon.ndim: - raise ValueError("Central longitude must be a scalar, not " - "%i-dimensional!" % clon.ndim) + raise ValueError( + "Central longitude must be a scalar, not " + "%i-dimensional!" % clon.ndim + ) - lonsin = np.where(lonsin > clon+180, lonsin-360, lonsin) - lonsin = np.where(lonsin < clon-180, lonsin+360, lonsin) - londiff = np.abs(lonsin[0:-1]-lonsin[1:]) + lonsin = np.where(lonsin > clon + 180, lonsin - 360, lonsin) + lonsin = np.where(lonsin < clon - 180, lonsin + 360, lonsin) + londiff = np.abs(lonsin[0:-1] - lonsin[1:]) londiff_sort = np.sort(londiff) - thresh = 360.-londiff_sort[-2] + thresh = 360.0 - londiff_sort[-2] itemindex = len(lonsin) - np.where(londiff >= thresh)[0] if itemindex.size: # check to see if cyclic (wraparound) point included # if so, remove it. 
- if np.abs(lonsin[0]-lonsin[-1]) < 1.e-4: + if np.abs(lonsin[0] - lonsin[-1]) < 1.0e-4: hascyclic = True lonsin_save = lonsin.copy() lonsin = lonsin[1:] @@ -2824,13 +3058,13 @@ def shiftlon(self, central_longitude): datain = datain[1:] else: hascyclic = False - lonsin = np.roll(lonsin, itemindex-1) + lonsin = np.roll(lonsin, itemindex - 1) if datain is not None: - datain = np.roll(datain, itemindex-1, [ix]) + datain = np.roll(datain, itemindex - 1, [ix]) # add cyclic point back at beginning. if hascyclic: lonsin_save[1:] = lonsin - lonsin_save[0] = lonsin[-1]-360. + lonsin_save[0] = lonsin[-1] - 360.0 lonsin = lonsin_save if datain is not None: datain_save[1:] = datain @@ -2864,9 +3098,13 @@ def start_update(self, draw=None, queues=None): -------- :attr:`no_auto_update`, update """ + def filter_attrs(item): - return (item[0] not in self.base.attrs or - item[1] != self.base.attrs[item[0]]) + return ( + item[0] not in self.base.attrs + or item[1] != self.base.attrs[item[0]] + ) + if queues is not None: # make sure that no plot is updated during gathering the data queues[0].get() @@ -2887,12 +3125,22 @@ def filter_attrs(item): raise return InteractiveBase.start_update(self, draw=draw, queues=queues) - @docstrings.get_sections(base='InteractiveArray.update', - sections=['Parameters', 'Notes']) + @docstrings.get_sections( + base="InteractiveArray.update", sections=["Parameters", "Notes"] + ) @docstrings.dedent - def update(self, method='isel', dims={}, fmt={}, replot=False, - auto_update=False, draw=None, force=False, todefault=False, - **kwargs): + def update( + self, + method="isel", + dims={}, + fmt={}, + replot=False, + auto_update=False, + draw=None, + force=False, + todefault=False, + **kwargs, + ): """ Update the coordinates and the plot @@ -2923,35 +3171,50 @@ def update(self, method='isel', dims={}, fmt={}, replot=False, %(InteractiveBase.update.notes)s""" dims = dict(dims) fmt = dict(fmt) - vars_and_coords = set(chain( - self.arr.dims, self.arr.coords, 
['name', 'x', 'y', 'z', 't'])) + vars_and_coords = set( + chain(self.arr.dims, self.arr.coords, ["name", "x", "y", "z", "t"]) + ) furtherdims, furtherfmt = utils.sort_kwargs(kwargs, vars_and_coords) dims.update(furtherdims) fmt.update(furtherfmt) - self._register_update(method=method, replot=replot, dims=dims, - fmt=fmt, force=force, todefault=todefault) + self._register_update( + method=method, + replot=replot, + dims=dims, + fmt=fmt, + force=force, + todefault=todefault, + ) if not self.no_auto_update or auto_update: self.start_update(draw=draw) def _short_info(self, intend=0, maybe=False): - str_intend = ' ' * intend - if 'variable' in self.arr.coords: - name = ', '.join(self.arr.coords['variable'].values) + str_intend = " " * intend + if "variable" in self.arr.coords: + name = ", ".join(self.arr.coords["variable"].values) else: name = self.arr.name if self.arr.ndim > 0: - dims = ', with (%s)=%s' % (', '.join(self.arr.dims), - self.arr.shape) + dims = ", with (%s)=%s" % ( + ", ".join(self.arr.dims), + self.arr.shape, + ) else: - dims = '' + dims = "" return str_intend + "%s: %i-dim %s of %s%s, %s" % ( - self.arr_name, self.arr.ndim, self.arr.__class__.__name__, name, - dims, ", ".join( + self.arr_name, + self.arr.ndim, + self.arr.__class__.__name__, + name, + dims, + ", ".join( "%s=%s" % (coord, format_item(val.values)) for coord, val in six.iteritems(self.arr.coords) - if val.ndim == 0)) + if val.ndim == 0 + ), + ) def __getitem__(self, key): ret = self.arr.__getitem__(key) @@ -2959,13 +3222,21 @@ def __getitem__(self, key): return ret def isel(self, *args, **kwargs): - # reimplemented to keep the base. The doc is set below + """Select a subset of the array based on position. + + Same method as :meth:`xarray.DataArray.isel` but keeps information on + the base dataset. + """ ret = self.arr.isel(*args, **kwargs) ret.psy._base = self._base return ret def sel(self, *args, **kwargs): - # reimplemented to keep the base. 
The doc is set below + """Select a subset of the array based on indexes. + + Same method as :meth:`xarray.DataArray.sel` but keeps information on + the base dataset. + """ ret = self.arr.sel(*args, **kwargs) ret.psy._base = self._base return ret @@ -2985,7 +3256,7 @@ def copy(self, deep=False): def to_interactive_list(self): return InteractiveList([self], arr_name=self.arr_name) - @docstrings.get_sections(base='InteractiveArray.get_coord') + @docstrings.get_sections(base="InteractiveArray.get_coord") @docstrings.dedent def get_coord(self, what, base=False): """ @@ -2998,9 +3269,10 @@ def get_coord(self, what, base=False): base: bool If True, use the base variable in the :attr:`base` dataset.""" what = what.lower() - return getattr(self.decoder, 'get_' + what)( + return getattr(self.decoder, "get_" + what)( next(six.itervalues(self.base_variables)) if base else self.arr, - self.arr.coords) + self.arr.coords, + ) @docstrings.dedent def get_dim(self, what, base=False): @@ -3011,29 +3283,32 @@ def get_dim(self, what, base=False): ---------- %(InteractiveArray.get_coord.parameters)s""" what = what.lower() - return getattr(self.decoder, 'get_%sname' % what)( - next(six.itervalues(self.base_variables)) if base else self.arr) + return getattr(self.decoder, "get_%sname" % what)( + next(six.itervalues(self.base_variables)) if base else self.arr + ) # ------------------ Calculations ----------------------------------------- def _gridweights(self): """Calculate the gridweights with a simple rectangular approximation""" arr = self.arr - xcoord = self.get_coord('x') - ycoord = self.get_coord('y') + xcoord = self.get_coord("x") + ycoord = self.get_coord("y") # convert the units xcoord_orig = xcoord ycoord_orig = ycoord - units = xcoord.attrs.get('units', '') + units = xcoord.attrs.get("units", "") in_metres = False in_degree = False - if 'deg' in units or ( - 'rad' not in units and 'lon' in xcoord.name and - 'lat' in ycoord.name): + if "deg" in units or ( + "rad" not in units + 
and "lon" in xcoord.name + and "lat" in ycoord.name + ): xcoord = xcoord * np.pi / 180 ycoord = ycoord * np.pi / 180 in_degree = True - elif 'rad' in units: + elif "rad" in units: pass else: in_metres = True @@ -3046,16 +3321,20 @@ def _gridweights(self): # calculate the weights based on the units if xcoord.ndim == 2 or self.decoder.is_unstructured(self.arr): - warn("[%s] - Curvilinear grids are not supported! " - "Using constant grid cell area weights!" % self.logger.name, - PsyPlotRuntimeWarning) + warn( + "[%s] - Curvilinear grids are not supported! " + "Using constant grid cell area weights!" % self.logger.name, + PsyPlotRuntimeWarning, + ) weights = np.ones_like(xcoord.values) elif in_metres: weights = np.abs(xbounds[:-1, :-1] - xbounds[1:, 1:]) * ( - np.abs(ybounds[:-1, :-1] - ybounds[1:, 1:])) + np.abs(ybounds[:-1, :-1] - ybounds[1:, 1:]) + ) else: weights = np.abs(xbounds[:-1, :-1] - xbounds[1:, 1:]) * ( - np.sin(ybounds[:-1, :-1]) - np.sin(ybounds[1:, 1:])) + np.sin(ybounds[:-1, :-1]) - np.sin(ybounds[1:, 1:]) + ) # normalize the weights by dividing through the sum if in_degree: @@ -3071,26 +3350,32 @@ def _gridweights(self): def _gridweights_cdo(self): """Estimate the gridweights using CDOs""" - from cdo import Cdo from tempfile import NamedTemporaryFile - sdims = {self.get_dim('y'), self.get_dim('x')} + + from cdo import Cdo + + sdims = {self.get_dim("y"), self.get_dim("x")} cdo = Cdo() - fname = NamedTemporaryFile(prefix='psy', suffix='.nc').name + fname = NamedTemporaryFile(prefix="psy", suffix=".nc").name arr = self.arr base = arr.psy.base dims = arr.dims ds = arr.isel(**{d: 0 for d in set(dims) - sdims}).to_dataset() for coord in six.itervalues(ds.coords): - bounds = coord.attrs.get('bounds', coord.encoding.get('bounds')) - if (bounds and bounds in set(base.coords) - set(ds.coords) and - sdims.intersection(base.coords[bounds].dims)): + bounds = coord.attrs.get("bounds", coord.encoding.get("bounds")) + if ( + bounds + and bounds in set(base.coords) - 
set(ds.coords) + and sdims.intersection(base.coords[bounds].dims) + ): ds[bounds] = base.sel( **{d: arr.coords[d].values for d in sdims} - ).coords[bounds] - ds = ds.drop_vars([c.name for c in six.itervalues(ds.coords) - if not c.ndim]) + ).coords[bounds] + ds = ds.drop_vars( + [c.name for c in six.itervalues(ds.coords) if not c.ndim] + ) to_netcdf(ds, fname) - ret = cdo.gridweights(input=fname, returnArray='cell_weights') + ret = cdo.gridweights(input=fname, returnArray="cell_weights") try: os.remove(fname) except Exception: @@ -3098,16 +3383,15 @@ def _gridweights_cdo(self): return ret def _weights_to_da(self, weights, keepdims=False, keepshape=False): - """Convert the 2D weights into a DataArray and potentially enlarge it - """ + """Convert the 2D weights into a DataArray and potentially enlarge it""" arr = self.arr - xcoord = self.get_coord('x') - ycoord = self.get_coord('y') - sdims = (self.get_dim('y'), self.get_dim('x')) - if sdims[0] == sdims[1]: # unstructured grids + xcoord = self.get_coord("x") + ycoord = self.get_coord("y") + sdims = (self.get_dim("y"), self.get_dim("x")) + if sdims[0] == sdims[1]: # unstructured grids sdims = sdims[:1] if (ycoord.name, xcoord.name) != sdims: - attrs = dict(coordinates=ycoord.name + ' ' + xcoord.name) + attrs = dict(coordinates=ycoord.name + " " + xcoord.name) else: attrs = {} @@ -3143,13 +3427,17 @@ def notnull(a): else: dims = arr.dims coords = arr.isel( - **{d: 0 if d not in sdims else slice(None) - for d in dims}).coords + **{d: 0 if d not in sdims else slice(None) for d in dims} + ).coords weights = weights.reshape( - tuple(1 if dim not in sdims else s - for s, dim in zip(arr.shape, arr.dims))) - return xr.DataArray(weights, dims=dims, coords=coords, - name='cell_weights', attrs=attrs) + tuple( + 1 if dim not in sdims else s + for s, dim in zip(arr.shape, arr.dims) + ) + ) + return xr.DataArray( + weights, dims=dims, coords=coords, name="cell_weights", attrs=attrs + ) def gridweights(self, keepdims=False, 
keepshape=False, use_cdo=None): """Calculate the cell weights for each grid cell @@ -3172,7 +3460,7 @@ def gridweights(self, keepdims=False, keepshape=False, use_cdo=None): xarray.DataArray The 2D-DataArray with the grid weights""" if use_cdo is None: - use_cdo = rcParams['gridweights.use_cdo'] + use_cdo = rcParams["gridweights.use_cdo"] if not use_cdo and self.decoder.is_unstructured(self.arr): use_cdo = True if use_cdo is None or use_cdo: @@ -3186,34 +3474,37 @@ def gridweights(self, keepdims=False, keepshape=False, use_cdo=None): else: weights = self._gridweights() - return self._weights_to_da(weights, keepdims=keepdims, - keepshape=keepshape) + return self._weights_to_da( + weights, keepdims=keepdims, keepshape=keepshape + ) def _fldaverage_args(self): """Masked array, xname, yname and axis for calculating the average""" arr = self.arr - sdims = (self.get_dim('y'), self.get_dim('x')) + sdims = (self.get_dim("y"), self.get_dim("x")) if sdims[0] == sdims[1]: sdims = sdims[:1] axis = tuple(map(arr.dims.index, sdims)) return arr, sdims, axis def _insert_fldmean_bounds(self, da, keepdims=False): - xcoord = self.get_coord('x') - ycoord = self.get_coord('y') - sdims = (self.get_dim('y'), self.get_dim('x')) + xcoord = self.get_coord("x") + ycoord = self.get_coord("y") + sdims = (self.get_dim("y"), self.get_dim("x")) xbounds = np.array([[xcoord.min(), xcoord.max()]]) ybounds = np.array([[ycoord.min(), ycoord.max()]]) - xdims = (sdims[-1], 'bnds') if keepdims else ('bnds', ) - ydims = (sdims[0], 'bnds') if keepdims else ('bnds', ) + xdims = (sdims[-1], "bnds") if keepdims else ("bnds",) + ydims = (sdims[0], "bnds") if keepdims else ("bnds",) xattrs = xcoord.attrs.copy() - xattrs.pop('bounds', None) + xattrs.pop("bounds", None) yattrs = ycoord.attrs.copy() - yattrs.pop('bounds', None) - da.psy.base.coords[xcoord.name + '_bnds'] = xr.Variable( - xdims, xbounds if keepdims else xbounds[0], attrs=xattrs) - da.psy.base.coords[ycoord.name + '_bnds'] = xr.Variable( - ydims, 
ybounds if keepdims else ybounds[0], attrs=yattrs) + yattrs.pop("bounds", None) + da.psy.base.coords[xcoord.name + "_bnds"] = xr.Variable( + xdims, xbounds if keepdims else xbounds[0], attrs=xattrs + ) + da.psy.base.coords[ycoord.name + "_bnds"] = xr.Variable( + ydims, ybounds if keepdims else ybounds[0], attrs=yattrs + ) def fldmean(self, keepdims=False): """Calculate the weighted mean over the x- and y-dimension @@ -3244,12 +3535,15 @@ def fldmean(self, keepdims=False): gridweights = self.gridweights() arr, sdims, axis = self._fldaverage_args() - xcoord = self.decoder.get_x(next(six.itervalues(self.base_variables)), - arr.coords) - ycoord = self.decoder.get_y(next(six.itervalues(self.base_variables)), - arr.coords) + xcoord = self.decoder.get_x( + next(six.itervalues(self.base_variables)), arr.coords + ) + ycoord = self.decoder.get_y( + next(six.itervalues(self.base_variables)), arr.coords + ) means = ((arr * gridweights)).sum(axis=axis) * ( - gridweights.size / arr.notnull().sum(axis=axis)) + gridweights.size / arr.notnull().sum(axis=axis) + ) if keepdims: means = means.expand_dims(sdims, axis=axis) @@ -3259,8 +3553,8 @@ def fldmean(self, keepdims=False): else: means[xcoord.name] = xcoord.mean() means[ycoord.name] = ycoord.mean() - means.coords[xcoord.name].attrs['bounds'] = xcoord.name + '_bnds' - means.coords[ycoord.name].attrs['bounds'] = ycoord.name + '_bnds' + means.coords[xcoord.name].attrs["bounds"] = xcoord.name + "_bnds" + means.coords[ycoord.name].attrs["bounds"] = ycoord.name + "_bnds" self._insert_fldmean_bounds(means, keepdims) means.name = arr.name return means @@ -3294,19 +3588,25 @@ def fldstd(self, keepdims=False): arr, sdims, axis = self._fldaverage_args() means = self.fldmean(keepdims=True) weights = self.gridweights(keepshape=True) - variance = ((arr - means.values)**2 * weights).sum(axis=axis) + variance = ((arr - means.values) ** 2 * weights).sum(axis=axis) if keepdims: variance = variance.expand_dims(sdims, axis=axis) for key, coord in 
six.iteritems(means.coords): if key not in variance.coords: dims = set(sdims).intersection(coord.dims) - variance[key] = coord if keepdims else coord.isel( - **dict(zip(dims, repeat(0)))) + variance[key] = ( + coord + if keepdims + else coord.isel(**dict(zip(dims, repeat(0)))) + ) for key, coord in six.iteritems(means.psy.base.coords): if key not in variance.psy.base.coords: dims = set(sdims).intersection(coord.dims) - variance.psy.base[key] = coord if keepdims else coord.isel( - **dict(zip(dims, repeat(0)))) + variance.psy.base[key] = ( + coord + if keepdims + else coord.isel(**dict(zip(dims, repeat(0)))) + ) std = variance**0.5 std.name = arr.name return std @@ -3347,9 +3647,9 @@ def fldpctl(self, q, keepdims=False): gridweights = self.gridweights(keepshape=True) arr = self.arr - q = np.asarray(q) / 100. + q = np.asarray(q) / 100.0 if not (np.all(q >= 0) and np.all(q <= 100)): - raise ValueError('q should be in [0, 100]') + raise ValueError("q should be in [0, 100]") reduce_shape = False if keepdims else (not bool(q.ndim)) if not q.ndim: q = q[np.newaxis] @@ -3361,22 +3661,25 @@ def fldpctl(self, q, keepdims=False): data = np.rollaxis(data, ax, 0) weights = np.rollaxis(weights, ax, 0) data = data.reshape( - (np.product(data.shape[:len(axis)]), ) + data.shape[len(axis):]) + (np.product(data.shape[: len(axis)]),) + data.shape[len(axis) :] + ) weights = weights.reshape( - (np.product(weights.shape[:len(axis)]), ) + - weights.shape[len(axis):]) + (np.product(weights.shape[: len(axis)]),) + + weights.shape[len(axis) :] + ) # sort the data sorter = np.argsort(data, axis=0) all_indices = map(tuple, product(*map(range, data.shape[1:]))) for indices in all_indices: - indices = (slice(None), ) + indices + indices = (slice(None),) + indices data.__setitem__( - indices, data.__getitem__(indices)[ - sorter.__getitem__(indices)]) + indices, data.__getitem__(indices)[sorter.__getitem__(indices)] + ) weights.__setitem__( - indices, weights.__getitem__(indices)[ - 
sorter.__getitem__(indices)]) + indices, + weights.__getitem__(indices)[sorter.__getitem__(indices)], + ) # compute the percentiles try: @@ -3385,25 +3688,33 @@ def fldpctl(self, q, keepdims=False): notnull = ~np.isnan(weights) weights[notnull] = np.cumsum(weights[notnull]) all_indices = map(tuple, product(*map(range, data.shape[1:]))) - pctl = np.zeros((len(q), ) + data.shape[1:]) + pctl = np.zeros((len(q),) + data.shape[1:]) for indices in all_indices: - indices = (slice(None), ) + indices + indices = (slice(None),) + indices mask = ~np.isnan(data.__getitem__(indices)) - pctl.__setitem__(indices, np.interp( - q, weights.__getitem__(indices)[mask], - data.__getitem__(indices)[mask])) + pctl.__setitem__( + indices, + np.interp( + q, + weights.__getitem__(indices)[mask], + data.__getitem__(indices)[mask], + ), + ) # setup the data array and it's coordinates - xcoord = self.decoder.get_x(next(six.itervalues(self.base_variables)), - arr.coords) - ycoord = self.decoder.get_y(next(six.itervalues(self.base_variables)), - arr.coords) + xcoord = self.decoder.get_x( + next(six.itervalues(self.base_variables)), arr.coords + ) + ycoord = self.decoder.get_y( + next(six.itervalues(self.base_variables)), arr.coords + ) coords = dict(arr.coords) if keepdims: pctl = pctl.reshape( - (len(q), ) + - tuple(1 if i in axis else s for i, s in enumerate(arr.shape))) + (len(q),) + + tuple(1 if i in axis else s for i, s in enumerate(arr.shape)) + ) coords[xcoord.name] = xcoord.mean().expand_dims(xcoord.dims[0]) coords[ycoord.name] = ycoord.mean().expand_dims(ycoord.dims[0]) dims = arr.dims @@ -3413,31 +3724,36 @@ def fldpctl(self, q, keepdims=False): dims = tuple(d for d in arr.dims if d not in sdims) if reduce_shape: pctl = pctl[0] - coords['pctl'] = xr.Variable((), q[0] * 100., - attrs={'long_name': 'Percentile'}) + coords["pctl"] = xr.Variable( + (), q[0] * 100.0, attrs={"long_name": "Percentile"} + ) else: - coords['pctl'] = xr.Variable(('pctl', ), q * 100., - attrs={'long_name': 
'Percentile'}) - dims = ('pctl', ) + dims - coords[xcoord.name].attrs['bounds'] = xcoord.name + '_bnds' - coords[ycoord.name].attrs['bounds'] = ycoord.name + '_bnds' - coords = {name: c for name, c in coords.items() - if set(c.dims) <= set(dims)} - ret = xr.DataArray(pctl, name=arr.name, dims=dims, coords=coords, - attrs=arr.attrs.copy()) + coords["pctl"] = xr.Variable( + ("pctl",), q * 100.0, attrs={"long_name": "Percentile"} + ) + dims = ("pctl",) + dims + coords[xcoord.name].attrs["bounds"] = xcoord.name + "_bnds" + coords[ycoord.name].attrs["bounds"] = ycoord.name + "_bnds" + coords = { + name: c for name, c in coords.items() if set(c.dims) <= set(dims) + } + ret = xr.DataArray( + pctl, + name=arr.name, + dims=dims, + coords=coords, + attrs=arr.attrs.copy(), + ) self._insert_fldmean_bounds(ret, keepdims) return ret - isel.__doc__ = xr.DataArray.isel.__doc__ - sel.__doc__ = xr.DataArray.sel.__doc__ - class ArrayList(list): """Base class for creating a list of interactive arrays from a dataset This list contains and manages :class:`InteractiveArray` instances""" - docstrings.keep_params('InteractiveBase.parameters', 'auto_update') + docstrings.keep_params("InteractiveBase.parameters", "auto_update") @property def dims(self): @@ -3446,10 +3762,12 @@ def dims(self): @property def dims_intersect(self): - """Dimensions of the arrays in this list that are used in all arrays - """ - return set.intersection(*map( - set, (getattr(arr, 'dims_intersect', arr.dims) for arr in self))) + """Dimensions of the arrays in this list that are used in all arrays""" + return set.intersection( + *map( + set, (getattr(arr, "dims_intersect", arr.dims) for arr in self) + ) + ) @property def arr_names(self): @@ -3464,8 +3782,9 @@ def arr_names(self, value): value = list(islice(value, 0, len(self))) if not len(set(value)) == len(self): raise ValueError( - "Got %i unique array names for %i data objects!" % ( - len(set(value)), len(self))) + "Got %i unique array names for %i data objects!" 
+ % (len(set(value)), len(self)) + ) for arr, n in zip(self, value): arr.psy.arr_name = n @@ -3484,26 +3803,29 @@ def names(self): def all_names(self): """The variable names for each of the arrays in this list""" return [ - _get_variable_names(arr) if not isinstance(arr, ArrayList) else - arr.all_names - for arr in self] + _get_variable_names(arr) + if not isinstance(arr, ArrayList) + else arr.all_names + for arr in self + ] @property def all_dims(self): """The dimensions for each of the arrays in this list""" return [ - _get_dims(arr) if not isinstance(arr, ArrayList) else - arr.all_dims - for arr in self] + _get_dims(arr) if not isinstance(arr, ArrayList) else arr.all_dims + for arr in self + ] @property def is_unstructured(self): """A boolean for each array whether it is unstructured or not""" return [ arr.psy.decoder.is_unstructured(arr) - if not isinstance(arr, ArrayList) else - arr.is_unstructured - for arr in self] + if not isinstance(arr, ArrayList) + else arr.is_unstructured + for arr in self + ] @property def coords(self): @@ -3512,21 +3834,25 @@ def coords(self): @property def coords_intersect(self): - """Coordinates of the arrays in this list that are used in all arrays - """ - return set.intersection(*map( - set, (getattr(arr, 'coords_intersect', arr.coords) for arr in self) - )) + """Coordinates of the arrays in this list that are used in all arrays""" + return set.intersection( + *map( + set, + (getattr(arr, "coords_intersect", arr.coords) for arr in self), + ) + ) @property def with_plotter(self): """The arrays in this instance that are visualized with a plotter""" return self.__class__( (arr for arr in self if arr.psy.plotter is not None), - auto_update=bool(self.auto_update)) + auto_update=bool(self.auto_update), + ) - no_auto_update = property(_no_auto_update_getter, - doc=_no_auto_update_getter.__doc__) + no_auto_update = property( + _no_auto_update_getter, doc=_no_auto_update_getter.__doc__ + ) @no_auto_update.setter def no_auto_update(self, 
value): @@ -3540,9 +3866,9 @@ def logger(self): try: return self._logger except AttributeError: - name = '%s.%s' % (self.__module__, self.__class__.__name__) + name = "%s.%s" % (self.__module__, self.__class__.__name__) self._logger = logging.getLogger(name) - self.logger.debug('Initializing...') + self.logger.debug("Initializing...") return self._logger @logger.setter @@ -3551,14 +3877,21 @@ def logger(self, value): @property def arrays(self): - """A list of all the :class:`xarray.DataArray` instances in this list - """ - return list(chain.from_iterable( - ([arr] if not isinstance(arr, InteractiveList) else arr.arrays - for arr in self))) + """A list of all the :class:`xarray.DataArray` instances in this list""" + return list( + chain.from_iterable( + ( + [arr] + if not isinstance(arr, InteractiveList) + else arr.arrays + for arr in self + ) + ) + ) - @docstrings.get_sections(base='ArrayList.rename', sections=[ - 'Parameters', 'Raises']) + @docstrings.get_sections( + base="ArrayList.rename", sections=["Parameters", "Raises"] + ) @dedent def rename(self, arr, new_name=True): """ @@ -3599,17 +3932,18 @@ def rename(self, arr, new_name=True): if new_name is False: raise ValueError( "Array name %s is already in use! Set the `new_name` " - "parameter to None for renaming!" % arr.psy.arr_name) + "parameter to None for renaming!" 
% arr.psy.arr_name + ) elif new_name is True: - new_name = new_name if isstring(new_name) else 'arr{0}' + new_name = new_name if isstring(new_name) else "arr{0}" arr.psy.arr_name = self.next_available_name(new_name) return arr, True return arr, None - docstrings.keep_params('ArrayList.rename.parameters', 'new_name') - docstrings.keep_params('InteractiveBase.parameters', 'auto_update') + docstrings.keep_params("ArrayList.rename.parameters", "new_name") + docstrings.keep_params("InteractiveBase.parameters", "auto_update") - @docstrings.get_sections(base='ArrayList') + @docstrings.get_sections(base="ArrayList") @docstrings.dedent def __init__(self, iterable=[], attrs={}, auto_update=None, new_name=True): """ @@ -3622,15 +3956,20 @@ def __init__(self, iterable=[], attrs={}, auto_update=None, new_name=True): %(InteractiveBase.parameters.auto_update)s %(ArrayList.rename.parameters.new_name)s""" super(ArrayList, self).__init__() - self.attrs = OrderedDict(attrs) + self.attrs = dict(attrs) if auto_update is None: - auto_update = rcParams['lists.auto_update'] + auto_update = rcParams["lists.auto_update"] self.auto_update = not bool(auto_update) # append the data in order to set the correct names - self.extend(filter( - lambda arr: isinstance(getattr(arr, 'psy', None), - InteractiveBase), - iterable), new_name=new_name) + self.extend( + filter( + lambda arr: isinstance( + getattr(arr, "psy", None), InteractiveBase + ), + iterable, + ), + new_name=new_name, + ) def copy(self, deep=False): """Returns a copy of the list @@ -3641,22 +3980,39 @@ def copy(self, deep=False): If False (default), only the list is copied and not the contained arrays, otherwise the contained arrays are deep copied""" if not deep: - return self.__class__(self[:], attrs=self.attrs.copy(), - auto_update=not bool(self.no_auto_update)) + return self.__class__( + self[:], + attrs=self.attrs.copy(), + auto_update=not bool(self.no_auto_update), + ) else: return self.__class__( - [arr.psy.copy(deep) for arr in 
self], attrs=self.attrs.copy(), - auto_update=not bool(self.auto_update)) + [arr.psy.copy(deep) for arr in self], + attrs=self.attrs.copy(), + auto_update=not bool(self.auto_update), + ) - docstrings.keep_params('InteractiveArray.update.parameters', 'method') + docstrings.keep_params("InteractiveArray.update.parameters", "method") @classmethod - @docstrings.get_sections(base='ArrayList.from_dataset', sections=[ - 'Parameters', 'Other Parameters', 'Returns']) + @docstrings.get_sections( + base="ArrayList.from_dataset", + sections=["Parameters", "Other Parameters", "Returns"], + ) @docstrings.dedent - def from_dataset(cls, base, method='isel', default_slice=None, - decoder=None, auto_update=None, prefer_list=False, - squeeze=True, attrs=None, load=False, **kwargs): + def from_dataset( + cls, + base, + method="isel", + default_slice=None, + decoder=None, + auto_update=None, + prefer_list=False, + squeeze=True, + attrs=None, + load=False, + **kwargs, + ): """ Construct an ArrayList instance from an existing base dataset @@ -3707,9 +4063,12 @@ def from_dataset(cls, base, method='isel', default_slice=None, try: load = dict(load) except (TypeError, ValueError): + def maybe_load(arr): return arr.load() if load else arr + else: + def maybe_load(arr): return arr.load(**load) @@ -3719,7 +4078,7 @@ def iter_dims(dims): while 1: yield {} else: - dims = OrderedDict(dims) + dims = dict(dims) keys = dims.keys() for vals in zip(*map(cycle, map(safe_list, dims.values()))): yield dict(zip(keys, vals)) @@ -3727,23 +4086,27 @@ def iter_dims(dims): def recursive_selection(key, dims, names): names = safe_list(names) if len(names) > 1 and prefer_list: - keys = ('arr%i' % i for i in range(len(names))) + keys = ("arr%i" % i for i in range(len(names))) return InteractiveList( starmap(sel_method, zip(keys, iter_dims(dims), names)), - auto_update=auto_update, arr_name=key) + auto_update=auto_update, + arr_name=key, + ) elif len(names) > 1: return sel_method(key, dims, tuple(names)) else: 
return sel_method(key, dims, names[0]) def ds2arr(arr): - base_var = next(var for key, var in arr.variables.items() - if key not in arr.coords) + base_var = next( + var + for key, var in arr.variables.items() + if key not in arr.coords + ) attrs = base_var.attrs arr = arr.to_array() - if 'coordinates' in base_var.encoding: - arr.encoding['coordinates'] = base_var.encoding[ - 'coordinates'] + if "coordinates" in base_var.encoding: + arr.encoding["coordinates"] = base_var.encoding["coordinates"] arr.attrs.update(attrs) return arr @@ -3763,23 +4126,39 @@ def add_missing_dimensions(arr): # add the missing dimensions to the dataset. This is not anymore # done by default from xarray >= 0.9 but we need it to ensure the # interactive treatment of DataArrays - missing = set(arr.dims).difference(base.coords) - {'variable'} + missing = set(arr.dims).difference(base.coords) - {"variable"} for dim in missing: - base[dim] = arr.coords[dim] = np.arange(base.dims[dim]) + try: + size = base.sizes[dim] + except AttributeError: + # old xarray version + size = base.dims[dim] + base[dim] = arr.coords[dim] = np.arange(size) if squeeze: + def squeeze_array(arr): - return arr.isel(**{dim: 0 for i, dim in enumerate(arr.dims) - if arr.shape[i] == 1}) + return arr.isel( + **{ + dim: 0 + for i, dim in enumerate(arr.dims) + if arr.shape[i] == 1 + } + ) + else: + def squeeze_array(arr): return arr - if method == 'isel': + + if method == "isel": + def sel_method(key, dims, name=None): if name is None: - return recursive_selection(key, dims, dims.pop('name')) - elif (isinstance(name, six.string_types) or - not utils.is_iterable(name)): + return recursive_selection(key, dims, dims.pop("name")) + elif isinstance( + name, six.string_types + ) or not utils.is_iterable(name): arr = base[name] decoder = get_decoder(arr) dims = decoder.correct_dims(arr, dims) @@ -3787,27 +4166,36 @@ def sel_method(key, dims, name=None): arr = base[list(name)] decoder = get_decoder(base[name[0]]) dims = 
decoder.correct_dims(base[name[0]], dims) - def_slice = slice(None) if default_slice is None else \ - default_slice - dims.update({ - dim: def_slice for dim in set(arr.dims).difference( - dims) if dim != 'variable'}) + def_slice = ( + slice(None) if default_slice is None else default_slice + ) + dims.update( + { + dim: def_slice + for dim in set(arr.dims).difference(dims) + if dim != "variable" + } + ) add_missing_dimensions(arr) ret = arr.isel(**dims) if not isinstance(ret, xr.DataArray): ret = ds2arr(ret) ret = squeeze_array(ret) # delete the variable dimension for the idims - dims.pop('variable', None) - ret.psy.init_accessor(arr_name=key, base=base, idims=dims, - decoder=decoder) + dims.pop("variable", None) + ret.psy.init_accessor( + arr_name=key, base=base, idims=dims, decoder=decoder + ) return maybe_load(ret) + else: + def sel_method(key, dims, name=None): if name is None: - return recursive_selection(key, dims, dims.pop('name')) - elif (isinstance(name, six.string_types) or - not utils.is_iterable(name)): + return recursive_selection(key, dims, dims.pop("name")) + elif isinstance( + name, six.string_types + ) or not utils.is_iterable(name): arr = base[name] decoder = get_decoder(arr) dims = decoder.correct_dims(arr, dims) @@ -3817,17 +4205,23 @@ def sel_method(key, dims, name=None): dims = decoder.correct_dims(base[name[0]], dims) if default_slice is not None: if isinstance(default_slice, slice): - dims.update({ - dim: default_slice - for dim in set(arr.dims).difference(dims) - if dim != 'variable'}) + dims.update( + { + dim: default_slice + for dim in set(arr.dims).difference(dims) + if dim != "variable" + } + ) else: - dims.update({ - dim: arr.coords[dim][default_slice] - for dim in set(arr.dims).difference(dims) - if dim != 'variable'}) + dims.update( + { + dim: arr.coords[dim][default_slice] + for dim in set(arr.dims).difference(dims) + if dim != "variable" + } + ) kws = dims.copy() - kws['method'] = method + kws["method"] = method # the sel method 
does not work with slice objects for dim, val in dims.items(): if isinstance(val, slice): @@ -3846,32 +4240,43 @@ def sel_method(key, dims, name=None): ret = squeeze_array(ret) ret.psy.init_accessor(arr_name=key, base=base, decoder=decoder) return maybe_load(ret) - if 'name' not in kwargs: + + if "name" not in kwargs: default_names = list( - key for key in base.variables if key not in base.coords) + key for key in base.variables if key not in base.coords + ) try: default_names.sort() except TypeError: pass - kwargs['name'] = default_names + kwargs["name"] = default_names names = setup_coords(**kwargs) # check coordinates - possible_keys = ['t', 'x', 'y', 'z', 'name'] + list(base.dims) + possible_keys = ["t", "x", "y", "z", "name"] + list(base.dims) for key in set(chain(*six.itervalues(names))): - utils.check_key(key, possible_keys, name='dimension') - instance = cls(starmap(sel_method, six.iteritems(names)), - attrs=base.attrs, auto_update=auto_update) + utils.check_key(key, possible_keys, name="dimension") + instance = cls( + starmap(sel_method, six.iteritems(names)), + attrs=base.attrs, + auto_update=auto_update, + ) # convert to interactive lists if an instance is not if prefer_list and any( - not isinstance(arr, InteractiveList) for arr in instance): + not isinstance(arr, InteractiveList) for arr in instance + ): # if any instance is an interactive list, than convert the others if any(isinstance(arr, InteractiveList) for arr in instance): for i, arr in enumerate(instance): if not isinstance(arr, InteractiveList): instance[i] = InteractiveList([arr]) else: # put everything into one single interactive list - instance = cls([InteractiveList(instance, attrs=base.attrs, - auto_update=auto_update)]) + instance = cls( + [ + InteractiveList( + instance, attrs=base.attrs, auto_update=auto_update + ) + ] + ) instance[0].psy.arr_name = instance[0][0].psy.arr_name if attrs is not None: for arr in instance: @@ -3879,70 +4284,107 @@ def sel_method(key, dims, name=None): 
return instance @classmethod - def _get_dsnames(cls, data, ignore_keys=['attrs', 'plotter', 'ds'], - concat_dim=False, combine=False): + def _get_dsnames( + cls, + data, + ignore_keys=["attrs", "plotter", "ds"], + concat_dim=False, + combine=False, + ): """Recursive method to get all the file names out of a dictionary `data` created with the :meth`array_info` method""" + def filter_ignores(item): return item[0] not in ignore_keys and isinstance(item[1], dict) - if 'fname' in data: - return {tuple( - [data['fname'], data['store']] + - ([data.get('concat_dim')] if concat_dim else []) + - ([data.get('combine')] if combine else []))} - return set(chain(*map(partial(cls._get_dsnames, concat_dim=concat_dim, - combine=combine, - ignore_keys=ignore_keys), - dict(filter(filter_ignores, - six.iteritems(data))).values()))) + + if "fname" in data: + return { + tuple( + [data["fname"], data["store"]] + + ([data.get("concat_dim")] if concat_dim else []) + + ([data.get("combine")] if combine else []) + ) + } + return set( + chain( + *map( + partial( + cls._get_dsnames, + concat_dim=concat_dim, + combine=combine, + ignore_keys=ignore_keys, + ), + dict(filter(filter_ignores, six.iteritems(data))).values(), + ) + ) + ) @classmethod def _get_ds_descriptions( - cls, data, ds_description={'ds', 'fname', 'arr'}, **kwargs): + cls, data, ds_description={"ds", "fname", "arr"}, **kwargs + ): def new_dict(): return defaultdict(list) + ret = defaultdict(new_dict) ds_description = set(ds_description) for d in cls._get_ds_descriptions_unsorted(data, **kwargs): try: - num = d.get('num') or d['ds'].psy.num + num = d.get("num") or d["ds"].psy.num except KeyError: raise ValueError( - 'Could not find either the dataset number nor the dataset ' - 'in the data! However one must be provided.') + "Could not find either the dataset number nor the dataset " + "in the data! However one must be provided." 
+ ) d_ret = ret[num] for key, val in six.iteritems(d): - if key == 'arr': - d_ret['arr'].append(d['arr']) + if key == "arr": + d_ret["arr"].append(d["arr"]) else: d_ret[key] = val return ret @classmethod def _get_ds_descriptions_unsorted( - cls, data, ignore_keys=['attrs', 'plotter'], nums=None): + cls, data, ignore_keys=["attrs", "plotter"], nums=None + ): """Recursive method to get all the file names or datasets out of a dictionary `data` created with the :meth`array_info` method""" - ds_description = {'ds', 'fname', 'num', 'arr', 'store'} - if 'ds' in data: + ds_description = {"ds", "fname", "num", "arr", "store"} + if "ds" in data: # make sure that the data set has a number assigned to it - data['ds'].psy.num + data["ds"].psy.num keys_in_data = ds_description.intersection(data) if keys_in_data: return {key: data[key] for key in keys_in_data} for key in ignore_keys: data.pop(key, None) - func = partial(cls._get_ds_descriptions_unsorted, - ignore_keys=ignore_keys, nums=nums) - return chain(*map(lambda d: [d] if isinstance(d, dict) else d, - map(func, six.itervalues(data)))) + func = partial( + cls._get_ds_descriptions_unsorted, + ignore_keys=ignore_keys, + nums=nums, + ) + return chain( + *map( + lambda d: [d] if isinstance(d, dict) else d, + map(func, six.itervalues(data)), + ) + ) @classmethod - @docstrings.get_sections(base='ArrayList.from_dict') + @docstrings.get_sections(base="ArrayList.from_dict") @docstrings.dedent - def from_dict(cls, d, alternative_paths={}, datasets=None, - pwd=None, ignore_keys=['attrs', 'plotter', 'ds'], - only=None, chname={}, **kwargs): + def from_dict( + cls, + d, + alternative_paths={}, + datasets=None, + pwd=None, + ignore_keys=["attrs", "plotter", "ds"], + only=None, + chname={}, + **kwargs, + ): """ Create a list from the dictionary returned by :meth:`array_info` @@ -3960,7 +4402,7 @@ def from_dict(cls, d, alternative_paths={}, datasets=None, working directory. 
If `alternative_paths` is a list (or any other iterable) is provided, the file names will be replaced as they appear in `d` - (note that this is very unsafe if `d` is not and OrderedDict) + (note that this is very unsafe if `d` is not and dict) datasets: dict or list or None A mapping from original filenames in `d` to the instances of :class:`xarray.Dataset` to use. If it is an iterable, the same @@ -4022,29 +4464,38 @@ def filter_func(arr_name: str, info: dict): -> bool See Also -------- from_dataset, array_info""" - pwd = pwd or getcwd() + pwd = pwd or os.getcwd() if only is None: + def only_filter(arr_name, info): return True + elif callable(only): only_filter = only elif isstring(only): + def only_filter(arr_name, info): return patt.search(arr_name) is not None + patt = re.compile(only) only = None else: + def only_filter(arr_name, info): return arr_name in save_only + save_only = only only = None def get_fname_use(fname): squeeze = isstring(fname) fname = safe_list(fname) - ret = tuple(f if utils.is_remote_url(f) or osp.isabs(f) else - osp.join(pwd, f) - for f in fname) + ret = tuple( + f + if utils.is_remote_url(f) or osp.isabs(f) + else osp.join(pwd, f) + for f in fname + ) return ret[0] if squeeze else ret def get_name(name): @@ -4058,23 +4509,29 @@ def get_name(name): alternative_paths = defaultdict(partial(next, it, None)) # first open all datasets if not already done if datasets is None: - replace_concat_dim = 'concat_dim' not in kwargs - replace_combine = 'combine' not in kwargs + replace_concat_dim = "concat_dim" not in kwargs + replace_combine = "combine" not in kwargs names_and_stores = cls._get_dsnames( - d, concat_dim=True, combine=True) + d, concat_dim=True, combine=True + ) datasets = {} - for fname, (store_mod, store_cls), concat_dim, combine in names_and_stores: + for ( + fname, + (store_mod, store_cls), + concat_dim, + combine, + ) in names_and_stores: fname_use = fname got = True if replace_concat_dim and concat_dim is not None: - 
kwargs['concat_dim'] = concat_dim + kwargs["concat_dim"] = concat_dim elif replace_concat_dim and concat_dim is None: - kwargs.pop('concat_dim', None) + kwargs.pop("concat_dim", None) if replace_combine and combine is not None: - kwargs['combine'] = combine + kwargs["combine"] = combine elif replace_combine and combine is None: - kwargs.pop('combine', None) + kwargs.pop("combine", None) try: fname_use = alternative_paths[fname] except KeyError: @@ -4084,7 +4541,8 @@ def get_name(name): fname_use = get_fname_use(fname) if fname_use is not None: datasets[fname] = _open_ds_from_store( - fname_use, store_mod, store_cls, **kwargs) + fname_use, store_mod, store_cls, **kwargs + ) if alternative_paths is not None: for fname in set(alternative_paths).difference(datasets): datasets[fname] = _open_ds_from_store(fname, **kwargs) @@ -4097,27 +4555,34 @@ def get_name(name): if arr_name in ignore_keys or not only_filter(arr_name, info): arrays.pop(i) continue - if not {'fname', 'ds', 'arr'}.intersection(info): + if not {"fname", "ds", "arr"}.intersection(info): # the described object is an InteractiveList arr = InteractiveList.from_dict( - info, alternative_paths=alternative_paths, - datasets=datasets, chname=chname) + info, + alternative_paths=alternative_paths, + datasets=datasets, + chname=chname, + ) if not arr: warn("Skipping empty list %s!" % arr_name) arrays.pop(i) continue else: - if 'arr' in info: - arr = info.pop('arr') - elif 'ds' in info: + if "arr" in info: + arr = info.pop("arr") + elif "ds" in info: arr = cls.from_dataset( - info['ds'], dims=info['dims'], - name=get_name(info['name']))[0] + info["ds"], + dims=info["dims"], + name=get_name(info["name"]), + )[0] else: - fname = info['fname'] + fname = info["fname"] if fname is None: - warn("Could not open array %s because no filename was " - "specified!" % arr_name) + warn( + "Could not open array %s because no filename was " + "specified!" 
% arr_name + ) arrays.pop(i) continue try: # in case, datasets is a defaultdict @@ -4125,28 +4590,42 @@ def get_name(name): except KeyError: pass if fname not in datasets: - warn("Could not open array %s because %s was not in " - "the list of datasets!" % (arr_name, fname)) + warn( + "Could not open array %s because %s was not in " + "the list of datasets!" % (arr_name, fname) + ) arrays.pop(i) continue arr = cls.from_dataset( - datasets[fname], dims=info['dims'], - name=get_name(info['name']))[0] - for key, val in six.iteritems(info.get('attrs', {})): + datasets[fname], + dims=info["dims"], + name=get_name(info["name"]), + )[0] + for key, val in six.iteritems(info.get("attrs", {})): arr.attrs.setdefault(key, val) arr.psy.arr_name = arr_name arrays[i] = arr i += 1 - return cls(arrays, attrs=d.get('attrs', {})) + return cls(arrays, attrs=d.get("attrs", {})) - docstrings.delete_params('get_filename_ds.parameters', 'ds', 'dump') + docstrings.delete_params("get_filename_ds.parameters", "ds", "dump") - @docstrings.get_sections(base='ArrayList.array_info') + @docstrings.get_sections(base="ArrayList.array_info") @docstrings.dedent - def array_info(self, dump=None, paths=None, attrs=True, - standardize_dims=True, pwd=None, use_rel_paths=True, - alternative_paths={}, ds_description={'fname', 'store'}, - full_ds=True, copy=False, **kwargs): + def array_info( + self, + dump=None, + paths=None, + attrs=True, + standardize_dims=True, + pwd=None, + use_rel_paths=True, + alternative_paths={}, + ds_description={"fname", "store"}, + full_ds=True, + copy=False, + **kwargs, + ): """ Get dimension informations on you arrays @@ -4204,20 +4683,26 @@ def array_info(self, dump=None, paths=None, attrs=True, Returns ------- - OrderedDict + dict An ordered mapping from array names to dimensions and filename corresponding to the array See Also -------- from_dict""" - saved_ds = kwargs.pop('_saved_ds', {}) + saved_ds = kwargs.pop("_saved_ds", {}) def get_alternative(f): - return 
next(filter(lambda t: osp.samefile(f, t[0]), - six.iteritems(alternative_paths)), [False, f]) + return next( + filter( + lambda t: osp.samefile(f, t[0]), + six.iteritems(alternative_paths), + ), + [False, f], + ) if copy: + def copy_obj(obj): # try to get the number of the dataset and create only one copy # copy for each dataset @@ -4232,12 +4717,15 @@ def copy_obj(obj): saved_ds[num] = obj.psy.copy(True) return saved_ds[num] return obj.psy.copy(True) + else: + def copy_obj(obj): return obj - ret = OrderedDict() - if ds_description == 'all': - ds_description = {'fname', 'ds', 'num', 'arr', 'store'} + + ret = dict() + if ds_description == "all": + ds_description = {"fname", "ds", "num", "arr", "store"} if paths is not None: if dump is None: dump = True @@ -4245,64 +4733,74 @@ def copy_obj(obj): elif dump is None: dump = False if pwd is None: - pwd = getcwd() + pwd = os.getcwd() for arr in self: if isinstance(arr, InteractiveList): ret[arr.arr_name] = arr.array_info( - dump, paths, pwd=pwd, attrs=attrs, + dump, + paths, + pwd=pwd, + attrs=attrs, standardize_dims=standardize_dims, - use_rel_paths=use_rel_paths, ds_description=ds_description, - alternative_paths=alternative_paths, copy=copy, - _saved_ds=saved_ds, **kwargs) + use_rel_paths=use_rel_paths, + ds_description=ds_description, + alternative_paths=alternative_paths, + copy=copy, + _saved_ds=saved_ds, + **kwargs, + ) else: if standardize_dims: idims = arr.psy.decoder.standardize_dims( - next(arr.psy.iter_base_variables), arr.psy.idims) + next(arr.psy.iter_base_variables), arr.psy.idims + ) else: idims = arr.psy.idims - ret[arr.psy.arr_name] = d = {'dims': idims} - if 'variable' in arr.coords: - d['name'] = [list(arr.coords['variable'].values)] + ret[arr.psy.arr_name] = d = {"dims": idims} + if "variable" in arr.coords: + d["name"] = [list(arr.coords["variable"].values)] else: - d['name'] = arr.name - if 'fname' in ds_description or 'store' in ds_description: + d["name"] = arr.name + if "fname" in ds_description 
or "store" in ds_description: fname, store_mod, store_cls = get_filename_ds( - arr.psy.base, dump=dump, paths=paths, **kwargs) - if 'store' in ds_description: - d['store'] = (store_mod, store_cls) - if 'fname' in ds_description: - d['fname'] = [] + arr.psy.base, dump=dump, paths=paths, **kwargs + ) + if "store" in ds_description: + d["store"] = (store_mod, store_cls) + if "fname" in ds_description: + d["fname"] = [] for i, f in enumerate(safe_list(fname)): - if (f is None or utils.is_remote_url(f)): - d['fname'].append(f) + if f is None or utils.is_remote_url(f): + d["fname"].append(f) else: found, f = get_alternative(f) if use_rel_paths: f = osp.relpath(f, pwd) else: f = osp.abspath(f) - d['fname'].append(f) - if fname is None or isinstance(fname, - six.string_types): - d['fname'] = d['fname'][0] + d["fname"].append(f) + if fname is None or isinstance( + fname, six.string_types + ): + d["fname"] = d["fname"][0] else: - d['fname'] = tuple(safe_list(fname)) + d["fname"] = tuple(safe_list(fname)) if arr.psy.base.psy._concat_dim is not None: - d['concat_dim'] = arr.psy.base.psy._concat_dim + d["concat_dim"] = arr.psy.base.psy._concat_dim if arr.psy.base.psy._combine is not None: - d['combine'] = arr.psy.base.psy._combine - if 'ds' in ds_description: + d["combine"] = arr.psy.base.psy._combine + if "ds" in ds_description: if full_ds: - d['ds'] = copy_obj(arr.psy.base) + d["ds"] = copy_obj(arr.psy.base) else: - d['ds'] = copy_obj(arr.to_dataset()) - if 'num' in ds_description: - d['num'] = arr.psy.base.psy.num - if 'arr' in ds_description: - d['arr'] = copy_obj(arr) + d["ds"] = copy_obj(arr.to_dataset()) + if "num" in ds_description: + d["num"] = arr.psy.base.psy.num + if "arr" in ds_description: + d["arr"] = copy_obj(arr) if attrs: - d['attrs'] = arr.attrs - ret['attrs'] = self.attrs + d["attrs"] = arr.attrs + ret["attrs"] = self.attrs return ret def _get_tnames(self): @@ -4312,13 +4810,23 @@ def _get_tnames(self): if isinstance(arr, InteractiveList): 
tnames.update(arr.get_tnames()) else: - tnames.add(arr.psy.decoder.get_tname( - next(arr.psy.iter_base_variables), arr.coords)) + tnames.add( + arr.psy.decoder.get_tname( + next(arr.psy.iter_base_variables), arr.coords + ) + ) return tnames - {None} @docstrings.dedent - def _register_update(self, method='isel', replot=False, dims={}, fmt={}, - force=False, todefault=False): + def _register_update( + self, + method="isel", + replot=False, + dims={}, + fmt={}, + force=False, + todefault=False, + ): """ Register new dimensions and formatoptions for updating. The keywords are the same as for each single array @@ -4328,10 +4836,16 @@ def _register_update(self, method='isel', replot=False, dims={}, fmt={}, %(InteractiveArray._register_update.parameters)s""" for arr in self: - arr.psy._register_update(method=method, replot=replot, dims=dims, - fmt=fmt, force=force, todefault=todefault) - - @docstrings.get_sections(base='ArrayList.start_update') + arr.psy._register_update( + method=method, + replot=replot, + dims=dims, + fmt=fmt, + force=force, + todefault=todefault, + ) + + @docstrings.get_sections(base="ArrayList.start_update") @dedent def start_update(self, draw=None): """ @@ -4352,16 +4866,22 @@ def start_update(self, draw=None): See Also -------- :attr:`no_auto_update`, update""" + def worker(arr): results[arr.psy.arr_name] = arr.psy.start_update( - draw=False, queues=queues) + draw=False, queues=queues + ) + if len(self) == 0: return results = {} - threads = [Thread(target=worker, args=(arr,), - name='update_%s' % arr.psy.arr_name) - for arr in self] + threads = [ + Thread( + target=worker, args=(arr,), name="update_%s" % arr.psy.arr_name + ) + for arr in self + ] jobs = [arr.psy._njobs for arr in self] queues = [Queue() for _ in range(max(map(len, jobs)))] # populate the queues @@ -4376,21 +4896,33 @@ def worker(arr): for thread in threads: thread.join() if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw: - self(arr_name=[name for 
name, adraw in six.iteritems(results) - if adraw]).draw() - if rcParams['auto_show']: + self( + arr_name=[ + name for name, adraw in six.iteritems(results) if adraw + ] + ).draw() + if rcParams["auto_show"]: self.show() - docstrings.keep_params('InteractiveArray.update.parameters', - 'auto_update') + docstrings.keep_params("InteractiveArray.update.parameters", "auto_update") - @docstrings.get_sections(base='ArrayList.update') + @docstrings.get_sections(base="ArrayList.update") @docstrings.dedent - def update(self, method='isel', dims={}, fmt={}, replot=False, - auto_update=False, draw=None, force=False, todefault=False, - enable_post=None, **kwargs): + def update( + self, + method="isel", + dims={}, + fmt={}, + replot=False, + auto_update=False, + draw=None, + force=False, + todefault=False, + enable_post=None, + **kwargs, + ): """ Update the coordinates and the plot @@ -4418,14 +4950,21 @@ def update(self, method='isel', dims={}, fmt={}, replot=False, no_auto_update, start_update""" dims = dict(dims) fmt = dict(fmt) - vars_and_coords = set(chain( - self.dims, self.coords, ['name', 'x', 'y', 'z', 't'])) + vars_and_coords = set( + chain(self.dims, self.coords, ["name", "x", "y", "z", "t"]) + ) furtherdims, furtherfmt = utils.sort_kwargs(kwargs, vars_and_coords) dims.update(furtherdims) fmt.update(furtherfmt) - self._register_update(method=method, replot=replot, dims=dims, fmt=fmt, - force=force, todefault=todefault) + self._register_update( + method=method, + replot=replot, + dims=dims, + fmt=fmt, + force=force, + todefault=todefault, + ) if enable_post is not None: for arr in self.with_plotter: arr.psy.plotter.enable_post = enable_post @@ -4434,8 +4973,11 @@ def update(self, method='isel', dims={}, fmt={}, replot=False, def draw(self): """Draws all the figures in this instance""" - for fig in set(chain(*map( - lambda arr: arr.psy.plotter.figs2draw, self.with_plotter))): + for fig in set( + chain( + *map(lambda arr: arr.psy.plotter.figs2draw, self.with_plotter) + ) 
+ ): self.logger.debug("Drawing figure %s", fig.number) fig.canvas.draw() for arr in self: @@ -4443,7 +4985,7 @@ def draw(self): arr.psy.plotter._figs2draw.clear() self.logger.debug("Done drawing.") - def __call__(self, types=None, method='isel', fmts=[], **attrs): + def __call__(self, types=None, method="isel", fmts=[], **attrs): """Get the arrays specified by their attributes Parameters @@ -4468,34 +5010,45 @@ def __call__(self, types=None, method='isel', fmts=[], **attrs): Values may be iterables (e.g. lists) of the attributes to consider or callable functions that accept the attribute as a value. If the value is a string, it will be put into a list.""" + def safe_item_list(key, val): return key, val if callable(val) else safe_list(val) def filter_list(arr): other_attrs = attrs.copy() - arr_names = other_attrs.pop('arr_name', None) - return ((arr_names is None or ( - arr_names(arr.psy.arr_name) if callable(arr_names) - else arr.psy.arr_name in arr_names)) and - len(arr) == len(arr(types=types, method=method, - **other_attrs))) + arr_names = other_attrs.pop("arr_name", None) + return ( + arr_names is None + or ( + arr_names(arr.psy.arr_name) + if callable(arr_names) + else arr.psy.arr_name in arr_names + ) + ) and len(arr) == len( + arr(types=types, method=method, **other_attrs) + ) + if not attrs: + def filter_by_attrs(arr): return True - elif method == 'sel': + + elif method == "sel": + def filter_by_attrs(arr): if isinstance(arr, InteractiveList): return filter_list(arr) tname = arr.psy.decoder.get_tname( - next(six.itervalues(arr.psy.base_variables))) + next(six.itervalues(arr.psy.base_variables)) + ) def check_values(arr, key, vals): - if key == 'arr_name': + if key == "arr_name": attr = arr.psy.arr_name - elif key == 'ax': + elif key == "ax": attr = arr.psy.ax - elif key == 'fig': - attr = getattr(arr.psy.ax, 'figure', None) + elif key == "fig": + attr = getattr(arr.psy.ax, "figure", None) else: try: attr = getattr(arr, key) @@ -4503,8 +5056,7 @@ def 
check_values(arr, key, vals): return False if np.ndim(attr): # do not filter for multiple items return False - if hasattr(arr.psy, 'decoder') and ( - arr.name == tname): + if hasattr(arr.psy, "decoder") and (arr.name == tname): try: vals = np.asarray(vals, dtype=np.datetime64) except ValueError: @@ -4513,20 +5065,28 @@ def check_values(arr, key, vals): return attr.values.astype(vals.dtype) in vals if callable(vals): return vals(attr) - return getattr(attr, 'values', attr) in vals + return getattr(attr, "values", attr) in vals + return all( check_values(arr, key, val) for key, val in six.iteritems( - arr.psy.decoder.correct_dims(next(six.itervalues( - arr.psy.base_variables)), attrs, remove=False))) + arr.psy.decoder.correct_dims( + next(six.itervalues(arr.psy.base_variables)), + attrs, + remove=False, + ) + ) + ) + else: + def check_values(arr, key, vals): - if key == 'arr_name': + if key == "arr_name": attr = arr.psy.arr_name - elif key == 'ax': + elif key == "ax": attr = arr.psy.ax - elif key == 'fig': - attr = getattr(arr.psy.ax, 'figure', None) + elif key == "fig": + attr = getattr(arr.psy.ax, "figure", None) elif key in arr.coords: attr = arr.psy.idims[key] else: @@ -4546,23 +5106,37 @@ def filter_by_attrs(arr): return all( check_values(arr, key, val) for key, val in six.iteritems( - arr.psy.decoder.correct_dims(next(six.itervalues( - arr.psy.base_variables)), attrs, remove=False))) + arr.psy.decoder.correct_dims( + next(six.itervalues(arr.psy.base_variables)), + attrs, + remove=False, + ) + ) + ) + attrs = dict(starmap(safe_item_list, six.iteritems(attrs))) ret = self.__class__( # iterable - (arr for arr in self if - (types is None or isinstance(arr.psy.plotter, types)) and - filter_by_attrs(arr)), + ( + arr + for arr in self + if (types is None or isinstance(arr.psy.plotter, types)) + and filter_by_attrs(arr) + ), # give itself as base and the auto_update parameter - auto_update=bool(self.auto_update)) + auto_update=bool(self.auto_update), + ) # now filter 
for the formatoptions if fmts: fmts = set(safe_list(fmts)) ret = self.__class__( - filter(lambda arr: (arr.psy.plotter and - fmts <= set(arr.psy.plotter)), - ret)) + filter( + lambda arr: ( + arr.psy.plotter and fmts <= set(arr.psy.plotter) + ), + ret, + ) + ) return ret def __contains__(self, val): @@ -4572,16 +5146,18 @@ def __contains__(self, val): return False else: return name in self.arr_names and ( - isstring(val) or self._contains_array(val)) + isstring(val) or self._contains_array(val) + ) def _contains_array(self, val): """Checks whether exactly this array is in the list""" arr = self(arr_name=val.psy.arr_name)[0] is_not_list = any( - map(lambda a: not isinstance(a, InteractiveList), - [arr, val])) - is_list = any(map(lambda a: isinstance(a, InteractiveList), - [arr, val])) + map(lambda a: not isinstance(a, InteractiveList), [arr, val]) + ) + is_list = any( + map(lambda a: isinstance(a, InteractiveList), [arr, val]) + ) # if one is an InteractiveList and the other not, they differ if is_list and is_not_list: return False @@ -4594,19 +5170,22 @@ def _contains_array(self, val): def _short_info(self, intend=0, maybe=False): if maybe: intend = 0 - str_intend = ' ' * intend + str_intend = " " * intend if len(self) == 1: return str_intend + "%s%s.%s([%s])" % ( - '' if not hasattr(self, 'arr_name') else self.arr_name + ': ', - self.__class__.__module__, self.__class__.__name__, - self[0].psy._short_info(intend+4, maybe=True)) + "" if not hasattr(self, "arr_name") else self.arr_name + ": ", + self.__class__.__module__, + self.__class__.__name__, + self[0].psy._short_info(intend + 4, maybe=True), + ) return str_intend + "%s%s.%s([\n%s])" % ( - '' if not hasattr(self, 'arr_name') else self.arr_name + ': ', - self.__class__.__module__, self.__class__.__name__, + "" if not hasattr(self, "arr_name") else self.arr_name + ": ", + self.__class__.__module__, + self.__class__.__name__, ",\n".join( - '%s' % ( - arr.psy._short_info(intend+4)) - for arr in self)) + "%s" % 
(arr.psy._short_info(intend + 4)) for arr in self + ), + ) def __str__(self): return self._short_info() @@ -4618,16 +5197,16 @@ def __getitem__(self, key): """Overwrites lists __getitem__ by returning an ArrayList if `key` is a slice""" if isinstance(key, slice): # return a new ArrayList - return self.__class__( - super(ArrayList, self).__getitem__(key)) + return self.__class__(super(ArrayList, self).__getitem__(key)) else: # return the item return super(ArrayList, self).__getitem__(key) if six.PY2: # for compatibility to python 2.7 + def __getslice__(self, *args): return self[slice(*args)] - def next_available_name(self, fmt_str='arr{0}', counter=None): + def next_available_name(self, fmt_str="arr{0}", counter=None): """Create a new array out of the given format string Parameters @@ -4646,11 +5225,10 @@ def next_available_name(self, fmt_str='arr{0}', counter=None): counter = counter or iter(range(1000)) try: new_name = next( - filter(lambda n: n not in names, - map(fmt_str.format, counter))) + filter(lambda n: n not in names, map(fmt_str.format, counter)) + ) except StopIteration: - raise ValueError( - "{0} already in the list".format(fmt_str)) + raise ValueError("{0} already in the list".format(fmt_str)) return new_name @docstrings.dedent @@ -4694,9 +5272,13 @@ def extend(self, iterable, new_name=False): -------- list.extend, append, rename""" # extend those arrays that aren't alredy in the list - super(ArrayList, self).extend(t[0] for t in filter( - lambda t: t[1] is not None, ( - self.rename(arr, new_name) for arr in iterable))) + super(ArrayList, self).extend( + t[0] + for t in filter( + lambda t: t[1] is not None, + (self.rename(arr, new_name) for arr in iterable), + ) + ) def remove(self, arr): """Removes an array from the list @@ -4712,17 +5294,15 @@ def remove(self, arr): If no array with the specified array name is in the list""" name = arr if isinstance(arr, six.string_types) else arr.psy.arr_name if arr not in self: - raise ValueError( - "Array {0} not 
in the list".format(name)) + raise ValueError("Array {0} not in the list".format(name)) for i, arr in enumerate(self): if arr.psy.arr_name == name: del self[i] return - raise ValueError( - "No array found with name {0}".format(name)) + raise ValueError("No array found with name {0}".format(name)) -@xr.register_dataset_accessor('psy') +@xr.register_dataset_accessor("psy") class DatasetAccessor(object): """A dataset accessor to interface with the psyplot package""" @@ -4770,6 +5350,7 @@ def plot(self): """ if self._plot is None: import psyplot.project as psy + self._plot = psy.DatasetPlotter(self.ds) return self._plot @@ -4791,7 +5372,7 @@ def data_store(self): dataset""" store_info = self._data_store if store_info is None or any(s is None for s in store_info): - store = getattr(self.ds, '_file_obj', None) + store = getattr(self.ds, "_file_obj", None) store_mod = store.__module__ if store is not None else None store_cls = store.__class__.__name__ if store is not None else None return store_mod, store_cls @@ -4839,13 +5420,14 @@ def __getitem__(self, key): return ret def __getattr__(self, attr): - if attr != 'ds' and attr in self.ds: + if attr != "ds" and attr in self.ds: ret = getattr(self.ds, attr) ret.psy.base = self.ds return ret else: - raise AttributeError("%s has not Attribute %s" % ( - self.__class__.__name__, attr)) + raise AttributeError( + "%s has not Attribute %s" % (self.__class__.__name__, attr) + ) def copy(self, deep=False): """Copy the array @@ -4864,8 +5446,9 @@ class InteractiveList(ArrayList, InteractiveBase): through :class:`psyplot.plotter.Plotter` classes. 
It is mainly used by the :mod:`psyplot.plotter.simple` module""" - no_auto_update = property(_no_auto_update_getter, - doc=_no_auto_update_getter.__doc__) + no_auto_update = property( + _no_auto_update_getter, doc=_no_auto_update_getter.__doc__ + ) @no_auto_update.setter def no_auto_update(self, value): @@ -4887,7 +5470,7 @@ def psy(self): logger = InteractiveBase.logger - docstrings.delete_params('InteractiveBase.parameters', 'auto_update') + docstrings.delete_params("InteractiveBase.parameters", "auto_update") @docstrings.dedent def __init__(self, *args, **kwargs): @@ -4897,15 +5480,23 @@ def __init__(self, *args, **kwargs): %(ArrayList.parameters)s %(InteractiveBase.parameters.no_auto_update)s""" ibase_kwargs, array_kwargs = utils.sort_kwargs( - kwargs, ['plotter', 'arr_name']) + kwargs, ["plotter", "arr_name"] + ) self._registered_updates = {} InteractiveBase.__init__(self, **ibase_kwargs) with self.block_signals: ArrayList.__init__(self, *args, **kwargs) @docstrings.dedent - def _register_update(self, method='isel', replot=False, dims={}, fmt={}, - force=False, todefault=False): + def _register_update( + self, + method="isel", + replot=False, + dims={}, + fmt={}, + force=False, + todefault=False, + ): """ Register new dimensions and formatoptions for updating @@ -4913,9 +5504,13 @@ def _register_update(self, method='isel', replot=False, dims={}, fmt={}, ---------- %(InteractiveArray._register_update.parameters)s""" ArrayList._register_update(self, method=method, dims=dims) - InteractiveBase._register_update(self, fmt=fmt, todefault=todefault, - replot=bool(dims) or replot, - force=force) + InteractiveBase._register_update( + self, + fmt=fmt, + todefault=todefault, + replot=bool(dims) or replot, + force=force, + ) @docstrings.dedent def start_update(self, draw=None, queues=None): @@ -4956,24 +5551,27 @@ def start_update(self, draw=None, queues=None): def to_dataframe(self): def to_df(arr): df = arr.to_pandas() - if hasattr(df, 'to_frame'): + if hasattr(df, 
"to_frame"): df = df.to_frame() if not keep_names: return df.rename(columns={df.keys()[0]: arr.psy.arr_name}) return df + if len(self) == 1: return self[0].to_series().to_frame() else: keep_names = len(set(arr.name for arr in self)) == self df = to_df(self[0]) for arr in self[1:]: - df = df.merge(to_df(arr), left_index=True, right_index=True, - how='outer') + df = df.merge( + to_df(arr), left_index=True, right_index=True, how="outer" + ) return df - docstrings.delete_params('ArrayList.from_dataset.parameters', 'plotter') - docstrings.delete_kwargs('ArrayList.from_dataset.other_parameters', - 'args', 'kwargs') + docstrings.delete_params("ArrayList.from_dataset.parameters", "plotter") + docstrings.delete_kwargs( + "ArrayList.from_dataset.other_parameters", "args", "kwargs" + ) @classmethod @docstrings.dedent @@ -5000,8 +5598,8 @@ def from_dataset(cls, *args, **kwargs): Returns ------- %(ArrayList.from_dataset.returns)s""" - plotter = kwargs.pop('plotter', None) - make_plot = kwargs.pop('make_plot', True) + plotter = kwargs.pop("plotter", None) + make_plot = kwargs.pop("make_plot", True) instance = super(InteractiveList, cls).from_dataset(*args, **kwargs) if plotter is not None: plotter.initialize_plot(instance, make_plot=make_plot) @@ -5025,6 +5623,7 @@ class _MissingModule(object): """Class that can be used if an optional module is not avaible. 
This class raises an error if any attribute is accessed or it is called""" + def __init__(self, error): """ Parameters @@ -5055,10 +5654,12 @@ def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs): store_mod = repeat(store_mod) if isstring(store_cls): store_cls = repeat(store_cls) - fname = [_open_store(sm, sc, f) - for sm, sc, f in zip(store_mod, store_cls, fname)] - kwargs['engine'] = None - kwargs['lock'] = False + fname = [ + _open_store(sm, sc, f) + for sm, sc, f in zip(store_mod, store_cls, fname) + ] + kwargs["engine"] = None + kwargs["lock"] = False return open_mfdataset(fname, **kwargs) else: # try guessing with open_dataset @@ -5076,21 +5677,24 @@ def decode(t): # round microseconds if rest.microseconds: rest += dt.timedelta(microseconds=1e6 - rest.microseconds) - return np.datetime64(dt.datetime.strptime( - "%i" % day, "%Y%m%d") + rest) + return np.datetime64(dt.datetime.strptime("%i" % day, "%Y%m%d") + rest) + return np.vectorize(decode, [np.datetime64])(times) def encode_absolute_time(times): def encode(t): t = to_datetime(t) - return float(t.strftime('%Y%m%d')) + ( - t - dt.datetime(t.year, t.month, t.day)).total_seconds() / 86400. 
+ return ( + float(t.strftime("%Y%m%d")) + + (t - dt.datetime(t.year, t.month, t.day)).total_seconds() + / 86400.0 + ) + return np.vectorize(encode, [float])(times) class AbsoluteTimeDecoder(NDArrayMixin): - def __init__(self, array): self.array = array example_value = first_n_items(array, 1) or 0 @@ -5100,7 +5704,7 @@ def __init__(self, array): logger.error("Could not interprete absolute time values!") raise else: - self._dtype = getattr(result, 'dtype', np.dtype('object')) + self._dtype = getattr(result, "dtype", np.dtype("object")) @property def dtype(self): @@ -5111,7 +5715,6 @@ def __getitem__(self, key): class AbsoluteTimeEncoder(NDArrayMixin): - def __init__(self, array): self.array = array example_value = first_n_items(array, 1) or 0 @@ -5121,7 +5724,7 @@ def __init__(self, array): logger.error("Could not interprete absolute time values!") raise else: - self._dtype = getattr(result, 'dtype', np.dtype('object')) + self._dtype = getattr(result, "dtype", np.dtype("object")) @property def dtype(self): diff --git a/psyplot/docstring.py b/psyplot/docstring.py index 54c5236..d1e6673 100755 --- a/psyplot/docstring.py +++ b/psyplot/docstring.py @@ -5,33 +5,18 @@ .. _docrep: http://docrep.readthedocs.io/en/latest/ """ -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only +import inspect import types + import six -import inspect -from docrep import DocstringProcessor, safe_modulo +from docrep import DocstringProcessor, safe_modulo # noqa: F401 def dedent(func): @@ -50,24 +35,28 @@ def dedent(func): def indent(text, num=4): """Indet the given string""" - str_indent = ' ' * num - return str_indent + ('\n' + str_indent).join(text.splitlines()) + str_indent = " " * num + return str_indent + ("\n" + str_indent).join(text.splitlines()) def append_original_doc(parent, num=0): """Return an iterator that append the docstring of the given `parent` function to the applied function""" + def func(func): func.__doc__ = func.__doc__ and func.__doc__ + indent( - parent.__doc__, num) + parent.__doc__, num + ) return func + return func _docstrings = DocstringProcessor() -_docstrings.get_sections(base='DocstringProcessor.get_sections')( - dedent(DocstringProcessor.get_sections)) +_docstrings.get_sections(base="DocstringProcessor.get_sections")( + dedent(DocstringProcessor.get_sections) +) class PsyplotDocstringProcessor(DocstringProcessor): @@ -76,11 +65,16 @@ class PsyplotDocstringProcessor(DocstringProcessor): """ param_like_sections = DocstringProcessor.param_like_sections + [ - 'Possible types'] + "Possible types" + ] @_docstrings.dedent - def get_sections(self, s=None, base=None, sections=[ - 'Parameters', 'Other Parameters', 'Possible types']): + def get_sections( + self, + s=None, + base=None, + sections=["Parameters", "Other Parameters", "Possible types"], + ): """ Extract the specified sections out of the given string @@ -96,8 +90,10 @@ def 
get_sections(self, s=None, base=None, sections=[ str The replaced string """ - return super(PsyplotDocstringProcessor, self).get_sections(s, base, - sections) + return super(PsyplotDocstringProcessor, self).get_sections( + s, base, sections + ) + del _docstrings diff --git a/psyplot/gdal_store.py b/psyplot/gdal_store.py index fca1b48..0c700ab 100755 --- a/psyplot/gdal_store.py +++ b/psyplot/gdal_store.py @@ -11,48 +11,33 @@ >>> from psyplot.gdal_store import GdalStore >>> from xarray import open_dataset - >>> ds = open_dataset(GdalStore('my_tiff')) + >>> ds = open_dataset(GdalStore("my_tiff")) Or you use the `engine` of the :func:`psyplot.open_dataset` function: - >>> ds = open_dataset('my_tiff.tiff', engine='gdal') + >>> ds = open_dataset("my_tiff.tiff", engine="gdal") """ -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only + import six -from numpy import arange, nan, dtype +from numpy import arange, dtype, nan from xarray import Variable -from collections import OrderedDict +from xarray.backends.common import AbstractDataStore + +import psyplot.data as psyd +from psyplot.warning import warn + try: from xarray.core.utils import FrozenOrderedDict except ImportError: FrozenOrderedDict = dict -from xarray.backends.common import AbstractDataStore -from psyplot.compat.pycompat import range -from psyplot.warning import warn -import psyplot.data as psyd try: import gdal from osgeo import gdal_array @@ -60,6 +45,7 @@ gdal = psyd._MissingModule(e) try: from dask.array import Array + with_dask = True except ImportError: with_dask = False @@ -71,7 +57,7 @@ class GdalStore(AbstractDataStore): We recommend to use the :func:`psyplot.open_dataset` function to open a geotiff file:: - >>> ds = psyplot.open_dataset('my_geotiff.tiff', engine='gdal') + >>> ds = psyplot.open_dataset("my_geotiff.tiff", engine="gdal") Notes ----- @@ -110,60 +96,72 @@ def load(band): except ValueError: pass return a + ds = self.ds - dims = ['lat', 'lon'] + dims = ["lat", "lon"] chunks = ((ds.RasterYSize,), (ds.RasterXSize,)) shape = (ds.RasterYSize, ds.RasterXSize) - variables = OrderedDict() - for iband in range(1, ds.RasterCount+1): + variables = dict() + for iband in range(1, ds.RasterCount + 1): band = ds.GetRasterBand(iband) dt = dtype(gdal_array.codes[band.DataType]) if with_dask: - dsk = {('x', 0, 0): (load, iband)} - arr = Array(dsk, 'x', chunks, shape=shape, dtype=dt) + dsk = {("x", 0, 0): (load, iband)} + arr = Array(dsk, "x", chunks, shape=shape, dtype=dt) else: arr = load(iband) attrs = band.GetMetadata_Dict() try: dt.type(nan) - attrs['_FillValue'] = nan + attrs["_FillValue"] = nan except ValueError: no_data = band.GetNoDataValue() - attrs.update({'_FillValue': no_data} if no_data else {}) - variables['Band%i' % iband] = Variable(dims, arr, attrs) - variables['lat'], 
variables['lon'] = self._load_GeoTransform() + attrs.update({"_FillValue": no_data} if no_data else {}) + variables["Band%i" % iband] = Variable(dims, arr, attrs) + variables["lat"], variables["lon"] = self._load_GeoTransform() return FrozenOrderedDict(variables) def _load_GeoTransform(self): """Calculate latitude and longitude variable calculated from the gdal.Open.GetGeoTransform method""" + def load_lon(): - return arange(ds.RasterXSize)*b[1]+b[0] + return arange(ds.RasterXSize) * b[1] + b[0] def load_lat(): - return arange(ds.RasterYSize)*b[5]+b[3] + return arange(ds.RasterYSize) * b[5] + b[3] + ds = self.ds b = self.ds.GetGeoTransform() # bbox, interval if with_dask: lat = Array( - {('lat', 0): (load_lat,)}, 'lat', (self.ds.RasterYSize,), - shape=(self.ds.RasterYSize,), dtype=float) + {("lat", 0): (load_lat,)}, + "lat", + (self.ds.RasterYSize,), + shape=(self.ds.RasterYSize,), + dtype=float, + ) lon = Array( - {('lon', 0): (load_lon,)}, 'lon', (self.ds.RasterXSize,), - shape=(self.ds.RasterXSize,), dtype=float) + {("lon", 0): (load_lon,)}, + "lon", + (self.ds.RasterXSize,), + shape=(self.ds.RasterXSize,), + dtype=float, + ) else: lat = load_lat() lon = load_lon() - return Variable(('lat',), lat), Variable(('lon',), lon) + return Variable(("lat",), lat), Variable(("lon",), lon) def get_attrs(self): from osr import SpatialReference + attrs = self.ds.GetMetadata() try: sp = SpatialReference(wkt=self.ds.GetProjection()) proj4 = sp.ExportToProj4() - except: - warn('Could not identify projection') + except Exception: + warn("Could not identify projection") else: - attrs['proj4'] = proj4 + attrs["proj4"] = proj4 return FrozenOrderedDict(attrs) diff --git a/psyplot/plotter.py b/psyplot/plotter.py index 620c6de..ddd50b6 100755 --- a/psyplot/plotter.py +++ b/psyplot/plotter.py @@ -5,49 +5,38 @@ :class:`Plotter` combines a set of formatoption keys where each formatoption key is represented by a :class:`Formatoption` subclass.""" -# Disclaimer -# ---------- -# -# 
Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import six +import logging import weakref from abc import ABCMeta, abstractmethod -from textwrap import TextWrapper -import logging -from itertools import chain, groupby, tee, repeat, starmap from collections import defaultdict -from threading import RLock from datetime import datetime, timedelta -from numpy import datetime64, timedelta64, ndarray, inf -from xarray.core.formatting import format_timestamp, format_timedelta +from itertools import chain, groupby, repeat, starmap, tee +from textwrap import TextWrapper +from threading import RLock + +import six +from numpy import datetime64, inf, ndarray, timedelta64 +from xarray.core.formatting import format_timedelta, format_timestamp + from psyplot import rcParams -from psyplot.warning import warn, critical, PsyPlotRuntimeWarning -from psyplot.compat.pycompat import map, filter, zip, range from psyplot.config.rcsetup import SubDict -from psyplot.docstring import docstrings, dedent -from psyplot.data import ( - InteractiveList, _no_auto_update_getter, CFDecoder) -from psyplot.utils import (DefaultOrderedDict, _TempBool, _temp_bool_prop, - unique_everseen, check_key) +from psyplot.data import CFDecoder, InteractiveList, _no_auto_update_getter +from psyplot.docstring import dedent, docstrings +from psyplot.utils import ( + Defaultdict, + _temp_bool_prop, + _TempBool, + check_key, + unique_everseen, +) +from psyplot.warning import PsyPlotRuntimeWarning, warn #: the default function to use when printing formatoption infos (the default is #: use print or in the gui, use the help explorer) @@ -56,18 +45,18 @@ #: :class:`dict`. 
Mapping from group to group names groups = { - 'data': 'Data manipulation formatoptions', - 'axes': 'Axes formatoptions', - 'labels': 'Label formatoptions', - 'plotting': 'Plot formatoptions', - 'post_processing': 'Post processing formatoptions', - 'colors': 'Color coding formatoptions', - 'misc': 'Miscallaneous formatoptions', - 'ticks': 'Axis tick formatoptions', - 'vector': 'Vector plot formatoptions', - 'masking': 'Masking formatoptions', - 'regression': 'Fitting formatoptions', - } + "data": "Data manipulation formatoptions", + "axes": "Axes formatoptions", + "labels": "Label formatoptions", + "plotting": "Plot formatoptions", + "post_processing": "Post processing formatoptions", + "colors": "Color coding formatoptions", + "misc": "Miscallaneous formatoptions", + "ticks": "Axis tick formatoptions", + "vector": "Vector plot formatoptions", + "masking": "Masking formatoptions", + "regression": "Fitting formatoptions", +} def _identity(*args): @@ -131,7 +120,8 @@ def get_x(self): return getattr(self.plotter, self._child_mapping[childname]) return property( - get_x, doc=childname + " Formatoption instance in the plotter") + get_x, doc=childname + " Formatoption instance in the plotter" + ) class FormatoptionMeta(ABCMeta): @@ -141,15 +131,21 @@ class FormatoptionMeta(ABCMeta): efficient docstring generation by using the :attr:`psyplot.docstring.docstrings` when creating a new formatoption class""" + def __new__(cls, clsname, bases, dct): """Assign an automatic documentation to the formatoption""" - doc = dct.get('__doc__') + doc = dct.get("__doc__") if doc is not None: - dct['__doc__'] = docstrings.dedent(doc) - new_cls = super(FormatoptionMeta, cls).__new__(cls, clsname, bases, - dct) - for childname in chain(new_cls.children, new_cls.dependencies, - new_cls.connections, new_cls.parents): + dct["__doc__"] = docstrings.dedent(doc) + new_cls = super(FormatoptionMeta, cls).__new__( + cls, clsname, bases, dct + ) + for childname in chain( + new_cls.children, + 
new_cls.dependencies, + new_cls.connections, + new_cls.parents, + ): setattr(new_cls, childname, _child_property(childname)) if new_cls.plot_fmt: new_cls.data_dependent = True @@ -230,7 +226,7 @@ def plotter(self, value): #: :class:`str`. Key of the group name in :data:`groups` of this #: formatoption keyword - group = 'misc' + group = "misc" #: :class:`bool` or a callable. This attribute indicates whether this #: :class:`Formatoption` depends on the data and should be updated if the @@ -297,15 +293,18 @@ def groupname(self): try: return groups[self.group] except KeyError: - warn("Unknown formatoption group " + str(self.group), - PsyPlotRuntimeWarning) + warn( + "Unknown formatoption group " + str(self.group), + PsyPlotRuntimeWarning, + ) return self.group @property def raw_data(self): """The original data of the plotter of this formatoption""" if self.index_in_list is not None and isinstance( - self.plotter.data, InteractiveList): + self.plotter.data, InteractiveList + ): return self.plotter.data[self.index_in_list] else: return self.plotter.data @@ -317,7 +316,8 @@ def decoder(self): # If the decoder is modified by one of the formatoptions, use this one if self.plotter.plot_data_decoder is not None: if self.index_in_list is not None and isinstance( - self.plotter.plot_data, InteractiveList): + self.plotter.plot_data, InteractiveList + ): ret = self.plotter.plot_data_decoder[self.index_in_list] if ret is not None: return ret @@ -346,7 +346,8 @@ def any_decoder(self): def data(self): """The data that is plotted""" if self.index_in_list is not None and isinstance( - self.plotter.plot_data, InteractiveList): + self.plotter.plot_data, InteractiveList + ): return self.plotter.plot_data[self.index_in_list] else: return self.plotter.plot_data @@ -380,9 +381,13 @@ def validate(self): try: self._validate = self.plotter.get_vfunc(self.key) except KeyError: - warn("Could not find a validation function for %s " - "formatoption keyword! No validation will be made!" 
% ( - self.key), PsyPlotRuntimeWarning, logger=self.logger) + warn( + "Could not find a validation function for %s " + "formatoption keyword! No validation will be made!" + % (self.key), + PsyPlotRuntimeWarning, + logger=self.logger, + ) self._validate = _identity return self._validate @@ -440,11 +445,17 @@ def value2pickle(self): """ return self.value - @docstrings.get_sections(base='Formatoption') + @docstrings.get_sections(base="Formatoption") @dedent - def __init__(self, key, plotter=None, index_in_list=None, - additional_children=[], additional_dependencies=[], - **kwargs): + def __init__( + self, + key, + plotter=None, + index_in_list=None, + additional_children=[], + additional_dependencies=[], + **kwargs, + ): """ Parameters ---------- @@ -476,24 +487,38 @@ def __init__(self, key, plotter=None, index_in_list=None, self.additional_dependencies = additional_dependencies self.children = self.children + additional_children self.dependencies = self.dependencies + additional_dependencies - self._child_mapping = dict(zip(*tee(chain( - self.children, self.dependencies, self.connections, - self.parents), 2))) + self._child_mapping = dict( + zip( + *tee( + chain( + self.children, + self.dependencies, + self.connections, + self.parents, + ), + 2, + ) + ) + ) # check kwargs for key in (key for key in kwargs if key not in self._child_mapping): raise TypeError( - '%s.__init__() got an unexpected keyword argument %r' % ( - self.__class__.__name__, key)) + "%s.__init__() got an unexpected keyword argument %r" + % (self.__class__.__name__, key) + ) # set up child mapping self._child_mapping.update(kwargs) # reset the dependency lists to match the current plotter setup - for attr in ['children', 'dependencies', 'connections', 'parents']: - setattr(self, attr, - [self._child_mapping[key] for key in getattr(self, attr)]) + for attr in ["children", "dependencies", "connections", "parents"]: + setattr( + self, + attr, + [self._child_mapping[key] for key in getattr(self, attr)], 
+ ) def __set__(self, instance, value): if isinstance(value, Formatoption): - setattr(instance, '_' + self.key, value) + setattr(instance, "_" + self.key, value) else: fmto = getattr(instance, self.key) fmto.set_value(value) @@ -502,22 +527,25 @@ def __get__(self, instance, owner): if instance is None: return self try: - return getattr(instance, '_' + self.key) + return getattr(instance, "_" + self.key) except AttributeError: fmto = self.__class__( - self.key, instance, self.index_in_list, + self.key, + instance, + self.index_in_list, additional_children=self.additional_children, additional_dependencies=self.additional_dependencies, - **self.init_kwargs) - setattr(instance, '_' + self.key, fmto) + **self.init_kwargs, + ) + setattr(instance, "_" + self.key, fmto) return fmto def __delete__(self, instance, owner): - fmto = getattr(instance, '_' + self.key) + fmto = getattr(instance, "_" + self.key) with instance.no_validation: instance[self.key] = fmto.default - @docstrings.get_sections(base='Formatoption.set_value') + @docstrings.get_sections(base="Formatoption.set_value") @dedent def set_value(self, value, validate=True, todefault=False): """ @@ -536,8 +564,9 @@ def set_value(self, value, validate=True, todefault=False): if self.key in self.plotter._shared: return with self.plotter.no_validation: - self.plotter[self.key] = value if not validate else \ - self.validate(value) + self.plotter[self.key] = ( + value if not validate else self.validate(value) + ) def set_data(self, data, i=None): """ @@ -562,8 +591,9 @@ def set_data(self, data, i=None): """ if self.index_in_list is not None: i = self.index_in_list - if i is not None and isinstance(self.plotter.plot_data, - InteractiveList): + if i is not None and isinstance( + self.plotter.plot_data, InteractiveList + ): self.plotter.plot_data[i] = data else: self.plotter.plot_data = data @@ -588,14 +618,16 @@ def set_decoder(self, decoder, i=None): # we do not modify the raw data but instead set it on the plotter # TODO: 
This is not safe for encapsulated InteractiveList instances! if i is not None and isinstance( - self.plotter.plot_data, InteractiveList): + self.plotter.plot_data, InteractiveList + ): n = len(self.plotter.plot_data) decoders = self.plotter.plot_data_decoder or [None] * n decoders[i] = decoder self.plotter.plot_data_decoder = decoders else: - if (isinstance(self.plotter.plot_data, InteractiveList) and - isinstance(decoder, CFDecoder)): + if isinstance( + self.plotter.plot_data, InteractiveList + ) and isinstance(decoder, CFDecoder): decoder = [decoder] * len(self.plotter.plot_data) self.plotter.plot_data_decoder = decoder @@ -603,7 +635,8 @@ def get_decoder(self, i=None): # we do not modify the raw data but instead set it on the plotter # TODO: This is not safe for encapsulated InteractiveList instances! if i is not None and isinstance( - self.plotter.plot_data, InteractiveList): + self.plotter.plot_data, InteractiveList + ): n = len(self.plotter.plot_data) decoders = self.plotter.plot_data_decoder or [None] * n return decoders[i] or self.plotter.plot_data[i].psy.decoder @@ -755,7 +788,7 @@ def remove(self): @docstrings.get_extended_summary(base="Formatoption.convert_coordinate") @docstrings.get_sections( base="Formatoption.convert_coordinate", - sections=["Parameters", "Returns"] + sections=["Parameters", "Returns"], ) def convert_coordinate(self, coord, *variables): """Convert a coordinate to units necessary for the plot. 
@@ -847,21 +880,22 @@ class PostTiming(Formatoption): -------- post: The post processing formatoption""" - default = 'never' + default = "never" priority = -inf - group = 'post_processing' + group = "post_processing" - name = 'Timing of the post processing' + name = "Timing of the post processing" @staticmethod def validate(value): value = six.text_type(value) - possible_values = ['never', 'always', 'replot'] + possible_values = ["never", "always", "replot"] if value not in possible_values: - raise ValueError('String must be one of %s, not %r' % ( - possible_values, value)) + raise ValueError( + "String must be one of %s, not %r" % (possible_values, value) + ) return value def update(self, value): @@ -869,10 +903,12 @@ def update(self, value): def get_fmt_widget(self, parent, project): from psyplot_gui.compat.qtcompat import QComboBox + combo = QComboBox(parent) - combo.addItems(['never', 'always', 'replot']) + combo.addItems(["never", "always", "replot"]) combo.setCurrentText( - next((plotter[self.key] for plotter in project.plotters), 'never')) + next((plotter[self.key] for plotter in project.plotters), "never") + ) combo.currentTextChanged.connect(parent.set_obj) return combo @@ -881,10 +917,13 @@ class PostProcDependencies(object): """The dependencies of this formatoption""" def __get__(self, instance, owner): - if (instance is None or instance.plotter is None or - not instance.plotter._initialized): + if ( + instance is None + or instance.plotter is None + or not instance.plotter._initialized + ): return [] - elif instance.post_timing.value == 'always': + elif instance.post_timing.value == "always": return list(set(instance.plotter) - {instance.key}) else: return [] @@ -926,12 +965,13 @@ class PostProcessing(Formatoption): from psyplot.plotter import Plotter from xarray import DataArray + plotter = Plotter(DataArray([1, 2, 3])) # enable the post formatoption plotter.enable_post = True plotter.update(post="self.ax.set_title(str(self.data.mean()))") 
plotter.ax.get_title() - '2.0' + "2.0" By default, the ``post`` formatoption is only ran, when it is explicitly updated. However, you can use the :attr:`post_timing` formatoption, to @@ -940,28 +980,28 @@ class PostProcessing(Formatoption): .. code-block:: python - plotter.update(post_timing='always') + plotter.update(post_timing="always") See Also -------- post_timing: Determine the timing of this formatoption""" - children = ['post_timing'] + children = ["post_timing"] default = None priority = -inf - group = 'post_processing' + group = "post_processing" - name = 'Custom post processing script' + name = "Custom post processing script" @staticmethod def validate(value): if value is None: return value elif not isinstance(value, six.string_types): - raise ValueError("Expected a string, not %s" % (type(value), )) + raise ValueError("Expected a string, not %s" % (type(value),)) else: return six.text_type(value) @@ -970,7 +1010,7 @@ def data_dependent(self): """True if the corresponding :class:`post_timing ` formatoption is set to ``'replot'`` to run the post processing script after every change of the data""" - return self.post_timing.value == 'replot' + return self.post_timing.value == "replot" dependencies = PostProcDependencies() @@ -980,9 +1020,10 @@ def update(self, value): if not self.plotter.enable_post: warn( "Post processing is disabled. 
Set the ``enable_post`` " - "attribute to True to run the script") + "attribute to True to run the script" + ) else: - exec(value, {'self': self}) + exec(value, {"self": self}) class Plotter(dict): @@ -995,10 +1036,12 @@ class Plotter(dict): #: List of base strings in the :attr:`psyplot.rcParams` dictionary _rcparams_string = [] - post_timing = PostTiming('post_timing') - post = PostProcessing('post') + post_timing = PostTiming("post_timing") + post = PostProcessing("post") - no_validation = _temp_bool_prop('no_validation', """ + no_validation = _temp_bool_prop( + "no_validation", + """ Temporarily disable the validation Examples @@ -1007,13 +1050,15 @@ class Plotter(dict): you can disable it via:: >>> with plotter.no_validation: - ... plotter['ticksize'] = 'x' + ... plotter["ticksize"] = "x" + ... To permanently disable the validation, simply set >>> plotter.no_validation = True - >>> plotter['ticksize'] = 'x' - >>> plotter.no_validation = False # reenable validation""") + >>> plotter["ticksize"] = "x" + >>> plotter.no_validation = False # reenable validation""", + ) #: Temporarily include links in the key descriptions from #: :meth:`show_keys`, :meth:`show_docs` and :meth:`show_summaries`. 
@@ -1026,6 +1071,7 @@ def ax(self): """Axes instance of the plot""" if self._ax is None: import matplotlib.pyplot as plt + plt.figure() self._ax = plt.axes(projection=self._get_sample_projection()) return self._ax @@ -1071,9 +1117,14 @@ def rc(self): def base_variables(self): """A mapping from the base_variable names to the variables""" if isinstance(self.data, InteractiveList): - return dict(chain(*map( - lambda arr: six.iteritems(arr.psy.base_variables), - self.data))) + return dict( + chain( + *map( + lambda arr: six.iteritems(arr.psy.base_variables), + self.data, + ) + ) + ) else: return self.data.psy.base_variables @@ -1085,8 +1136,9 @@ def iter_base_variables(self): else: return self.data.psy.iter_base_variables - no_auto_update = property(_no_auto_update_getter, - doc=_no_auto_update_getter.__doc__) + no_auto_update = property( + _no_auto_update_getter, doc=_no_auto_update_getter.__doc__ + ) @no_auto_update.setter def no_auto_update(self, value): @@ -1096,8 +1148,11 @@ def no_auto_update(self, value): def changed(self): """:class:`dict` containing the key value pairs that are not the default""" - return {key: value for key, value in six.iteritems(self) - if getattr(self, key).changed} + return { + key: value + for key, value in six.iteritems(self) + if getattr(self, key).changed + } @property def figs2draw(self): @@ -1155,7 +1210,7 @@ def data(self, value): @property def plot_data(self): """The data that is used for plotting""" - return getattr(self, '_plot_data', self.data) + return getattr(self, "_plot_data", self.data) @plot_data.setter def plot_data(self, value): @@ -1181,16 +1236,25 @@ def logger(self): try: return self.data.psy.logger.getChild(self.__class__.__name__) except AttributeError: - name = '%s.%s' % (self.__module__, self.__class__.__name__) + name = "%s.%s" % (self.__module__, self.__class__.__name__) return logging.getLogger(name) - docstrings.keep_params('InteractiveBase.parameters', 'auto_update') + 
docstrings.keep_params("InteractiveBase.parameters", "auto_update") - @docstrings.get_sections(base='Plotter') + @docstrings.get_sections(base="Plotter") @docstrings.dedent - def __init__(self, data=None, ax=None, auto_update=None, project=None, - draw=False, make_plot=True, clear=False, - enable_post=False, **kwargs): + def __init__( + self, + data=None, + ax=None, + auto_update=None, + project=None, + draw=False, + make_plot=True, + clear=False, + enable_post=False, + **kwargs, + ): """ Parameters ---------- @@ -1219,7 +1283,7 @@ def __init__(self, data=None, ax=None, auto_update=None, project=None, self.data = data self.enable_post = enable_post if auto_update is None: - auto_update = rcParams['lists.auto_update'] + auto_update = rcParams["lists.auto_update"] self.no_auto_update = not bool(auto_update) self._registered_updates = {} self._todefault = False @@ -1257,8 +1321,9 @@ def __init__(self, data=None, ax=None, auto_update=None, project=None, self._set_rc() for key, value in six.iteritems(kwargs): # then the user values self[key] = value - self.initialize_plot(data, ax=ax, draw=draw, clear=clear, - make_plot=make_plot) + self.initialize_plot( + data, ax=ax, draw=draw, clear=clear, make_plot=make_plot + ) def _try2set(self, fmto, *args, **kwargs): """Sets the value in `fmto` and gives additional informations when fail @@ -1290,7 +1355,7 @@ def __setitem__(self, key, value): def __delitem__(self, key): self[key] = getattr(self, key).default - docstrings.delete_params('check_key.parameters', 'possible_keys', 'name') + docstrings.delete_params("check_key.parameters", "possible_keys", "name") @docstrings.dedent def check_key(self, key, raise_error=True, *args, **kwargs): @@ -1309,12 +1374,18 @@ def check_key(self, key, raise_error=True, *args, **kwargs): ------ %(check_key.raises)s""" return check_key( - key, possible_keys=list(self), raise_error=raise_error, - name='formatoption keyword', *args, **kwargs) + key, + possible_keys=list(self), + 
raise_error=raise_error, + name="formatoption keyword", + *args, + **kwargs, + ) @classmethod - @docstrings.get_sections(base='Plotter.check_data', sections=['Parameters', - 'Returns']) + @docstrings.get_sections( + base="Plotter.check_data", sections=["Parameters", "Returns"] + ) @dedent def check_data(cls, name, dims, is_unstructured): """ @@ -1349,16 +1420,25 @@ def check_data(cls, name, dims, is_unstructured): N = len(name) if len(dims) != N or len(is_unstructured) != N: return [False] * N, [ - 'Number of provided names (%i) and dimensions ' - '(%i) or unstructured information (%i) are not the same' % ( - N, len(dims), len(is_unstructured))] * N - return [True] * N, [''] * N + "Number of provided names (%i) and dimensions " + "(%i) or unstructured information (%i) are not the same" + % (N, len(dims), len(is_unstructured)) + ] * N + return [True] * N, [""] * N - docstrings.keep_params('Plotter.parameters', 'ax', 'make_plot', 'clear') + docstrings.keep_params("Plotter.parameters", "ax", "make_plot", "clear") @docstrings.dedent - def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, - draw=False, remove=False, priority=None): + def initialize_plot( + self, + data=None, + ax=None, + make_plot=True, + clear=False, + draw=False, + remove=False, + priority=None, + ): """ Initialize the plot for a data array @@ -1389,7 +1469,8 @@ def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, if data is None: # nothing to do if no data is given return self.no_auto_update = not ( - not self.no_auto_update or not data.psy.no_auto_update) + not self.no_auto_update or not data.psy.no_auto_update + ) data.psy.plotter = self if not make_plot: # stop here if we shall not plot return @@ -1401,22 +1482,28 @@ def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, fmto.remove() except Exception: self.logger.debug( - "Could not remove %s while initializing", fmto.key, - exc_info=True) + "Could not remove %s while initializing", + 
fmto.key, + exc_info=True, + ) if clear: self.logger.debug(" Clearing axes...") self.ax.clear() self.cleared = True # get the formatoptions. We sort them here by key to make sure that the # order always stays the same (easier for debugging) - fmto_groups = self._grouped_fmtos(self._sorted_by_priority( - sorted(self._fmtos, key=lambda fmto: fmto.key))) + fmto_groups = self._grouped_fmtos( + self._sorted_by_priority( + sorted(self._fmtos, key=lambda fmto: fmto.key) + ) + ) self.plot_data = self.data self._updating = True for fmto_priority, grouper in fmto_groups: if priority is None or fmto_priority == priority: - self._plot_by_priority(fmto_priority, grouper, - initializing=True) + self._plot_by_priority( + fmto_priority, grouper, initializing=True + ) self._release_all(True) # finish the update self.cleared = False self.replot = False @@ -1424,19 +1511,21 @@ def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, self._updating = False if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw: self.draw() - if rcParams['auto_show']: + if rcParams["auto_show"]: self.show() - docstrings.keep_params('InteractiveBase._register_update.parameters', - 'force', 'todefault') + docstrings.keep_params( + "InteractiveBase._register_update.parameters", "force", "todefault" + ) - @docstrings.get_sections(base='Plotter._register_update') + @docstrings.get_sections(base="Plotter._register_update") @docstrings.dedent - def _register_update(self, fmt={}, replot=False, force=False, - todefault=False): + def _register_update( + self, fmt={}, replot=False, force=False, todefault=False + ): """ Register formatoptions for the update @@ -1456,7 +1545,8 @@ def _register_update(self, fmt={}, replot=False, force=False, if force is True: force = list(fmt) self._force.update( - [ret[0] for ret in map(self.check_key, force or [])]) + [ret[0] for ret in map(self.check_key, force or [])] + ) # check the keys list(map(self.check_key, fmt)) 
self._registered_updates.update(fmt) @@ -1489,27 +1579,33 @@ def start_update(self, draw=None, queues=None, update_shared=True): See Also -------- :attr:`no_auto_update`, update""" + def update_the_others(): for fmto in fmtos: for other_fmto in fmto.shared: if not other_fmto.plotter._updating: other_fmto.plotter._register_update( - force=[other_fmto.key]) + force=[other_fmto.key] + ) for fmto in fmtos: for other_fmto in fmto.shared: if not other_fmto.plotter._updating: other_draw = other_fmto.plotter.start_update( - draw=False, update_shared=False) + draw=False, update_shared=False + ) if other_draw: self._figs2draw.add( - other_fmto.plotter.ax.get_figure()) + other_fmto.plotter.ax.get_figure() + ) + if self.disabled: return False if queues is not None: queues[0].get() - self.logger.debug("Starting update of %r", - self._registered_updates.keys()) + self.logger.debug( + "Starting update of %r", self._registered_updates.keys() + ) # update the formatoptions self._save_state() try: @@ -1536,8 +1632,13 @@ def update_the_others(): # wait for the other tasks to finish queues[0].join() queues[1].get() - fmtos.extend([fmto for fmto in self._insert_additionals(list( - self._to_update)) if fmto not in fmtos]) + fmtos.extend( + [ + fmto + for fmto in self._insert_additionals(list(self._to_update)) + if fmto not in fmtos + ] + ) self._to_update.clear() fmto_groups = self._grouped_fmtos(self._sorted_by_priority(fmtos[:])) @@ -1559,13 +1660,14 @@ def update_the_others(): raise finally: # make sure that all locks are released - self._release_all(finish=True, - queue=None if queues is None else queues[1]) + self._release_all( + finish=True, queue=None if queues is None else queues[1] + ) if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw and arr_draw: self.draw() - if rcParams['auto_show']: + if rcParams["auto_show"]: self.show() self.replot = False return arr_draw @@ -1580,7 +1682,7 @@ def _release_all(self, finish=False, queue=None): 
fmto.lock.release() except RuntimeError: pass - except: + except Exception: raise finally: if queue is not None: @@ -1592,8 +1694,11 @@ def _plot_by_priority(self, priority, fmtos, initializing=False): def update(fmto): other_fmto = self._shared.get(fmto.key) if other_fmto: - self.logger.debug("%s is shared with %s", fmto.key, - other_fmto.plotter.logger.name) + self.logger.debug( + "%s is shared with %s", + fmto.key, + other_fmto.plotter.logger.name, + ) other_fmto.share(fmto, initializing=initializing) # but if not, share them else: @@ -1612,7 +1717,9 @@ def update(fmto): self.logger.debug( "%s formatoptions with priority %i", - "Initializing" if initializing else "Updating", priority) + "Initializing" if initializing else "Updating", + priority, + ) if priority >= START or priority == END: for fmto in fmtos: @@ -1643,9 +1750,12 @@ def reinit(self, draw=None, clear=False): # False if any fmto has requires_clearing attribute set to True, # because this then has been cleared before self.initialize_plot( - self.data, self._ax, draw=draw, clear=clear or any( - fmto.requires_clearing for fmto in self._fmtos), - remove=True) + self.data, + self._ax, + draw=draw, + clear=clear or any(fmto.requires_clearing for fmto in self._fmtos), + remove=True, + ) def draw(self): """Draw the figures and those that are shared and have been changed""" @@ -1661,6 +1771,7 @@ def key_func(fmto): return BEFOREPLOTTING else: return END + return groupby(fmtos, key_func) def _set_and_filter(self): @@ -1679,10 +1790,11 @@ def _set_and_filter(self): for key in self._force: self._registered_updates.setdefault(key, getattr(self, key).value) for key, value in chain( - six.iteritems(self._registered_updates), - six.iteritems( - {key: getattr(self, key).default for key in self}) - if self._todefault else ()): + six.iteritems(self._registered_updates), + six.iteritems({key: getattr(self, key).default for key in self}) + if self._todefault + else (), + ): if key in seen: continue seen.add(key) @@ 
-1692,19 +1804,25 @@ def _set_and_filter(self): # project update) if key in self._shared and key not in self._force: if not self._shared[key].plotter._updating: - warn(("%s formatoption is shared with another plotter." - " Use the unshare method to enable the updating") % ( - fmto.key), - logger=self.logger) + warn( + ( + "%s formatoption is shared with another plotter." + " Use the unshare method to enable the updating" + ) + % (fmto.key), + logger=self.logger, + ) changed = False else: try: changed = fmto.check_and_set( - value, todefault=self._todefault, - validate=not self.no_validation) + value, + todefault=self._todefault, + validate=not self.no_validation, + ) except Exception as e: self._registered_updates.pop(key, None) - self.logger.debug('Failed to set %s', key) + self.logger.debug("Failed to set %s", key) raise e changed = changed or key in self._force if changed: @@ -1746,22 +1864,32 @@ def _insert_additionals(self, fmtos, seen=None): `fmtos` and `seen` are modified in place (except that any formatoption in the initial `fmtos` has :attr:`~Formatoption.requires_clearing` attribute set to True)""" + def get_dependencies(fmto): if fmto is None: return [] - return fmto.dependencies + list(chain(*map( - lambda key: get_dependencies(getattr(self, key, None)), - fmto.dependencies))) + return fmto.dependencies + list( + chain( + *map( + lambda key: get_dependencies(getattr(self, key, None)), + fmto.dependencies, + ) + ) + ) + seen = seen or {fmto.key for fmto in fmtos} keys = {fmto.key for fmto in fmtos} self.replot = self.replot or any( - fmto.requires_replot for fmto in fmtos) + fmto.requires_replot for fmto in fmtos + ) if self.replot or any(fmto.priority >= START for fmto in fmtos): self.replot = True self.plot_data = self.data - new_fmtos = dict((f.key, f) for f in self._fmtos - if ((f not in fmtos and is_data_dependent( - f, self.data)))) + new_fmtos = dict( + (f.key, f) + for f in self._fmtos + if ((f not in fmtos and is_data_dependent(f, self.data))) + 
) seen.update(new_fmtos) keys.update(new_fmtos) fmtos += list(new_fmtos.values()) @@ -1769,8 +1897,11 @@ def get_dependencies(fmto): # insert the formatoptions that have to be updated if the plot is # changed if any(fmto.priority >= BEFOREPLOTTING for fmto in fmtos): - new_fmtos = dict((f.key, f) for f in self._fmtos - if ((f not in fmtos and f.update_after_plot))) + new_fmtos = dict( + (f.key, f) + for f in self._fmtos + if ((f not in fmtos and f.update_after_plot)) + ) fmtos += list(new_fmtos.values()) for fmto in set(self._fmtos).difference(fmtos): all_dependencies = get_dependencies(fmto) @@ -1799,6 +1930,7 @@ def _sorted_by_priority(self, fmtos, changed=None): Warnings -------- The list `fmtos` is cleared by this method!""" + def pop_fmto(key): idx = fmtos_keys.index(key) del fmtos_keys[idx] @@ -1811,11 +1943,14 @@ def get_children(fmto, parents_keys): continue child_fmto = pop_fmto(key) for childs_child in get_children( - child_fmto, parents_keys + [child_fmto.key]): + child_fmto, parents_keys + [child_fmto.key] + ): yield childs_child # filter out if parent is in update list - if (any(key in all_fmtos for key in child_fmto.parents) or - fmto.key in child_fmto.parents): + if ( + any(key in all_fmtos for key in child_fmto.parents) + or fmto.key in child_fmto.parents + ): continue yield child_fmto @@ -1856,24 +1991,31 @@ def _get_formatoptions(cls, include_bases=True): See Also -------- _format_keys""" + def base_fmtos(base): return filter( lambda key: isinstance(getattr(cls, key), Formatoption), - getattr(base, '_get_formatoptions', empty)(False)) + getattr(base, "_get_formatoptions", empty)(False), + ) def empty(*args, **kwargs): return list() - fmtos = (attr for attr, obj in six.iteritems(cls.__dict__) - if isinstance(obj, Formatoption)) + + fmtos = ( + attr + for attr, obj in six.iteritems(cls.__dict__) + if isinstance(obj, Formatoption) + ) if not include_bases: return fmtos return unique_everseen(chain(fmtos, *map(base_fmtos, cls.__mro__))) - 
docstrings.keep_types('check_key.parameters', 'kwargs', - r'``\*args,\*\*kwargs``') + docstrings.keep_types( + "check_key.parameters", "kwargs", r"``\*args,\*\*kwargs``" + ) @classmethod - @docstrings.get_sections(base='Plotter._enhance_keys') + @docstrings.get_sections(base="Plotter._enhance_keys") @docstrings.dedent def _enhance_keys(cls, keys=None, *args, **kwargs): """ @@ -1904,7 +2046,6 @@ def _enhance_keys(cls, keys=None, *args, **kwargs): fmto_groups[getattr(cls, key).group].append(key) new_i = 0 for i, key in enumerate(keys[:]): - if key in fmto_groups: del keys[new_i] for key2 in fmto_groups[key]: @@ -1913,8 +2054,13 @@ def _enhance_keys(cls, keys=None, *args, **kwargs): new_i += 1 else: valid, similar, message = check_key( - key, all_keys, False, 'formatoption keyword', *args, - **kwargs) + key, + all_keys, + False, + "formatoption keyword", + *args, + **kwargs, + ) if not valid: keys.remove(key) new_i -= 1 @@ -1923,12 +2069,21 @@ def _enhance_keys(cls, keys=None, *args, **kwargs): return keys @classmethod - @docstrings.get_sections(base= - 'Plotter.show_keys', sections=['Parameters', 'Returns', - 'Other Parameters']) + @docstrings.get_sections( + base="Plotter.show_keys", + sections=["Parameters", "Returns", "Other Parameters"], + ) @docstrings.dedent - def show_keys(cls, keys=None, indent=0, grouped=False, func=None, - include_links=False, *args, **kwargs): + def show_keys( + cls, + keys=None, + indent=0, + grouped=False, + func=None, + include_links=False, + *args, + **kwargs, + ): """ Classmethod to return a nice looking table with the given formatoptions @@ -1961,23 +2116,32 @@ def show_keys(cls, keys=None, indent=0, grouped=False, func=None, See Also -------- show_summaries, show_docs""" + def titled_group(groupname): - bars = str_indent + '*' * len(groupname) + '\n' - return bars + str_indent + groupname + '\n' + bars + bars = str_indent + "*" * len(groupname) + "\n" + return bars + str_indent + groupname + "\n" + bars keys = 
cls._enhance_keys(keys, *args, **kwargs) str_indent = " " * indent func = func or default_print_func # call this function recursively when grouped is True if grouped: - grouped_keys = DefaultOrderedDict(list) + grouped_keys = Defaultdict(list) for fmto in map(lambda key: getattr(cls, key), keys): grouped_keys[fmto.groupname].append(fmto.key) text = "" for group, keys in six.iteritems(grouped_keys): - text += titled_group(group) + cls.show_keys( - keys, indent=indent, grouped=False, func=six.text_type, - include_links=include_links) + '\n\n' + text += ( + titled_group(group) + + cls.show_keys( + keys, + indent=indent, + grouped=False, + func=six.text_type, + include_links=include_links, + ) + + "\n\n" + ) return func(text.rstrip()) if not keys: @@ -1990,28 +2154,48 @@ def titled_group(groupname): # 3. The number of keys plus the empty cells in the last column ncells = n + ((ncols - (n % ncols)) if n != ncols else 0) if include_links or (include_links is None and cls.include_links): - long_keys = list(map(lambda key: ':attr:`~%s.%s.%s`' % ( - cls.__module__, cls.__name__, key), keys)) + long_keys = list( + map( + lambda key: ":attr:`~%s.%s.%s`" + % (cls.__module__, cls.__name__, key), + keys, + ) + ) else: long_keys = keys maxn = max(map(len, long_keys)) # maximal lenght of the keys # extend with empty cells - long_keys.extend([' ' * maxn] * (ncells - n)) - bars = (str_indent + '+-' + ("-"*(maxn) + "-+-")*ncols)[:-1] - lines = ('| %s |\n%s' % (' | '.join( - key.ljust(maxn) for key in long_keys[i:i+ncols]), bars) - for i in range(0, n, ncols)) - text = bars + "\n" + str_indent + ("\n" + str_indent).join( - lines) + long_keys.extend([" " * maxn] * (ncells - n)) + bars = (str_indent + "+-" + ("-" * (maxn) + "-+-") * ncols)[:-1] + lines = ( + "| %s |\n%s" + % ( + " | ".join( + key.ljust(maxn) for key in long_keys[i : i + ncols] + ), + bars, + ) + for i in range(0, n, ncols) + ) + text = bars + "\n" + str_indent + ("\n" + str_indent).join(lines) if six.PY2: - text = 
text.encode('utf-8') + text = text.encode("utf-8") return func(text) @classmethod @docstrings.dedent - def _show_doc(cls, fmt_func, keys=None, indent=0, grouped=False, - func=None, include_links=False, *args, **kwargs): + def _show_doc( + cls, + fmt_func, + keys=None, + indent=0, + grouped=False, + func=None, + include_links=False, + *args, + **kwargs, + ): """ Classmethod to print the formatoptions and their documentation @@ -2037,34 +2221,51 @@ def _show_doc(cls, fmt_func, keys=None, indent=0, grouped=False, See Also -------- show_summaries, show_docs""" + def titled_group(groupname): - bars = str_indent + '*' * len(groupname) + '\n' - return bars + str_indent + groupname + '\n' + bars + bars = str_indent + "*" * len(groupname) + "\n" + return bars + str_indent + groupname + "\n" + bars func = func or default_print_func keys = cls._enhance_keys(keys, *args, **kwargs) str_indent = " " * indent if grouped: - grouped_keys = DefaultOrderedDict(list) + grouped_keys = Defaultdict(list) for fmto in map(lambda key: getattr(cls, key), keys): grouped_keys[fmto.groupname].append(fmto.key) text = "\n\n".join( - titled_group(group) + cls._show_doc( - fmt_func, keys, indent=indent, grouped=False, - func=str, include_links=include_links) - for group, keys in six.iteritems(grouped_keys)) + titled_group(group) + + cls._show_doc( + fmt_func, + keys, + indent=indent, + grouped=False, + func=str, + include_links=include_links, + ) + for group, keys in six.iteritems(grouped_keys) + ) return func(text.rstrip()) if include_links or (include_links is None and cls.include_links): - long_keys = list(map(lambda key: ':attr:`~%s.%s.%s`' % ( - cls.__module__, cls.__name__, key), keys)) + long_keys = list( + map( + lambda key: ":attr:`~%s.%s.%s`" + % (cls.__module__, cls.__name__, key), + keys, + ) + ) else: long_keys = keys - text = '\n'.join(str_indent + long_key + '\n' + fmt_func( - key, long_key, getattr(cls, key).__doc__) for long_key, key in zip( - long_keys, keys)) + text = "\n".join( 
+ str_indent + + long_key + + "\n" + + fmt_func(key, long_key, getattr(cls, key).__doc__) + for long_key, key in zip(long_keys, keys) + ) return func(text) @classmethod @@ -2088,13 +2289,19 @@ def show_summaries(cls, keys=None, indent=0, *args, **kwargs): See Also -------- show_keys, show_docs""" + def find_summary(key, key_txt, doc): - return '\n'.join(wrapper.wrap(doc[:doc.find('\n\n')])) + return "\n".join(wrapper.wrap(doc[: doc.find("\n\n")])) + str_indent = " " * indent - wrapper = TextWrapper(width=80, initial_indent=str_indent + ' ' * 4, - subsequent_indent=str_indent + ' ' * 4) - return cls._show_doc(find_summary, keys=keys, indent=indent, - *args, **kwargs) + wrapper = TextWrapper( + width=80, + initial_indent=str_indent + " " * 4, + subsequent_indent=str_indent + " " * 4, + ) + return cls._show_doc( + find_summary, keys=keys, indent=indent, *args, **kwargs + ) @classmethod @docstrings.dedent @@ -2117,10 +2324,13 @@ def show_docs(cls, keys=None, indent=0, *args, **kwargs): See Also -------- show_keys, show_docs""" + def full_doc(key, key_txt, doc): - return ('=' * len(key_txt)) + '\n' + doc + '\n' - return cls._show_doc(full_doc, keys=keys, indent=indent, - *args, **kwargs) + return ("=" * len(key_txt)) + "\n" + doc + "\n" + + return cls._show_doc( + full_doc, keys=keys, indent=indent, *args, **kwargs + ) @classmethod def _get_rc_strings(cls): @@ -2140,33 +2350,56 @@ def _get_rc_strings(cls): the following the :attr:`_rcparams_string` attributes of the base classes according to the method resolution order of this class""" - return list(unique_everseen(chain( - *map(lambda base: getattr(base, '_rcparams_string', []), - cls.__mro__)))) + return list( + unique_everseen( + chain( + *map( + lambda base: getattr(base, "_rcparams_string", []), + cls.__mro__, + ) + ) + ) + ) def _set_rc(self): """Method to set the rcparams and defaultParams for this plotter""" base_str = self._get_rc_strings() # to make sure that the '.' 
is not interpreted as a regex pattern, # we specify the pattern_base by ourselves - pattern_base = map(lambda s: s.replace('.', r'\.'), base_str) + pattern_base = map(lambda s: s.replace(".", r"\."), base_str) # pattern for valid keys being all formatoptions in this plotter - pattern = '(%s)(?=$)' % '|'.join(self._get_formatoptions()) - self._rc = rcParams.find_and_replace(base_str, pattern=pattern, - pattern_base=pattern_base) - user_rc = SubDict(rcParams['plotter.user'], base_str, pattern=pattern, - pattern_base=pattern_base) + pattern = "(%s)(?=$)" % "|".join(self._get_formatoptions()) + self._rc = rcParams.find_and_replace( + base_str, pattern=pattern, pattern_base=pattern_base + ) + user_rc = SubDict( + rcParams["plotter.user"], + base_str, + pattern=pattern, + pattern_base=pattern_base, + ) self._rc.update(user_rc.data) - self._defaultParams = SubDict(rcParams.defaultParams, base_str, - pattern=pattern, - pattern_base=pattern_base) + self._defaultParams = SubDict( + rcParams.defaultParams, + base_str, + pattern=pattern, + pattern_base=pattern_base, + ) - docstrings.keep_params('InteractiveBase.update.parameters', 'auto_update') + docstrings.keep_params("InteractiveBase.update.parameters", "auto_update") @docstrings.dedent - def update(self, fmt={}, replot=False, auto_update=False, draw=None, - force=False, todefault=False, **kwargs): + def update( + self, + fmt={}, + replot=False, + auto_update=False, + draw=None, + force=False, + todefault=False, + **kwargs, + ): """ Update the formatoptions and the plot @@ -2199,8 +2432,9 @@ def update(self, fmt={}, replot=False, auto_update=False, draw=None, self[key] = val return - self._register_update(fmt=fmt, replot=replot, force=force, - todefault=todefault) + self._register_update( + fmt=fmt, replot=replot, force=force, todefault=todefault + ) if not self.no_auto_update or auto_update: self.start_update(draw=draw) @@ -2224,12 +2458,18 @@ def _set_sharing_keys(self, keys): keys = {keys} keys = set(self) if keys is 
None else set(keys) fmto_groups = self._fmto_groups - keys.update(chain(*(map(lambda fmto: fmto.key, fmto_groups[key]) - for key in keys.intersection(fmto_groups)))) + keys.update( + chain( + *( + map(lambda fmto: fmto.key, fmto_groups[key]) + for key in keys.intersection(fmto_groups) + ) + ) + ) keys.difference_update(fmto_groups) return keys - @docstrings.get_sections(base='Plotter.share') + @docstrings.get_sections(base="Plotter.share") @docstrings.dedent def share(self, plotters, keys=None, draw=None, auto_update=False): """ @@ -2274,16 +2514,16 @@ def share(self, plotters, keys=None, draw=None, auto_update=False): plotter._registered_updates.clear() try: plotter.update(force=keys, auto_update=auto_update, draw=draw) - except: + except Exception: raise finally: plotter._registered_updates.clear() plotter._registered_updates.update(old_registered) if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw: self.draw() - if rcParams['auto_show']: + if rcParams["auto_show"]: self.show() @docstrings.dedent @@ -2316,14 +2556,16 @@ def unshare(self, plotters, keys=None, auto_update=False, draw=None): plotters = [plotters] keys = self._set_sharing_keys(keys) for plotter in plotters: - plotter.unshare_me(keys, auto_update=auto_update, draw=draw, - update_other=False) + plotter.unshare_me( + keys, auto_update=auto_update, draw=draw, update_other=False + ) self.update(force=keys, auto_update=auto_update, draw=draw) - @docstrings.get_sections(base='Plotter.unshare_me') + @docstrings.get_sections(base="Plotter.unshare_me") @docstrings.dedent - def unshare_me(self, keys=None, auto_update=False, draw=None, - update_other=True): + def unshare_me( + self, keys=None, auto_update=False, draw=None, update_other=True + ): """ Close the sharing connection of this plotter with others @@ -2355,8 +2597,7 @@ def unshare_me(self, keys=None, auto_update=False, draw=None, else: other_fmto.shared.remove(fmto) if update_other: - 
other_fmto.plotter._register_update( - force=[other_fmto.key]) + other_fmto.plotter._register_update(force=[other_fmto.key]) to_update.append(other_fmto.plotter) self.update(force=keys, draw=draw, auto_update=auto_update) if update_other and auto_update: @@ -2384,6 +2625,7 @@ def _save_state(self): def show(self): """Shows all open figures""" import matplotlib.pyplot as plt + plt.show(block=False) @dedent @@ -2414,17 +2656,24 @@ def has_changed(self, key, include_last=True): old_val = self._old_fmt[-1][key] else: old_val = fmto.default - if (fmto.diff(old_val) or (include_last and - fmto.key in self._last_update)): + if fmto.diff(old_val) or ( + include_last and fmto.key in self._last_update + ): return [old_val, fmto.value] - def get_enhanced_attrs(self, arr, axes=['x', 'y', 't', 'z']): + def get_enhanced_attrs(self, arr, axes=["x", "y", "t", "z"]): if isinstance(arr, InteractiveList): - all_attrs = list(starmap(self.get_enhanced_attrs, zip( - arr, repeat(axes)))) - attrs = {key: val for key, val in six.iteritems(all_attrs[0]) - if all(key in attrs and attrs[key] == val - for attrs in all_attrs[1:])} + all_attrs = list( + starmap(self.get_enhanced_attrs, zip(arr, repeat(axes))) + ) + attrs = { + key: val + for key, val in six.iteritems(all_attrs[0]) + if all( + key in attrs and attrs[key] == val + for attrs in all_attrs[1:] + ) + } attrs.update(arr.attrs) else: attrs = arr.attrs.copy() @@ -2432,12 +2681,15 @@ def get_enhanced_attrs(self, arr, axes=['x', 'y', 't', 'z']): if len(base_variables) > 1: # multiple variables for name, base_var in six.iteritems(base_variables): attrs.update( - {six.text_type(name)+key: value - for key, value in six.iteritems(base_var.attrs)}) + { + six.text_type(name) + key: value + for key, value in six.iteritems(base_var.attrs) + } + ) else: base_var = next(six.itervalues(base_variables)) - attrs['name'] = arr.name - for dim, coord in six.iteritems(getattr(arr, 'coords', {})): + attrs["name"] = arr.name + for dim, coord in 
six.iteritems(getattr(arr, "coords", {})): if coord.size == 1: attrs[dim] = format_time(coord.values) if isinstance(self.data, InteractiveList): @@ -2446,13 +2698,14 @@ def get_enhanced_attrs(self, arr, axes=['x', 'y', 't', 'z']): decoder = self.data.psy.decoder for dim in axes: for obj in [base_var, arr]: - coord = getattr(decoder, 'get_' + dim)( - obj, coords=getattr(arr, 'coords', None)) + coord = getattr(decoder, "get_" + dim)( + obj, coords=getattr(arr, "coords", None) + ) if coord is None: continue if coord.size == 1: attrs[dim] = format_time(coord.values) - attrs[dim + 'name'] = coord.name + attrs[dim + "name"] = coord.name for key, val in six.iteritems(coord.attrs): attrs[dim + key] = val self._enhanced_attrs = attrs diff --git a/psyplot/plugin-template-files/COPYING b/psyplot/plugin-template-files/COPYING deleted file mode 100644 index f288702..0000000 --- a/psyplot/plugin-template-files/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/psyplot/plugin-template-files/COPYING.LESSER b/psyplot/plugin-template-files/COPYING.LESSER deleted file mode 100644 index 0a04128..0000000 --- a/psyplot/plugin-template-files/COPYING.LESSER +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. 
- - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. 
(If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/psyplot/plugin-template-files/MANIFEST.in b/psyplot/plugin-template-files/MANIFEST.in deleted file mode 100644 index 04f196a..0000000 --- a/psyplot/plugin-template-files/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.md -include LICENSE diff --git a/psyplot/plugin-template-files/README.md b/psyplot/plugin-template-files/README.md deleted file mode 100644 index b0b7bcc..0000000 --- a/psyplot/plugin-template-files/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# PLUGIN_NAME: PLUGIN_DESC - -This template serves as a basis for new psyplot plugins. Go through every file -and adapt it to your needs. A small overview on the important files in this -package: - -- setup.py: The installation script -- PLUGIN_PYNAME/plugin.py: The plugin module that is imported at startup of - psyplot -- PLUGIN_PYNAME/plotters.py: The module that defines the plotters for the plugin -- COPYING and COPYING.LESSER: The license file of your package (uses LGPL-3.0 - but you can change this) - -Of course you can change the names of these files to anything you want. Just -make sure that they are correctly specified in the install script. diff --git a/psyplot/plugin-template-files/plugin_template/__init__.py b/psyplot/plugin-template-files/plugin_template/__init__.py deleted file mode 100644 index 166a80c..0000000 --- a/psyplot/plugin-template-files/plugin_template/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -"""PLUGIN_NAME: PLUGIN_DESC - -Describe your plugin here and do whatever you want. 
It should at least have a -__version__ attribute to specify the version of the package -""" - -# Disclaimer -# ---------- -# -# Copyright (C) YOUR-INSTITUTION -# -# This file is part of PLUGIN_NAME and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -__version__ = "PLUGIN_VERSION" diff --git a/psyplot/plugin-template-files/plugin_template/plotters.py b/psyplot/plugin-template-files/plugin_template/plotters.py deleted file mode 100644 index 84267a5..0000000 --- a/psyplot/plugin-template-files/plugin_template/plotters.py +++ /dev/null @@ -1,53 +0,0 @@ -"""plotters module of the PLUGIN_NAME psyplot plugin - -This module defines the plotters for the PLUGIN_NAME package. It should import -all requirements and define the formatoptions and plotters that are specified -in the :mod:`PLUGIN_PYNAME.plugin` module. -""" - -# Disclaimer -# ---------- -# -# Copyright (C) YOUR-INSTITUTION -# -# This file is part of PLUGIN_NAME and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -from psyplot.plotter import Formatoption, Plotter - - -# ----------------------------------------------------------------------------- -# ---------------------------- Formatoptions ---------------------------------- -# ----------------------------------------------------------------------------- - - -class MyNewFormatoption(Formatoption): - - def update(self, value): - # hooray - pass - - -# ----------------------------------------------------------------------------- -# ------------------------------ Plotters ------------------------------------- -# ----------------------------------------------------------------------------- - - -class MyPlotter(Plotter): - - _rcparams_string = ['plotter.PLUGIN_PYNAME.'] - - my_fmt = MyNewFormatoption('my_fmt') diff --git a/psyplot/plugin-template-files/plugin_template/plugin.py b/psyplot/plugin-template-files/plugin_template/plugin.py deleted file mode 100644 index 93651db..0000000 --- a/psyplot/plugin-template-files/plugin_template/plugin.py +++ /dev/null @@ -1,109 +0,0 @@ -"""PLUGIN_NAME psyplot plugin - -This module defines the rcParams for the PLUGIN_NAME plugin. This module will -be imported when psyplot is imported. What is should contain is: - -- an rcParams variable as instance of :class:`psyplot.config.rcsetup.RcParams` - that describes the configuration of your plugin -- a get_versions function that returns the version of your plugin and the ones - from its requirements - -.. warning:: - - Because of recursion issues, You have to load the psyplot module before - loading this module! In other words, you have to type - - .. 
code-block:: python - - import psyplot - import PLUGIN_PYNAME.plugin""" - -# Disclaimer -# ---------- -# -# Copyright (C) YOUR-INSTITUTION -# -# This file is part of PLUGIN_NAME and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -from psyplot.config.rcsetup import RcParams -from PLUGIN_PYNAME import __version__ as plugin_version - - -def get_versions(requirements=True): - """Get the versions of PLUGIN_NAME and it's requirements - - Parameters - ---------- - requirements: bool - If True, the requirements are imported and it's versions are included - """ - ret = {'version': plugin_version} - if requirements: - # insert versions of the requirements, e.g. via - # >>> import requirement - # >>> ret['requirement'] = requirement.__version__ - pass - return ret - - -# ----------------------------------------------------------------------------- -# ------------------------- validation functions ------------------------------ -# ----------------------------------------------------------------------------- - - -# define your validation functions for the values in the rcParams here. 
If -# a validation fails, the function should raise a ValueError or TypeError - - -# ----------------------------------------------------------------------------- -# ------------------------------ rcParams ------------------------------------- -# ----------------------------------------------------------------------------- - - -# define your defaultParams. A mapping from rcParams key to a list of length 3: -# -# 1. the default value -# 2. the validation function of type conversion function -# 3. a short description of the default value -# -# Example:: -# -# defaultParams = {'my.key': [True, bool, 'What my key does']} -defaultParams = { - - # key for defining new plotters - 'project.plotters': [ - {'plot_method_identifer': { - 'module': 'PLUGIN_PYNAME.plotters', - 'plotter_name': 'MyPlotter', # or any other name - # any other item for the :func:`psyplot.project.register_plotter` - # function - # 'plot_func': False, - # 'prefer_list': True, - # ... - }, - }, dict, "The plot methods in the PLUGIN_NAME package"], - # if you define new plotters, we recommend to assign a specific rcParams - # key for it, e.g. - # 'plotter.PLUGIN_PYNAME.my_fmt': [1, int, ' the value for my_fmt'] - } - -# create the rcParams and populate them with the defaultParams. For more -# information on this class, see the :class:`psyplot.config.rcsetup.RcParams` -# class -rcParams = RcParams(defaultParams=defaultParams) -rcParams.update_from_defaultParams() diff --git a/psyplot/plugin-template-files/setup.py b/psyplot/plugin-template-files/setup.py deleted file mode 100644 index a7e7eb9..0000000 --- a/psyplot/plugin-template-files/setup.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Setup file for plugin PLUGIN_NAME - -This file is used to install the package to your python distribution. -Installation goes simply via:: - - python -m pip install . -""" - -# Disclaimer -# ---------- -# -# Copyright (C) YOUR-INSTITUTION -# -# This file is part of PLUGIN_NAME and is released under the GNU LGPL-3.O license. 
-# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -from setuptools import setup, find_packages - - -setup(name='PLUGIN_NAME', - version='PLUGIN_VERSION', - description='PLUGIN_DESC', - keywords='visualization psyplot', - license="LGPL-3.0-only", - packages=find_packages(exclude=['docs', 'tests*', 'examples']), - install_requires=[ - 'psyplot', - ], - classifiers=[ - 'Development Status :: 1 - Planning', - 'Intended Audience :: Developers', - 'Topic :: Scientific/Engineering :: Visualization', - 'Topic :: Scientific/Engineering :: GIS', - 'Topic :: Scientific/Engineering', - 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Operating System :: OS Independent', - ], - - entry_points={'psyplot': ['plugin=PLUGIN_PYNAME.plugin']}, - zip_safe=False) diff --git a/psyplot/plugin_template.py b/psyplot/plugin_template.py deleted file mode 100644 index ec97621..0000000 --- a/psyplot/plugin_template.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Module for creating a new template for a psyplot plugin.""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 
Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -import funcargparse -import os -import os.path as osp -import shutil - - -def new_plugin(odir, py_name=None, version='0.0.1.dev0', - description='New plugin'): - """ - Create a new plugin for the psyplot package - - Parameters - ---------- - odir: str - The name of the directory where the data will be copied to. The - directory must not exist! The name of the directory also defines the - name of the package. - py_name: str - The name of the python package. 
If None, the basename of `odir` is used - (and ``'-'`` is replaced by ``'_'``) - version: str - The version of the package - description: str - The description of the plugin""" - name = osp.basename(odir) - if py_name is None: - py_name = name.replace('-', '_') - - src = osp.join(osp.dirname(__file__), 'plugin-template-files') - # copy the source files - shutil.copytree(src, odir) - os.rename(osp.join(odir, 'plugin_template'), osp.join(odir, py_name)) - - replacements = {'PLUGIN_NAME': name, - 'PLUGIN_PYNAME': py_name, - 'PLUGIN_VERSION': version, - 'PLUGIN_DESC': description, - } - files = [ - 'README.md', - 'setup.py', - osp.join(py_name, 'plugin.py'), - osp.join(py_name, 'plotters.py'), - osp.join(py_name, '__init__.py'), - ] - - for fname in files: - with open(osp.join(odir, fname)) as f: - s = f.read() - for key, val in replacements.items(): - s = s.replace(key, val) - with open(osp.join(odir, fname), 'w') as f: - f.write(s) - - -def main(args=None): - parser = funcargparse.FuncArgParser() - parser.setup_args(new_plugin) - parser.update_short(version='v', description='d') - parser.create_arguments() - parser.parse2func(args) - - -if __name__ == '__main__': - main() diff --git a/psyplot/project.py b/psyplot/project.py index 6336ce0..6411ce5 100755 --- a/psyplot/project.py +++ b/psyplot/project.py @@ -8,80 +8,73 @@ Furthermore this module contains an easy pyplot-like API to the current subproject.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only +import logging import os import os.path as osp -import yaml +import pickle import sys -import six +from collections import defaultdict from copy import deepcopy as _deepcopy -import logging -import inspect -import pickle +from functools import partial, wraps from importlib import import_module -from itertools import chain, repeat, cycle, count, islice -from collections import defaultdict -from functools import wraps, partial -import xarray -import pandas as pd +from itertools import chain, count, cycle, islice, repeat import matplotlib as mpl import matplotlib.figure as mfig import numpy as np +import pandas as pd +import six +import xarray +import yaml +from matplotlib.axes import SubplotBase + import psyplot -from psyplot import rcParams, get_versions import psyplot.utils as utils +from psyplot import get_versions, rcParams from psyplot.config.rcsetup import get_configdir, psyplot_fname -from psyplot.warning import warn, critical -from psyplot.docstring import docstrings, dedent, safe_modulo -import psyplot.data as psyd from psyplot.data import ( - ArrayList, open_dataset, open_mfdataset, _MissingModule, - to_netcdf, Signal, CFDecoder, safe_list, InteractiveList) -from psyplot.plotter import unique_everseen, Plotter -from psyplot.compat.pycompat import (OrderedDict, range, getcwd, - get_default_value as _get_default_value) + ArrayList, + CFDecoder, + InteractiveList, + Signal, + 
_MissingModule, + open_dataset, + open_mfdataset, + safe_list, +) +from psyplot.docstring import dedent, docstrings, safe_modulo +from psyplot.plotter import Plotter, unique_everseen +from psyplot.utils import get_default_value as _get_default_value +from psyplot.warning import critical, warn + try: - from cdo import Cdo as _CdoBase, CDO_PY_VERSION as cdo_version + from cdo import CDO_PY_VERSION as cdo_version + from cdo import Cdo as _CdoBase + with_cdo = True - cdo_version = tuple(map(int, cdo_version.split('.')[:2])) + cdo_version = tuple(map(int, cdo_version.split(".")[:2])) except ImportError as e: Cdo = _MissingModule(e) with_cdo = False cdo_version = None try: # try import show_colormaps for convenience - from psy_simple.colors import show_colormaps, get_cmap + from psy_simple.colors import get_cmap, show_colormaps # noqa: F401 except ImportError: pass -if rcParams['project.import_seaborn'] is not False: +if rcParams["project.import_seaborn"] is not False: try: import seaborn as _sns except ImportError as e: - if rcParams['project.import_seaborn']: + if rcParams["project.import_seaborn"]: raise _sns = _MissingModule(e) @@ -93,7 +86,7 @@ _versions = get_versions(requirements=False) -_concat_dim_default = _get_default_value(xarray.open_mfdataset, 'concat_dim') +_concat_dim_default = _get_default_value(xarray.open_mfdataset, "concat_dim") def _update_versions(): @@ -101,15 +94,26 @@ def _update_versions(): for pm_name in plot._plot_methods: pm = getattr(plot, pm_name) plugin = pm._plugin - if (plugin is not None and plugin not in _versions and - pm.module in sys.modules): + if ( + plugin is not None + and plugin not in _versions + and pm.module in sys.modules + ): _versions.update(get_versions(key=lambda s: s == plugin)) -@docstrings.get_sections(base='multiple_subplots') +@docstrings.get_sections(base="multiple_subplots") @docstrings.dedent -def multiple_subplots(rows=1, cols=1, maxplots=None, n=1, delete=True, - for_maps=False, *args, **kwargs): +def 
multiple_subplots( + rows=1, + cols=1, + maxplots=None, + n=1, + delete=True, + for_maps=False, + *args, + **kwargs, +): """ Function to create subplots. @@ -143,14 +147,15 @@ def multiple_subplots(rows=1, cols=1, maxplots=None, n=1, delete=True, list list of maplotlib.axes.SubplotBase instances""" import matplotlib.pyplot as plt + axes = np.array([]) maxplots = maxplots or rows * cols - kwargs.setdefault('figsize', [ - min(8.*cols, 16), min(6.5*rows, 12)]) + kwargs.setdefault("figsize", [min(8.0 * cols, 16), min(6.5 * rows, 12)]) if for_maps: import cartopy.crs as ccrs - subplot_kw = kwargs.setdefault('subplot_kw', {}) - subplot_kw['projection'] = ccrs.PlateCarree() + + subplot_kw = kwargs.setdefault("subplot_kw", {}) + subplot_kw["projection"] = ccrs.PlateCarree() for i in range(0, n, maxplots): fig, ax = plt.subplots(rows, cols, *args, **kwargs) try: @@ -173,22 +178,26 @@ def _is_slice(val): def _only_main(func): """Call the given `func` only from the main project""" + @wraps(func) def wrapper(self, *args, **kwargs): if not self.is_main: return getattr(self.main, func.__name__)(*args, **kwargs) return func(self, *args, **kwargs) + return wrapper def _first_main(func): """Call the given `func` with the same arguments but after the function of the main project""" + @wraps(func) def wrapper(self, *args, **kwargs): if not self.is_main: getattr(self.main, func.__name__)(*args, **kwargs) return func(self, *args, **kwargs) + return wrapper @@ -200,7 +209,7 @@ class Project(ArrayList): _registered_plotters = {} #: registered plotter identifiers #: signal to be emiitted when the current main and/or subproject changes - oncpchange = Signal(name='oncpchange', cls_signal=True) + oncpchange = Signal(name="oncpchange", cls_signal=True) # block the signals of this class block_signals = utils._TempBool() @@ -234,8 +243,10 @@ def _fmtos(self): p0 = plotters[0] if len(plotters) == 1: return p0._fmtos - return (getattr(p0, key) for key in set(p0).intersection( - *map(set, 
plotters[1:]))) + return ( + getattr(p0, key) + for key in set(p0).intersection(*map(set, plotters[1:])) + ) @property def is_csp(self): @@ -251,21 +262,20 @@ def is_cmp(self): def figs(self): """A mapping from figures to data objects with the plotter in this figure""" - ret = utils.DefaultOrderedDict(lambda: self[1:0]) + ret = utils.Defaultdict(lambda: self[1:0]) for arr in self: if arr.psy.plotter is not None: ret[arr.psy.plotter.ax.get_figure()].append(arr) - return OrderedDict(ret) + return dict(ret) @property def axes(self): - """A mapping from axes to data objects with the plotter in this axes - """ - ret = utils.DefaultOrderedDict(lambda: self[1:0]) + """A mapping from axes to data objects with the plotter in this axes""" + ret = utils.Defaultdict(lambda: self[1:0]) for arr in self: if arr.psy.plotter is not None: ret[arr.psy.plotter.ax].append(arr) - return OrderedDict(ret) + return dict(ret) @property def is_main(self): @@ -280,10 +290,13 @@ def logger(self): try: return self._logger except AttributeError: - name = '%s.%s.%s' % (self.__module__, self.__class__.__name__, - self.num) + name = "%s.%s.%s" % ( + self.__module__, + self.__class__.__name__, + self.num, + ) self._logger = logging.getLogger(name) - self.logger.debug('Initializing...') + self.logger.debug("Initializing...") return self._logger @logger.setter @@ -310,15 +323,20 @@ def arr_names(self, value): value = list(islice(value, len(self))) if not len(set(value)) == len(self): raise ValueError( - "Got %i unique array names for %i data objects!" % ( - len(set(value)), len(self))) + "Got %i unique array names for %i data objects!" 
+ % (len(set(value)), len(self)) + ) elif not self.is_main and set(value) & ( - set(self.main.arr_names) - set(self.arr_names)): + set(self.main.arr_names) - set(self.arr_names) + ): raise ValueError( "Cannot rename arrays because there are duplicates with the " - "main project: %s" % ( - set(value) & ( - set(self.main.arr_names) - set(self.arr_names)), )) + "main project: %s" + % ( + set(value) + & (set(self.main.arr_names) - set(self.arr_names)), + ) + ) for arr, n in zip(self, value): arr.psy.arr_name = n if self.main is gcp(True): @@ -333,23 +351,35 @@ def plotters(self): @property def datasets(self): """A mapping from dataset numbers to datasets in this list""" - return {key: val['ds'] for key, val in six.iteritems( - self._get_ds_descriptions(self.array_info(ds_description=['ds'])))} + return { + key: val["ds"] + for key, val in six.iteritems( + self._get_ds_descriptions( + self.array_info(ds_description=["ds"]) + ) + ) + } @property def dsnames_map(self): """A dictionary from the dataset numbers in this list to their filenames""" - return {key: val['fname'] for key, val in six.iteritems( - self._get_ds_descriptions(self.array_info( - ds_description=['num', 'fname']), ds_description={'fname'}))} + return { + key: val["fname"] + for key, val in six.iteritems( + self._get_ds_descriptions( + self.array_info(ds_description=["num", "fname"]), + ds_description={"fname"}, + ) + ) + } @property def dsnames(self): """The set of dataset names in this instance""" return {t[0] for t in self._get_dsnames(self.array_info()) if t[0]} - @docstrings.get_sections(base='Project') + @docstrings.get_sections(base="Project") @docstrings.dedent def __init__(self, *args, **kwargs): """ @@ -362,18 +392,19 @@ def __init__(self, *args, **kwargs): num: int The number of the project """ - self.main = kwargs.pop('main', None) + self.main = kwargs.pop("main", None) self._plot = ProjectPlotter(self) - self.num = kwargs.pop('num', 1) + self.num = kwargs.pop("num", 1) self._ds_counter = 
count() with self.block_signals: super(Project, self).__init__(*args, **kwargs) @classmethod - @docstrings.get_sections(base='Project._register_plotter') + @docstrings.get_sections(base="Project._register_plotter") @dedent - def _register_plotter(cls, identifier, module, plotter_name, - plotter_cls=None): + def _register_plotter( + cls, identifier, module, plotter_name, plotter_cls=None + ): """ Register a plotter in the :class:`Project` class to easy access it @@ -391,14 +422,27 @@ def _register_plotter(cls, identifier, module, plotter_name, when it is needed """ if plotter_cls is not None: # plotter has already been imported + def get_x(self): return self(plotter_cls) + else: + def get_x(self): return self(getattr(import_module(module), plotter_name)) - setattr(cls, identifier, property(get_x, doc=( - "List of data arrays that are plotted by :class:`%s.%s`" - " plotters") % (module, plotter_name))) + + setattr( + cls, + identifier, + property( + get_x, + doc=( + "List of data arrays that are plotted by :class:`%s.%s`" + " plotters" + ) + % (module, plotter_name), + ), + ) cls._registered_plotters[identifier] = (module, plotter_name) def disable(self): @@ -424,8 +468,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close(True, True, True) @staticmethod - @docstrings.get_sections(base='Project._load_preset', - sections=["Parameters", "Notes"]) + @docstrings.get_sections( + base="Project._load_preset", sections=["Parameters", "Notes"] + ) def _load_preset(preset: str): """Load a preset from disk @@ -451,7 +496,7 @@ def _load_preset(preset: str): config = preset else: path = Project._resolve_preset_path(preset) - if path in rcParams['presets.trusted']: + if path in rcParams["presets.trusted"]: loader = yaml.Loader else: loader = yaml.SafeLoader @@ -459,12 +504,12 @@ def _load_preset(preset: str): try: config = yaml.load(f, loader) except yaml.constructor.ConstructorError as e: - e.note = (e.note or '') + ( - ' You might want to add it to the trusted presets ' 
+ e.note = (e.note or "") + ( + " You might want to add it to the trusted presets " 'via\n\npsy.rcParams["presets.trusted"].append("{}")\n\n' - 'and run this method again. To permanently store ' - 'this preset, edit the file at\n\n{} ').format( - path, psyplot_fname()) + "and run this method again. To permanently store " + "this preset, edit the file at\n\n{} " + ).format(path, psyplot_fname()) raise return config @@ -475,18 +520,19 @@ def _resolve_preset_path(preset, if_exists=True): return preset else: confdir = get_configdir() - presets_dir = osp.join(confdir, 'presets') + presets_dir = osp.join(confdir, "presets") if osp.exists(osp.join(presets_dir, preset)): return osp.join(presets_dir, preset) - elif osp.exists(osp.join(presets_dir, preset + '.yml')): - return osp.join(presets_dir, preset + '.yml') + elif osp.exists(osp.join(presets_dir, preset + ".yml")): + return osp.join(presets_dir, preset + ".yml") else: if if_exists: raise ValueError( - f"Could not find a preset with name {preset}") + f"Could not find a preset with name {preset}" + ) else: - if not preset.endswith('.yml'): - return osp.join(presets_dir, preset + '.yml') + if not preset.endswith(".yml"): + return osp.join(presets_dir, preset + ".yml") return preset @docstrings.dedent @@ -516,8 +562,11 @@ def load_preset(self, preset: str, **kwargs): sp = getattr(self, pm) if sp: valid = list(method.plotter_cls._get_formatoptions()) - fmts = {key: val for key, val in defaults.items() - if key in valid} + fmts = { + key: val + for key, val in defaults.items() + if key in valid + } fmts.update(pm_config.get(pm, {})) sp.update(fmt=fmts, **kwargs) self.start_update() @@ -546,12 +595,10 @@ def extract_fmts_from_preset(preset: str, plotmethod: str): plotmethods = plot._plot_methods pm_config, defaults = utils.sort_kwargs(preset, plotmethods) valid = list(method.plotter_cls._get_formatoptions()) - fmts = {key: val for key, val in defaults.items() - if key in valid} + fmts = {key: val for key, val in 
defaults.items() if key in valid} fmts.update(pm_config.get(plotmethod, {})) return fmts - def save_preset(self, fname=None, include_defaults=False, update=False): """Save the formatoptions of this project as a preset @@ -566,7 +613,7 @@ def include(fmto, plotters): return True if include_defaults else fmto.changed if update: - with open(f) as f: + with open(fname) as f: preset = yaml.load(f, yaml.Loader) else: preset = {} @@ -588,7 +635,7 @@ def include(fmto, plotters): if fname is not None: fname = self._resolve_preset_path(fname, False) os.makedirs(osp.dirname(fname), exist_ok=True) - with open(fname, 'w') as f: + with open(fname, "w") as f: yaml.dump(preset, f) else: return preset @@ -623,7 +670,7 @@ def append(self, *args, **kwargs): __call__.__doc__ = ArrayList.__call__.__doc__ - @docstrings.get_sections(base='Project.close') + @docstrings.get_sections(base="Project.close") @dedent def close(self, figs=True, data=False, ds=False, remove_only=False): """ @@ -641,6 +688,7 @@ def close(self, figs=True, data=False, ds=False, remove_only=False): If True and `figs` is True, the figures are not closed but the plotters are removed""" import matplotlib.pyplot as plt + close_ds = ds for arr in self[:]: if figs and arr.psy.plotter is not None: @@ -662,10 +710,17 @@ def close(self, figs=True, data=False, ds=False, remove_only=False): pass if close_ds: if isinstance(arr, InteractiveList): - for ds in [val['ds'] for val in six.itervalues( - arr._get_ds_descriptions( - arr.array_info(ds_description=['ds'], - standardize_dims=False)))]: + for ds in [ + val["ds"] + for val in six.itervalues( + arr._get_ds_descriptions( + arr.array_info( + ds_description=["ds"], + standardize_dims=False, + ) + ) + ) + ]: ds.close() else: arr.psy.base.close() @@ -676,23 +731,39 @@ def close(self, figs=True, data=False, ds=False, remove_only=False): elif self.main.is_cmp: self.oncpchange.emit(self.main) - docstrings.keep_params('multiple_subplots.parameters', 'delete') - 
docstrings.delete_params('ArrayList.from_dataset.parameters', 'base') - docstrings.delete_kwargs('ArrayList.from_dataset.other_parameters', - kwargs='kwargs') - docstrings.keep_params('xarray.open_mfdataset.parameters', 'concat_dim') - docstrings.keep_params('Project._load_preset.parameters', 'preset') + docstrings.keep_params("multiple_subplots.parameters", "delete") + docstrings.delete_params("ArrayList.from_dataset.parameters", "base") + docstrings.delete_kwargs( + "ArrayList.from_dataset.other_parameters", kwargs="kwargs" + ) + docstrings.keep_params("xarray.open_mfdataset.parameters", "concat_dim") + docstrings.keep_params("Project._load_preset.parameters", "preset") @_only_main - @docstrings.get_sections(base='Project._add_data', - sections=['Parameters', 'Other Parameters', - 'Returns']) + @docstrings.get_sections( + base="Project._add_data", + sections=["Parameters", "Other Parameters", "Returns"], + ) @docstrings.dedent - def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True, - draw=False, mf_mode=False, ax=None, engine=None, delete=True, - share=False, clear=False, enable_post=None, - concat_dim=_concat_dim_default, load=False, - *args, **kwargs): + def _add_data( + self, + plotter_cls, + filename_or_obj, + fmt={}, + make_plot=True, + draw=False, + mf_mode=False, + ax=None, + engine=None, + delete=True, + share=False, + clear=False, + enable_post=None, + concat_dim=_concat_dim_default, + load=False, + *args, + **kwargs, + ): """ Extract data from a dataset and visualize it with the given plotter @@ -762,12 +833,11 @@ def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True, The subproject that contains the new (visualized) data array""" if not isinstance(filename_or_obj, xarray.Dataset): if mf_mode: - filename_or_obj = open_mfdataset(filename_or_obj, - engine=engine, - concat_dim=concat_dim) + filename_or_obj = open_mfdataset( + filename_or_obj, engine=engine, concat_dim=concat_dim + ) else: - filename_or_obj = 
open_dataset(filename_or_obj, - engine=engine) + filename_or_obj = open_dataset(filename_or_obj, engine=engine) if load: old = filename_or_obj filename_or_obj = filename_or_obj.load() @@ -775,32 +845,43 @@ def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True, fmt = dict(fmt) possible_fmts = list(plotter_cls._get_formatoptions()) - additional_fmt, kwargs = utils.sort_kwargs( - kwargs, possible_fmts) + additional_fmt, kwargs = utils.sort_kwargs(kwargs, possible_fmts) fmt.update(additional_fmt) if enable_post is None: - enable_post = bool(fmt.get('post')) + enable_post = bool(fmt.get("post")) # create the subproject sub_project = self.from_dataset(filename_or_obj, **kwargs) sub_project.main = self sub_project.no_auto_update = not ( - not sub_project.no_auto_update or not self.no_auto_update) + not sub_project.no_auto_update or not self.no_auto_update + ) # create the subplots proj = plotter_cls._get_sample_projection() if isinstance(ax, tuple): - axes = iter(multiple_subplots( - *ax, n=len(sub_project), subplot_kw={'projection': proj})) - elif ax is None or isinstance(ax, (mpl.axes.SubplotBase, - mpl.axes.Axes)): + axes = iter( + multiple_subplots( + *ax, n=len(sub_project), subplot_kw={"projection": proj} + ) + ) + elif ax is None or isinstance( + ax, (mpl.axes.SubplotBase, mpl.axes.Axes) + ): axes = repeat(ax) else: axes = iter(ax) clear = clear or (isinstance(ax, tuple) and proj is not None) for arr in sub_project: - plotter_cls(arr, make_plot=(not bool(share) and make_plot), - draw=False, ax=next(axes), clear=clear, - project=self, enable_post=enable_post, **fmt) + plotter_cls( + arr, + make_plot=(not bool(share) and make_plot), + draw=False, + ax=next(axes), + clear=clear, + project=self, + enable_post=enable_post, + **fmt, + ) if share: if share is True: share = possible_fmts @@ -809,16 +890,18 @@ def _add_data(self, plotter_cls, filename_or_obj, fmt={}, make_plot=True, else: share = list(share) sub_project[0].psy.plotter.share( - 
[arr.psy.plotter for arr in sub_project[1:]], keys=share, - draw=False) + [arr.psy.plotter for arr in sub_project[1:]], + keys=share, + draw=False, + ) if make_plot: for arr in sub_project: arr.psy.plotter.reinit(draw=False, clear=clear) if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw: sub_project.draw() - if rcParams['auto_show']: + if rcParams["auto_show"]: self.show() self.extend(sub_project, new_name=True) if self is gcp(True): @@ -829,14 +912,14 @@ def __getitem__(self, key): """Overwrites lists __getitem__ by returning subproject if `key` is a slice""" if isinstance(key, slice): # return a new project - ret = self.__class__( - super(Project, self).__getitem__(key)) + ret = self.__class__(super(Project, self).__getitem__(key)) ret.main = self.main else: # return the item ret = super(Project, self).__getitem__(key) return ret if six.PY2: # for compatibility to python 2.7 + def __getslice__(self, *args): return self[slice(*args)] @@ -850,15 +933,17 @@ def __add__(self, other): def show(): """Shows all open figures""" import matplotlib.pyplot as plt + plt.show(block=False) - docstrings.keep_params('join_dicts.parameters', 'delimiter') - docstrings.keep_params('join_dicts.parameters', 'keep_all') + docstrings.keep_params("join_dicts.parameters", "delimiter") + docstrings.keep_params("join_dicts.parameters", "keep_all") - @docstrings.get_sections(base='Project.joined_attrs') + @docstrings.get_sections(base="Project.joined_attrs") @docstrings.with_indent(8) - def joined_attrs(self, delimiter=', ', enhanced=True, plot_data=False, - keep_all=True): + def joined_attrs( + self, delimiter=", ", enhanced=True, plot_data=False, keep_all=True + ): """Join the attributes of the arrays in this project Parameters @@ -882,21 +967,26 @@ def joined_attrs(self, delimiter=', ', enhanced=True, plot_data=False, if enhanced: all_attrs = [ plotter.get_enhanced_attrs( - getattr(plotter, 'plot_data' if plot_data else 'data')) - for plotter in 
self.plotters] + getattr(plotter, "plot_data" if plot_data else "data") + ) + for plotter in self.plotters + ] else: if plot_data: - all_attrs = [plotter.plot_data.attrs - for plotter in self.plotters] + all_attrs = [ + plotter.plot_data.attrs for plotter in self.plotters + ] else: all_attrs = [arr.attrs for arr in self] - return utils.join_dicts(all_attrs, delimiter=delimiter, - keep_all=keep_all) + return utils.join_dicts( + all_attrs, delimiter=delimiter, keep_all=keep_all + ) - @docstrings.get_sections(base='Project.format_string') + @docstrings.get_sections(base="Project.format_string") @docstrings.with_indent(8) - def format_string(self, s, use_time=False, format_args=None, *args, - **kwargs): + def format_string( + self, s, use_time=False, format_args=None, *args, **kwargs + ): """Format a string with the attributes in this project Parameters @@ -936,11 +1026,18 @@ def format_string(self, s, use_time=False, format_args=None, *args, pass return safe_modulo(s, attrs) - docstrings.keep_params('Project.format_string.parameters', 'use_time') + docstrings.keep_params("Project.format_string.parameters", "use_time") @docstrings.with_indent(8) - def export(self, output, tight=False, concat=True, close_pdf=None, - use_time=False, **kwargs): + def export( + self, + output, + tight=False, + concat=True, + close_pdf=None, + use_time=False, + **kwargs, + ): """Exports the figures of the project to one or more image files Parameters @@ -1006,8 +1103,9 @@ def export(self, output, tight=False, concat=True, close_pdf=None, >>> p.export(['my_plots1.pdf', 'my_plots2.pdf']) """ from matplotlib.backends.backend_pdf import PdfPages + if tight: - kwargs['bbox_inches'] = 'tight' + kwargs["bbox_inches"] = "tight" not_enough_files_warnings = ( "Not enough output files specified! %i figures are open " @@ -1015,12 +1113,13 @@ def export(self, output, tight=False, concat=True, close_pdf=None, "that some figures may be overwritten after being " "exported! 
Use a pdf instead if you want to save all " "figures or include a '%%i' string in the filename to " - "avoid duplicates.") + "avoid duplicates." + ) if isinstance(output, six.string_types): # a single string - out_fmt = kwargs.pop('format', os.path.splitext(output))[1][1:] - if out_fmt.lower() == 'pdf' and concat: - output = self.format_string(output, use_time, delimiter='-') + out_fmt = kwargs.pop("format", os.path.splitext(output))[1][1:] + if out_fmt.lower() == "pdf" and concat: + output = self.format_string(output, use_time, delimiter="-") pdf = PdfPages(output) for fig in self.figs: @@ -1034,12 +1133,14 @@ def export(self, output, tight=False, concat=True, close_pdf=None, output = [output] * len(self.figs) if utils.is_iterable(output): # a list of strings - output = [sp.format_string(out, use_time, i, delimiter='-') - for i, (out, sp) in enumerate( - zip(output, self.figs.values()), 1)] + output = [ + sp.format_string(out, use_time, i, delimiter="-") + for i, (out, sp) in enumerate( + zip(output, self.figs.values()), 1 + ) + ] if len(set(output)) != len(output): - warn(not_enough_files_warnings % ( - len(output), len(self.figs))) + warn(not_enough_files_warnings % (len(output), len(self.figs))) output = iter(output) for fig, out in zip(self.figs, output): @@ -1050,8 +1151,8 @@ def export(self, output, tight=False, concat=True, close_pdf=None, if close_pdf: output.close() - docstrings.keep_params('Plotter.share.parameters', 'keys') - docstrings.delete_params('Plotter.share.parameters', 'keys', 'plotters') + docstrings.keep_params("Plotter.share.parameters", "keys") + docstrings.delete_params("Plotter.share.parameters", "keys", "plotters") @docstrings.dedent def share(self, base=None, keys=None, by=None, **kwargs): @@ -1081,21 +1182,25 @@ def share(self, base=None, keys=None, by=None, **kwargs): psyplot.plotter.share""" if by is not None: if base is not None: - if hasattr(base, 'psy') or isinstance(base, Plotter): + if hasattr(base, "psy") or isinstance(base, 
Plotter): base = [base] - if by.lower() in ['ax', 'axes']: - bases = {ax: p[0] for ax, p in six.iteritems( - Project(base).axes)} - elif by.lower() in ['fig', 'figure']: - bases = {fig: p[0] for fig, p in six.iteritems( - Project(base).figs)} + if by.lower() in ["ax", "axes"]: + bases = { + ax: p[0] for ax, p in six.iteritems(Project(base).axes) + } + elif by.lower() in ["fig", "figure"]: + bases = { + fig: p[0] + for fig, p in six.iteritems(Project(base).figs) + } else: raise ValueError( "*by* must be out of {'fig', 'figure', 'ax', 'axes'}. " - "Not %s" % (by, )) + "Not %s" % (by,) + ) else: bases = {} - projects = self.axes if by == 'axes' else self.figs + projects = self.axes if by == "axes" else self.figs for obj, p in projects.items(): p.share(bases.get(obj), keys, **kwargs) else: @@ -1108,7 +1213,7 @@ def share(self, base=None, keys=None, by=None, **kwargs): base = plotters[0] plotters = plotters[1:] elif not isinstance(base, Plotter): - base = getattr(getattr(base, 'psy', base), 'plotter', base) + base = getattr(getattr(base, "psy", base), "plotter", base) base.share(plotters, keys=keys, **kwargs) @docstrings.dedent @@ -1129,9 +1234,9 @@ def unshare(self, **kwargs): for plotter in self.plotters: plotter.unshare_me(**kwargs) - docstrings.delete_params('ArrayList.array_info.parameters', 'pwd', 'copy') + docstrings.delete_params("ArrayList.array_info.parameters", "pwd", "copy") - @docstrings.get_sections(base='Project.save_project') + @docstrings.get_sections(base="Project.save_project") @docstrings.dedent def save_project(self, fname=None, pwd=None, pack=False, **kwargs): """ @@ -1163,31 +1268,34 @@ def save_project(self, fname=None, pwd=None, pack=False, **kwargs): if pack and fname is not None: target_dir = os.path.dirname(fname) if not os.path.exists(target_dir): - os.makedirs(target_dir) + os.makedirs(target_dir, exist_ok=True) def tmp_it(): from tempfile import NamedTemporaryFile + while True: - yield NamedTemporaryFile( - dir=target_dir, 
suffix='.nc').name + yield NamedTemporaryFile(dir=target_dir, suffix=".nc").name - kwargs.setdefault('paths', tmp_it()) + kwargs.setdefault("paths", tmp_it()) if fname is not None: - kwargs['copy'] = True + kwargs["copy"] = True _update_versions() - ret = {'figs': dict(map(_ProjectLoader.inspect_figure, self.figs)), - 'arrays': self.array_info(pwd=pwd, **kwargs), - 'versions': _deepcopy(_versions)} + ret = { + "figs": dict(map(_ProjectLoader.inspect_figure, self.figs)), + "arrays": self.array_info(pwd=pwd, **kwargs), + "versions": _deepcopy(_versions), + } if pack and fname is not None: # we get the filenames out of the results and copy the datasets # there. After that we check the filenames again and force them # to the desired directory from shutil import copyfile - fnames = (f[0] for f in self._get_dsnames(ret['arrays'])) - alternative_paths = kwargs.pop('alternative_paths', {}) + + fnames = (f[0] for f in self._get_dsnames(ret["arrays"])) + alternative_paths = kwargs.pop("alternative_paths", {}) counters = defaultdict(int) - if kwargs.get('use_rel_paths', True): + if kwargs.get("use_rel_paths", True): get_path = partial(os.path.relpath, start=target_dir) else: get_path = os.path.abspath @@ -1195,51 +1303,73 @@ def tmp_it(): if ds_fname is None or utils.is_remote_url(ds_fname): continue dst_file = alternative_paths.get( - ds_fname, os.path.join(target_dir, os.path.basename( - ds_fname))) + ds_fname, + os.path.join(target_dir, os.path.basename(ds_fname)), + ) orig_dst_file = dst_file if counters[dst_file] and ( - not os.path.exists(dst_file) or - not os.path.samefile(ds_fname, dst_file)): + not os.path.exists(dst_file) + or not os.path.samefile(ds_fname, dst_file) + ): dst_file, ext = os.path.splitext(dst_file) - dst_file += '-' + str(counters[orig_dst_file]) + ext - if (not os.path.exists(dst_file) or - not os.path.samefile(ds_fname, dst_file)): + dst_file += "-" + str(counters[orig_dst_file]) + ext + if not os.path.exists(dst_file) or not os.path.samefile( + 
ds_fname, dst_file + ): copyfile(ds_fname, dst_file) counters[orig_dst_file] += 1 alternative_paths.setdefault(ds_fname, get_path(dst_file)) - ret['arrays'] = self.array_info( - pwd=pwd, alternative_paths=alternative_paths, **kwargs) + ret["arrays"] = self.array_info( + pwd=pwd, alternative_paths=alternative_paths, **kwargs + ) # store the plotter settings - for arr, d in zip(self, six.itervalues(ret['arrays'])): + for arr, d in zip(self, six.itervalues(ret["arrays"])): if arr.psy.plotter is None: continue plotter = arr.psy.plotter - d['plotter'] = { - 'ax': _ProjectLoader.inspect_axes(plotter.ax), - 'fmt': {key: getattr(plotter, key).value2pickle - for key in plotter}, - 'cls': (plotter.__class__.__module__, - plotter.__class__.__name__), - 'shared': {}} - d['plotter']['ax']['shared'] = set( - other.psy.arr_name for other in self - if other.psy.ax == plotter.ax) + d["plotter"] = { + "ax": _ProjectLoader.inspect_axes(plotter.ax), + "fmt": { + key: getattr(plotter, key).value2pickle for key in plotter + }, + "cls": ( + plotter.__class__.__module__, + plotter.__class__.__name__, + ), + "shared": {}, + } + d["plotter"]["ax"]["shared"] = set( + other.psy.arr_name + for other in self + if other.psy.ax == plotter.ax + ) if plotter.ax._sharex: - d['plotter']['ax']['sharex'] = next( - (other.psy.arr_name for other in self - if other.psy.ax == plotter.ax._sharex), None) + d["plotter"]["ax"]["sharex"] = next( + ( + other.psy.arr_name + for other in self + if other.psy.ax == plotter.ax._sharex + ), + None, + ) if plotter.ax._sharey: - d['plotter']['ax']['sharey'] = next( - (other.psy.arr_name for other in self - if other.psy.ax == plotter.ax._sharey), None) - shared = d['plotter']['shared'] + d["plotter"]["ax"]["sharey"] = next( + ( + other.psy.arr_name + for other in self + if other.psy.ax == plotter.ax._sharey + ), + None, + ) + shared = d["plotter"]["shared"] for fmto in plotter._fmtos: if fmto.shared: - shared[fmto.key] = [other_fmto.plotter.data.psy.arr_name - for 
other_fmto in fmto.shared] + shared[fmto.key] = [ + other_fmto.plotter.data.psy.arr_name + for other_fmto in fmto.shared + ] if fname is not None: - with open(fname, 'wb') as f: + with open(fname, "wb") as f: pickle.dump(ret, f) return None @@ -1264,6 +1394,7 @@ def keys(self, *args, **kwargs): class TmpClass(Plotter): pass + for fmto in self._fmtos: setattr(TmpClass, fmto.key, type(fmto)(fmto.key)) return TmpClass.show_keys(*args, **kwargs) @@ -1287,6 +1418,7 @@ def summaries(self, *args, **kwargs): class TmpClass(Plotter): pass + for fmto in self._fmtos: setattr(TmpClass, fmto.key, type(fmto)(fmto.key)) return TmpClass.show_summaries(*args, **kwargs) @@ -1310,6 +1442,7 @@ def docs(self, *args, **kwargs): class TmpClass(Plotter): pass + for fmto in self._fmtos: setattr(TmpClass, fmto.key, type(fmto)(fmto.key)) return TmpClass.show_docs(*args, **kwargs) @@ -1334,24 +1467,34 @@ def from_dataset(cls, *args, **kwargs): Project The newly created project instance """ - main = kwargs.pop('main', None) + main = kwargs.pop("main", None) ret = super(Project, cls).from_dataset(*args, **kwargs) if main is not None: ret.main = main main.extend(ret, new_name=False) return ret - docstrings.delete_params('ArrayList.from_dict.parameters', 'd', 'pwd') - docstrings.keep_params('Project._add_data.parameters', 'make_plot') - docstrings.keep_params('Project._add_data.parameters', 'clear') + docstrings.delete_params("ArrayList.from_dict.parameters", "d", "pwd") + docstrings.keep_params("Project._add_data.parameters", "make_plot") + docstrings.keep_params("Project._add_data.parameters", "clear") @classmethod - @docstrings.get_sections(base='Project.load_project') + @docstrings.get_sections(base="Project.load_project") @docstrings.dedent - def load_project(cls, fname, auto_update=None, make_plot=True, - draw=False, alternative_axes=None, main=False, - encoding=None, enable_post=False, new_fig=True, - clear=None, **kwargs): + def load_project( + cls, + fname, + auto_update=None, + 
make_plot=True, + draw=False, + alternative_axes=None, + main=False, + encoding=None, + enable_post=False, + new_fig=True, + clear=None, + **kwargs, + ): """ Load a project from a file or dict @@ -1425,33 +1568,35 @@ def get_ax_base(name, alternatives): alternatives.difference_update(obj(ax=ax_base).arr_names) return ax_base - pwd = kwargs.pop('pwd', None) + pwd = kwargs.pop("pwd", None) if isinstance(fname, six.string_types): - with open(fname, 'rb') as f: - pickle_kws = {} if not encoding else {'encoding': encoding} + with open(fname, "rb") as f: + pickle_kws = {} if not encoding else {"encoding": encoding} d = pickle.load(f, **pickle_kws) pwd = pwd or os.path.dirname(fname) else: d = dict(fname) - pwd = pwd or getcwd() + pwd = pwd or os.getcwd() # check for patches of plugins - for ep in iter_entry_points('psyplot', name='patches'): + for ep in iter_entry_points("psyplot", name="patches"): patches = ep.load() - for arr_d in d.get('arrays').values(): - plotter_cls = arr_d.get('plotter', {}).get('cls') + for arr_d in d.get("arrays").values(): + plotter_cls = arr_d.get("plotter", {}).get("cls") if plotter_cls is not None and plotter_cls in patches: # apply the patch - patches[plotter_cls](arr_d['plotter'], - d.get('versions', {})) + patches[plotter_cls]( + arr_d["plotter"], d.get("versions", {}) + ) fig_map = {} if alternative_axes is None: - for fig_dict in six.itervalues(d.get('figs', {})): - orig_num = fig_dict.get('num') or 1 + for fig_dict in six.itervalues(d.get("figs", {})): + orig_num = fig_dict.get("num") or 1 fig_map[orig_num] = _ProjectLoader.load_figure( - fig_dict, new_fig=new_fig).number + fig_dict, new_fig=new_fig + ).number elif not isinstance(alternative_axes, dict): alternative_axes = cycle(iter(alternative_axes)) - obj = cls.from_dict(d['arrays'], pwd=pwd, **kwargs) + obj = cls.from_dict(d["arrays"], pwd=pwd, **kwargs) if main: # we create a new project with the project factory to make sure # that everything is handled correctly @@ -1461,75 
+1606,94 @@ def get_ax_base(name, alternatives): sharex = defaultdict(set) sharey = defaultdict(set) for arr, (arr_name, arr_dict) in zip( - obj, filter(lambda t: t[0] in arr_names, - six.iteritems(d['arrays']))): - if not arr_dict.get('plotter'): + obj, + filter(lambda t: t[0] in arr_names, six.iteritems(d["arrays"])), + ): + if not arr_dict.get("plotter"): continue - plot_dict = arr_dict['plotter'] + plot_dict = arr_dict["plotter"] plotter_cls = getattr( - import_module(plot_dict['cls'][0]), plot_dict['cls'][1]) + import_module(plot_dict["cls"][0]), plot_dict["cls"][1] + ) ax = None if alternative_axes is not None: if isinstance(alternative_axes, dict): ax = alternative_axes.get(arr.arr_name) else: ax = next(alternative_axes, None) - if ax is None and 'ax' in plot_dict: - already_opened = plot_dict['ax'].get( - 'shared', set()).intersection(axes) + if ax is None and "ax" in plot_dict: + already_opened = ( + plot_dict["ax"].get("shared", set()).intersection(axes) + ) if already_opened: ax = axes[next(iter(already_opened))] else: - plot_dict['ax'].pop('shared', None) - plot_dict['ax']['fig'] = fig_map[ - plot_dict['ax'].get('fig') or 1] - if plot_dict['ax'].get('sharex'): - sharex[plot_dict['ax'].pop('sharex')].add( - arr.psy.arr_name) - if plot_dict['ax'].get('sharey'): - sharey[plot_dict['ax'].pop('sharey')].add( - arr.psy.arr_name) + plot_dict["ax"].pop("shared", None) + plot_dict["ax"]["fig"] = fig_map[ + plot_dict["ax"].get("fig") or 1 + ] + if plot_dict["ax"].get("sharex"): + sharex[plot_dict["ax"].pop("sharex")].add( + arr.psy.arr_name + ) + if plot_dict["ax"].get("sharey"): + sharey[plot_dict["ax"].pop("sharey")].add( + arr.psy.arr_name + ) axes[arr.psy.arr_name] = ax = _ProjectLoader.load_axes( - plot_dict['ax']) + plot_dict["ax"] + ) plotter_cls( - arr, make_plot=False, draw=False, clear=False, - ax=ax, project=obj.main, enable_post=enable_post, - **plot_dict['fmt']) + arr, + make_plot=False, + draw=False, + clear=False, + ax=ax, + project=obj.main, + 
enable_post=enable_post, + **plot_dict["fmt"], + ) # handle shared x and y-axes for key, names in sharex.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_x_axes().join( - ax_base, *obj(arr_name=names).axes) + ax_base, *obj(arr_name=names).axes + ) for ax in obj(arr_name=names).axes: ax._sharex = ax_base for key, names in sharey.items(): ax_base = get_ax_base(key, names) if ax_base is not None: ax_base.get_shared_y_axes().join( - ax_base, *obj(arr_name=names).axes) + ax_base, *obj(arr_name=names).axes + ) for ax in obj(arr_name=names).axes: ax._sharey = ax_base for arr in obj.with_plotter: - shared = d['arrays'][arr.psy.arr_name]['plotter'].get('shared', {}) + shared = d["arrays"][arr.psy.arr_name]["plotter"].get("shared", {}) for key, arr_names in six.iteritems(shared): - arr.psy.plotter.share(obj(arr_name=arr_names).plotters, - keys=[key]) + arr.psy.plotter.share( + obj(arr_name=arr_names).plotters, keys=[key] + ) if make_plot: for plotter in obj.plotters: plotter.reinit( draw=False, - clear=clear or ( - clear is None and - plotter_cls._get_sample_projection() is not None)) + clear=clear + or ( + clear is None + and plotter_cls._get_sample_projection() is not None + ), + ) if draw is None: - draw = rcParams['auto_draw'] + draw = rcParams["auto_draw"] if draw: obj.draw() - if rcParams['auto_show']: + if rcParams["auto_show"]: obj.show() if auto_update is None: - auto_update = rcParams['lists.auto_update'] + auto_update = rcParams["lists.auto_update"] if not main: obj._main = gcp(True) obj.main.extend(obj, new_name=True) @@ -1538,7 +1702,7 @@ def get_ax_base(name, alternatives): return obj @classmethod - @docstrings.get_sections(base='Project.scp') + @docstrings.get_sections(base="Project.scp") @dedent def scp(cls, project): """ @@ -1573,7 +1737,7 @@ def scp(cls, project): _scp(sp) cls.oncpchange.emit(sp) - docstrings.delete_params('Project.parameters', 'num') + docstrings.delete_params("Project.parameters", "num") @classmethod 
@docstrings.dedent @@ -1603,8 +1767,9 @@ def new(cls, num=None, *args, **kwargs): return project def __str__(self): - return (('%i Main ' % self.num) if self.is_main else '') + super( - Project, self).__str__() + return (("%i Main " % self.num) if self.is_main else "") + super( + Project, self + ).__str__() class _ProjectLoader(object): @@ -1618,62 +1783,71 @@ def inspect_figure(fig): containing the necessary information for the :func:`matplotlib.pyplot.figure` function""" return fig.number, { - 'num': fig.number, - 'figsize': (fig.get_figwidth(), fig.get_figheight()), - 'dpi': fig.get_dpi() / getattr(fig.canvas, '_dpi_ratio', 1), - 'facecolor': fig.get_facecolor(), - 'edgecolor': fig.get_edgecolor(), - 'frameon': fig.get_frameon(), - 'tight_layout': fig.get_tight_layout(), - 'subplotpars': vars(fig.subplotpars)} + "num": fig.number, + "figsize": (fig.get_figwidth(), fig.get_figheight()), + "dpi": fig.get_dpi() / getattr(fig.canvas, "_dpi_ratio", 1), + "facecolor": fig.get_facecolor(), + "edgecolor": fig.get_edgecolor(), + "frameon": fig.get_frameon(), + "tight_layout": fig.get_tight_layout(), + "subplotpars": vars(fig.subplotpars), + } @staticmethod def load_figure(d, new_fig=True): """Create a figure from what is returned by :meth:`inspect_figure`""" import matplotlib.pyplot as plt - subplotpars = d.pop('subplotpars', None) + + subplotpars = d.pop("subplotpars", None) if subplotpars is not None: - subplotpars.pop('validate', None) - subplotpars.pop('_validate', None) + subplotpars.pop("validate", None) + subplotpars.pop("_validate", None) subplotpars = mfig.SubplotParams(**subplotpars) if new_fig: nums = plt.get_fignums() - if d.get('num') in nums: - d['num'] = next( - i for i in range(max(plt.get_fignums()) + 1, 0, -1) - if i not in nums) + if d.get("num") in nums: + d["num"] = next( + i + for i in range(max(plt.get_fignums()) + 1, 0, -1) + if i not in nums + ) return plt.figure(subplotpars=subplotpars, **d) @staticmethod def inspect_axes(ax): """Inspect an axes 
or subplot to get the initialization parameters""" - ret = {'fig': ax.get_figure().number} - if mpl.__version__ < '2.0': - ret['axisbg'] = ax.get_axis_bgcolor() + ret = {"fig": ax.get_figure().number} + if mpl.__version__ < "2.0": + ret["axisbg"] = ax.get_axis_bgcolor() else: # axisbg is depreceated - ret['facecolor'] = ax.get_facecolor() - proj = getattr(ax, 'projection', None) + ret["facecolor"] = ax.get_facecolor() + proj = getattr(ax, "projection", None) if proj is not None and not isinstance(proj, six.string_types): proj = (proj.__class__.__module__, proj.__class__.__name__) - ret['projection'] = proj - ret['visible'] = ax.get_visible() - ret['spines'] = {} - ret['zorder'] = ax.get_zorder() - ret['yaxis_inverted'] = ax.yaxis_inverted() - ret['xaxis_inverted'] = ax.xaxis_inverted() + ret["projection"] = proj + ret["visible"] = ax.get_visible() + ret["spines"] = {} + ret["zorder"] = ax.get_zorder() + ret["yaxis_inverted"] = ax.yaxis_inverted() + ret["xaxis_inverted"] = ax.xaxis_inverted() for key, val in ax.spines.items(): - ret['spines'][key] = {} - for prop in ['linestyle', 'edgecolor', 'linewidth', - 'facecolor', 'visible']: - ret['spines'][key][prop] = getattr(val, 'get_' + prop)() - if isinstance(ax, mfig.SubplotBase): + ret["spines"][key] = {} + for prop in [ + "linestyle", + "edgecolor", + "linewidth", + "facecolor", + "visible", + ]: + ret["spines"][key][prop] = getattr(val, "get_" + prop)() + if isinstance(ax, SubplotBase): sp = ax.get_subplotspec().get_topmost_subplotspec() - ret['grid_spec'] = sp.get_geometry()[:2] - ret['subplotspec'] = [sp.num1, sp.num2] - ret['is_subplot'] = True + ret["grid_spec"] = sp.get_geometry()[:2] + ret["subplotspec"] = [sp.num1, sp.num2] + ret["is_subplot"] = True else: - ret['args'] = [ax.get_position(True).bounds] - ret['is_subplot'] = False + ret["args"] = [ax.get_position(True).bounds] + ret["is_subplot"] = False return ret @staticmethod @@ -1681,23 +1855,25 @@ def load_axes(d): """Create an axes or subplot from what 
is returned by :meth:`inspect_axes`""" import matplotlib.pyplot as plt - fig = plt.figure(d.pop('fig', None)) - proj = d.pop('projection', None) - spines = d.pop('spines', None) - invert_yaxis = d.pop('yaxis_inverted', None) - invert_xaxis = d.pop('xaxis_inverted', None) - if mpl.__version__ >= '2.0' and 'axisbg' in d: # axisbg is depreceated - d['facecolor'] = d.pop('axisbg') - elif mpl.__version__ < '2.0' and 'facecolor' in d: - d['axisbg'] = d.pop('facecolor') + + fig = plt.figure(d.pop("fig", None)) + proj = d.pop("projection", None) + spines = d.pop("spines", None) + invert_yaxis = d.pop("yaxis_inverted", None) + invert_xaxis = d.pop("xaxis_inverted", None) + if mpl.__version__ >= "2.0" and "axisbg" in d: # axisbg is depreceated + d["facecolor"] = d.pop("axisbg") + elif mpl.__version__ < "2.0" and "facecolor" in d: + d["axisbg"] = d.pop("facecolor") if proj is not None and not isinstance(proj, six.string_types): proj = getattr(import_module(proj[0]), proj[1])() - if d.pop('is_subplot', None): - grid_spec = mpl.gridspec.GridSpec(*d.pop('grid_spec', (1, 1))) + if d.pop("is_subplot", None): + grid_spec = mpl.gridspec.GridSpec(*d.pop("grid_spec", (1, 1))) subplotspec = mpl.gridspec.SubplotSpec( - grid_spec, *d.pop('subplotspec', (1, None))) + grid_spec, *d.pop("subplotspec", (1, None)) + ) return fig.add_subplot(subplotspec, projection=proj, **d) - ret = fig.add_axes(*d.pop('args', []), projection=proj, **d) + ret = fig.add_axes(*d.pop("args", []), projection=proj, **d) if spines is not None: for key, val in spines.items(): ret.spines[key].update(val) @@ -1724,8 +1900,9 @@ def project(self): def __init__(self, project=None): self._project = project - docstrings.keep_params('ArrayList.from_dataset.parameters', - 'default_slice') + docstrings.keep_params( + "ArrayList.from_dataset.parameters", "default_slice" + ) @property def _plot_methods(self): @@ -1743,12 +1920,14 @@ def show_plot_methods(self): if print_func is None: print_func = six.print_ s = "\n".join( - 
"%s\n %s" % t for t in six.iteritems(self._plot_methods)) + "%s\n %s" % t for t in six.iteritems(self._plot_methods) + ) return print_func(s) - @docstrings.get_sections(base='ProjectPlotter._add_data', - sections=['Parameters', 'Other Parameters', - 'Returns']) + @docstrings.get_sections( + base="ProjectPlotter._add_data", + sections=["Parameters", "Other Parameters", "Returns"], + ) @docstrings.dedent def _add_data(self, *args, **kwargs): """ @@ -1772,14 +1951,22 @@ def _add_data(self, *args, **kwargs): return self.project._add_data(*args, **kwargs) @classmethod - @docstrings.get_sections(base='ProjectPlotter._register_plotter') + @docstrings.get_sections(base="ProjectPlotter._register_plotter") @docstrings.dedent - def _register_plotter(cls, identifier, module, plotter_name, - plotter_cls=None, summary='', prefer_list=False, - default_slice=None, default_dims={}, - show_examples=True, - example_call="filename, name=['my_variable'], ...", - plugin=None): + def _register_plotter( + cls, + identifier, + module, + plotter_name, + plotter_cls=None, + summary="", + prefer_list=False, + default_slice=None, + default_dims={}, + show_examples=True, + example_call="filename, name=['my_variable'], ...", + plugin=None, + ): """ Register a plotter for making plots @@ -1812,28 +1999,39 @@ class under the name of the given `identifier` plugin: str The name of the plugin """ - full_name = '%s.%s' % (module, plotter_name) + full_name = "%s.%s" % (module, plotter_name) if plotter_cls is not None: # plotter has already been imported - docstrings.params['%s.formatoptions' % (full_name)] = \ - plotter_cls.show_keys( - indent=4, func=str, - # include links in sphinx doc - include_links=None) - doc_str = ('Possible formatoptions are\n\n' - '%%(%s.formatoptions)s') % full_name + docstrings.params[ + "%s.formatoptions" % (full_name) + ] = plotter_cls.show_keys( + indent=4, + func=str, + # include links in sphinx doc + include_links=None, + ) + doc_str = ( + "Possible formatoptions 
are\n\n" "%%(%s.formatoptions)s" + ) % full_name else: - doc_str = '' + doc_str = "" summary = summary or ( - 'Open and plot data via :class:`%s.%s` plotters' % ( - module, plotter_name)) + "Open and plot data via :class:`%s.%s` plotters" + % (module, plotter_name) + ) if plotter_cls is not None: _versions.update(get_versions(key=lambda s: s == plugin)) class PlotMethod(cls._plot_method_base_cls): - __doc__ = cls._gen_doc(summary, full_name, identifier, - example_call, doc_str, show_examples) + __doc__ = cls._gen_doc( + summary, + full_name, + identifier, + example_call, + doc_str, + show_examples, + ) _default_slice = default_slice _default_dims = default_dims @@ -1846,10 +2044,18 @@ class PlotMethod(cls._plot_method_base_cls): setattr(cls, identifier, PlotMethod(identifier, module, plotter_name)) @classmethod - def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, - show_examples): + def _gen_doc( + cls, + summary, + full_name, + identifier, + example_call, + doc_str, + show_examples, + ): """Generate the documentation docstring for a PlotMethod""" - ret = docstrings.dedent(""" + ret = docstrings.dedent( + """ %s This plotting method adds data arrays and plots them via @@ -1859,16 +2065,19 @@ def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, >>> psy.plot.%s(%s) - %s""" % (summary, full_name, identifier, example_call, doc_str)) + %s""" + % (summary, full_name, identifier, example_call, doc_str) + ) if show_examples: - ret += '\n\n' + cls._gen_examples(identifier) + ret += "\n\n" + cls._gen_examples(identifier) return ret @classmethod def _gen_examples(cls, identifier): """Generate examples how to axes the formatoption docs""" - return docstrings.dedent(""" + return docstrings.dedent( + """ Examples -------- To explore the formatoptions and their documentations, use the @@ -1888,7 +2097,9 @@ def _gen_examples(cls, identifier): >>> psy.plot.%(id)s.docs('plot') # or access the documentation via the attribute - >>> 
psy.plot.%(id)s.plot""" % {'id': identifier}) + >>> psy.plot.%(id)s.plot""" + % {"id": identifier} + ) class PlotterInterface(object): @@ -1898,8 +2109,11 @@ class PlotterInterface(object): @property def _logger(self): - name = '%s.%s.%s' % (self.__module__, self.__class__.__name__, - self._method) + name = "%s.%s.%s" % ( + self.__module__, + self.__class__.__name__, + self._method, + ) return logging.getLogger(name) @property @@ -1912,12 +2126,13 @@ def plotter_cls(self): """The plotter class""" ret = self._plotter_cls if ret is None: - self._logger.debug('importing %s', self.module) + self._logger.debug("importing %s", self.module) mod = import_module(self.module) plotter = self.plotter_name if plotter not in vars(mod): - raise ImportError("Module %r does not have a %r plotter!" % ( - mod, plotter)) + raise ImportError( + "Module %r does not have a %r plotter!" % (mod, plotter) + ) ret = self._plotter_cls = getattr(mod, plotter) _versions.update(get_versions(key=lambda s: s == self._plugin)) return ret @@ -1946,8 +2161,9 @@ def __init__(self, methodname, module, plotter_name, project_plotter=None): self.module = module self.plotter_name = plotter_name - docstrings.delete_params('ProjectPlotter._add_data.parameters', - 'plotter_cls') + docstrings.delete_params( + "ProjectPlotter._add_data.parameters", "plotter_cls" + ) @docstrings.dedent def __call__(self, *args, **kwargs): @@ -1966,13 +2182,13 @@ def __call__(self, *args, **kwargs): ------- %(ProjectPlotter._add_data.returns)s """ - preset = kwargs.pop('preset', None) + preset = kwargs.pop("preset", None) if preset: preset = self._project_plotter.project._load_preset(preset) if len(args) >= 2: fmt = args[1] else: - fmt = kwargs.setdefault('fmt', {}) + fmt = kwargs.setdefault("fmt", {}) for key, val in preset.get(self._method, {}).items(): fmt.setdefault(key, val) valid = list(self.plotter_cls._get_formatoptions()) @@ -1981,41 +2197,61 @@ def __call__(self, *args, **kwargs): fmt.setdefault(key, val) return 
self._project_plotter._add_data( - self.plotter_cls, *args, **dict(chain( - [('prefer_list', self._prefer_list), - ('default_slice', self._default_slice)], - six.iteritems(self._default_dims), six.iteritems(kwargs)))) + self.plotter_cls, + *args, + **dict( + chain( + [ + ("prefer_list", self._prefer_list), + ("default_slice", self._default_slice), + ], + six.iteritems(self._default_dims), + six.iteritems(kwargs), + ) + ), + ) def __getattr__(self, attr): if attr in self.plotter_cls._get_formatoptions(): - return partial(self.print_func, - getattr(self.plotter_cls, attr).__doc__) + return partial( + self.print_func, getattr(self.plotter_cls, attr).__doc__ + ) else: raise AttributeError( - "%s instance does not have a %s attribute" % ( - self.__class__.__name__, attr)) + "%s instance does not have a %s attribute" + % (self.__class__.__name__, attr) + ) def __get__(self, instance, owner): if instance is None: return self else: try: - return getattr(instance, '_' + self._method) + return getattr(instance, "_" + self._method) except AttributeError: - setattr(instance, '_' + self._method, self.__class__( - self._method, self.module, self.plotter_name, - instance)) - return getattr(instance, '_' + self._method) + setattr( + instance, + "_" + self._method, + self.__class__( + self._method, self.module, self.plotter_name, instance + ), + ) + return getattr(instance, "_" + self._method) def __set__(self, instance, value): """Actually not required. 
We just implement it to ensure the python "help" function works well""" - setattr(instance, '_' + self._method, value) + setattr(instance, "_" + self._method, value) def __dir__(self): try: - return sorted(chain(dir(self.__class__), self.__dict__, - self.plotter_cls._get_formatoptions())) + return sorted( + chain( + dir(self.__class__), + self.__dict__, + self.plotter_cls._get_formatoptions(), + ) + ) except Exception: return sorted(chain(dir(self.__class__), self.__dict__)) @@ -2126,13 +2362,18 @@ def check_data(self, ds, name, dims, decoder=None, *args, **kwargs): decoder = {} if isinstance(decoder, dict): decoders[i] = CFDecoder.get_decoder(ds, var, **decoder) - default_slice = slice(None) if self._default_slice is None else \ - self._default_slice - for i, (dim_dict, var, decoder) in enumerate(zip( - dims, variables, decoders)): - corrected = decoder.correct_dims(var, dict(chain( - six.iteritems(self._default_dims), - dim_dict.items()))) + default_slice = ( + slice(None) if self._default_slice is None else self._default_slice + ) + for i, (dim_dict, var, decoder) in enumerate( + zip(dims, variables, decoders) + ): + corrected = decoder.correct_dims( + var, + dict( + chain(six.iteritems(self._default_dims), dim_dict.items()) + ), + ) # now use the default slice (we don't do this before because the # `correct_dims` method doesn't use 'x', 'y', 'z' and 't' (as used # for the _default_dims) if the real dimension name is already in @@ -2140,12 +2381,20 @@ def check_data(self, ds, name, dims, decoder=None, *args, **kwargs): for dim in var.dims: corrected.setdefault(dim, default_slice) dims[i] = [ - dim for dim, val in map(lambda t: (t[0], safe_list(t[1])), - six.iteritems(corrected)) - if val and (len(val) > 1 or _is_slice(val[0]))] + dim + for dim, val in map( + lambda t: (t[0], safe_list(t[1])), six.iteritems(corrected) + ) + if val and (len(val) > 1 or _is_slice(val[0])) + ] return self.plotter_cls.check_data( - name, dims, [decoder.is_unstructured(var) for 
decoder, var in zip( - decoders, variables)]) + name, + dims, + [ + decoder.is_unstructured(var) + for decoder, var in zip(decoders, variables) + ], + ) # set the base class for the :class:`ProjectPlotter` plot methods @@ -2158,8 +2407,9 @@ class DatasetPlotterInterface(PlotterInterface): # there are not changes here compared to :class:`PlotterInterface`, except # for a different docstring for the __call__ method - docstrings.delete_params('ProjectPlotter._add_data.parameters', - 'plotter_cls', 'filename_or_obj') + docstrings.delete_params( + "ProjectPlotter._add_data.parameters", "plotter_cls", "filename_or_obj" + ) @docstrings.dedent def __call__(self, *args, **kwargs): @@ -2193,12 +2443,14 @@ def __init__(self, ds, *args, **kwargs): super(DatasetPlotter, self).__init__(*args, **kwargs) self._ds = ds - docstrings.delete_params('ProjectPlotter._add_data.parameters', - 'filename_or_obj') + docstrings.delete_params( + "ProjectPlotter._add_data.parameters", "filename_or_obj" + ) - @docstrings.get_sections(base='ProjectPlotter._add_data', - sections=['Parameters', 'Other Parameters', - 'Returns']) + @docstrings.get_sections( + base="ProjectPlotter._add_data", + sections=["Parameters", "Other Parameters", "Returns"], + ) @docstrings.dedent def _add_data(self, plotter_cls, *args, **kwargs): """ @@ -2219,16 +2471,25 @@ def _add_data(self, plotter_cls, *args, **kwargs): # this method is just a shortcut to the :meth:`Project._add_data` # method but is reimplemented by subclasses as the # :class:`DatasetPlotter` or the :class:`DataArrayPlotter` - return super(DatasetPlotter, self)._add_data(plotter_cls, self._ds, - *args, **kwargs) + return super(DatasetPlotter, self)._add_data( + plotter_cls, self._ds, *args, **kwargs + ) @classmethod - def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, - show_examples): + def _gen_doc( + cls, + summary, + full_name, + identifier, + example_call, + doc_str, + show_examples, + ): """Generate the documentation docstring 
for a PlotMethod""" # leave out the first argument - example_call = ', '.join(map(str.strip, example_call.split(',')[1:])) - ret = docstrings.dedent(""" + example_call = ", ".join(map(str.strip, example_call.split(",")[1:])) + ret = docstrings.dedent( + """ %s This plotting method adds data arrays and plots them via @@ -2238,16 +2499,19 @@ def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, >>> ds.psy.plot.%s(%s) - %s""" % (summary, full_name, identifier, example_call, doc_str)) + %s""" + % (summary, full_name, identifier, example_call, doc_str) + ) if show_examples: - ret += '\n\n' + cls._gen_examples(identifier) + ret += "\n\n" + cls._gen_examples(identifier) return ret @classmethod def _gen_examples(cls, identifier): """Generate examples how to axes the formatoption docs""" - return docstrings.dedent(""" + return docstrings.dedent( + """ Examples -------- To explore the formatoptions and their documentations, use the @@ -2265,7 +2529,9 @@ def _gen_examples(cls, identifier): >>> ds.psy.plot.%(id)s.docs('plot') # or access the documentation via the attribute - >>> ds.psy.plot.%(id)s.plot""" % {'id': identifier}) + >>> ds.psy.plot.%(id)s.plot""" + % {"id": identifier} + ) class DataArrayPlotterInterface(PlotterInterface): @@ -2275,7 +2541,7 @@ class DataArrayPlotterInterface(PlotterInterface): # prefer_list, etc. keywords. And we reimplment the check_data method # because we use the data array directly - docstrings.delete_params('Plotter.parameters', 'data') + docstrings.delete_params("Plotter.parameters", "data") @docstrings.dedent def __call__(self, *args, **kwargs): @@ -2293,18 +2559,20 @@ def __call__(self, *args, **kwargs): checks, messages = self.check_data() if not all(checks): raise ValueError( - 'Cannot visualize the data using %s! Reasons:\n %s' % ( - self.plotter_name, '\n '.join(filter(None, messages)))) + "Cannot visualize the data using %s! 
Reasons:\n %s" + % (self.plotter_name, "\n ".join(filter(None, messages))) + ) return self._project_plotter._add_data( - self.plotter_cls, *args, **kwargs) + self.plotter_cls, *args, **kwargs + ) def check_data(self, *args, **kwargs): - """Check whether the plotter of this plot method can visualize the data - """ + """Check whether the plotter of this plot method can visualize the data""" plotter_cls = self.plotter_cls da_list = self._project_plotter._da.psy.to_interactive_list() return plotter_cls.check_data( - da_list.all_names, da_list.all_dims, da_list.is_unstructured) + da_list.all_names, da_list.all_dims, da_list.is_unstructured + ) class DataArrayPlotter(ProjectPlotter): @@ -2318,7 +2586,7 @@ class DataArrayPlotter(ProjectPlotter): def __init__(self, da, *args, **kwargs): super(DataArrayPlotter, self).__init__(*args, **kwargs) - self._da = getattr(da, 'arr', da) + self._da = getattr(da, "arr", da) @docstrings.dedent def _add_data(self, plotter_cls, *args, **kwargs): @@ -2340,12 +2608,20 @@ def _add_data(self, plotter_cls, *args, **kwargs): return plotter_cls(self._da, *args, **kwargs) @classmethod - def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, - show_examples): + def _gen_doc( + cls, + summary, + full_name, + identifier, + example_call, + doc_str, + show_examples, + ): """Generate the documentation docstring for a PlotMethod""" # leave out the first argument - example_call = ', '.join(map(str.strip, example_call.split(',')[1:])) - ret = docstrings.dedent(""" + example_call = ", ".join(map(str.strip, example_call.split(",")[1:])) + ret = docstrings.dedent( + """ %s This plotting method visualizes the data via a @@ -2355,16 +2631,19 @@ def _gen_doc(cls, summary, full_name, identifier, example_call, doc_str, >>> da.psy.plot.%s() - %s""" % (summary, full_name, identifier, doc_str)) + %s""" + % (summary, full_name, identifier, doc_str) + ) if show_examples: - ret += '\n\n' + cls._gen_examples(identifier) + ret += "\n\n" + 
cls._gen_examples(identifier) return ret @classmethod def _gen_examples(cls, identifier): """Generate examples how to axes the formatoption docs""" - return docstrings.dedent(""" + return docstrings.dedent( + """ Examples -------- To explore the formatoptions and their documentations, use the @@ -2382,17 +2661,24 @@ def _gen_examples(cls, identifier): >>> da.psy.plot.%(id)s.docs('plot') # or access the documentation via the attribute - >>> da.psy.plot.%(id)s.plot""" % {'id': identifier}) + >>> da.psy.plot.%(id)s.plot""" + % {"id": identifier} + ) if with_cdo: - CDF_MOD_NCREADER = 'xarray' + CDF_MOD_NCREADER = "xarray" - docstrings.keep_params('Project._add_data.parameters', 'dims', - 'fmt', 'ax', 'make_plot', 'method') + docstrings.keep_params( + "Project._add_data.parameters", + "dims", + "fmt", + "ax", + "make_plot", + "method", + ) class Cdo(_CdoBase): - __doc__ = docstrings.dedent( """ Subclass of the original cdo.Cdo class in the cdo.py module @@ -2450,11 +2736,12 @@ class Cdo(_CdoBase): sp = psy.plot.mapplot( cdo.timmean(input='ifile.nc', returnCdf=True), name='temperature', plot_method=psy.plot.mapplot) - """) + """ + ) def __init__(self, *args, **kwargs): if cdo_version < (1, 5): - kwargs.setdefault('cdfMod', CDF_MOD_NCREADER) + kwargs.setdefault("cdfMod", CDF_MOD_NCREADER) super(Cdo, self).__init__(*args, **kwargs) if cdo_version < (1, 5): self.loadCdf() @@ -2462,9 +2749,11 @@ def __init__(self, *args, **kwargs): def loadCdf(self, *args, **kwargs): """Load data handler as specified by self.cdfMod""" if cdo_version < (1, 5): + def open_nc(*args, **kwargs): - kwargs.pop('mode', None) + kwargs.pop("mode", None) return open_dataset(*args, **kwargs) + if self.cdfMod == CDF_MOD_NCREADER: self.cdf = open_nc else: @@ -2474,37 +2763,45 @@ def open_nc(*args, **kwargs): def __getattr__(self, method_name): def my_get(get): - """Wrapper for get method of Cdo class to include several plotters - """ + """Wrapper for get method of Cdo class to include several plotters""" 
+ @wraps(get) def wrapper(self, *args, **kwargs): - added_kwargs = {'plot_method', 'name', 'dims', 'fmt'} + added_kwargs = {"plot_method", "name", "dims", "fmt"} if added_kwargs.intersection(kwargs): - plot_method = kwargs.pop('plot_method', None) - ax = kwargs.pop('ax', None) - make_plot = kwargs.pop('make_plot', True) - fmt = kwargs.pop('fmt', {}) - dims = kwargs.pop('dims', {}) - name = kwargs.pop('name', None) - method = kwargs.pop('method', 'isel') + plot_method = kwargs.pop("plot_method", None) + ax = kwargs.pop("ax", None) + make_plot = kwargs.pop("make_plot", True) + fmt = kwargs.pop("fmt", {}) + dims = kwargs.pop("dims", {}) + name = kwargs.pop("name", None) + method = kwargs.pop("method", "isel") if cdo_version < (1, 5): - kwargs['returnCdf'] = True + kwargs["returnCdf"] = True else: - kwargs['returnXDataset'] = True + kwargs["returnXDataset"] = True ds = get(*args, **kwargs) if isinstance(plot_method, six.string_types): plot_method = getattr(plot, plot_method) if plot_method is None: ret = Project.from_dataset( - ds, name=name, dims=dims, method=method) + ds, name=name, dims=dims, method=method + ) ret.main = gcp(True) return ret else: return plot_method( - ds, name=name, fmt=fmt, dims=dims, ax=ax, - make_plot=make_plot, method=method) + ds, + name=name, + fmt=fmt, + dims=dims, + ax=ax, + make_plot=make_plot, + method=method, + ) else: return get(*args, **kwargs) + return wrapper get = my_get(super(Cdo, self).__getattr__(method_name)) @@ -2529,8 +2826,9 @@ def gcp(main=False): if main: return project() if _current_project is None else _current_project else: - return gcp(True) if _current_subproject is None else \ - _current_subproject + return ( + gcp(True) if _current_subproject is None else _current_subproject + ) @dedent @@ -2555,8 +2853,9 @@ def _scp(p, main=False): global _current_subproject global _current_project if p is None: - mp = project() if main or _current_project is None else \ - _current_project + mp = ( + project() if main or 
_current_project is None else _current_project + ) _current_subproject = Project(main=mp) elif not main: _current_subproject = p @@ -2621,7 +2920,7 @@ def close(num=None, figs=True, data=True, ds=True, remove_only=False): project = gcp() scp(None) project.close(**kws) - elif num == 'all': + elif num == "all": for project in _open_projects[:]: project.close(**kws) got_cp = got_cp or project.main.num == cp_num @@ -2630,8 +2929,9 @@ def close(num=None, figs=True, data=True, ds=True, remove_only=False): if isinstance(num, Project): project = num else: - project = [project for project in _open_projects - if project.num == num][0] + project = [ + project for project in _open_projects if project.num == num + ][0] project.close(**kws) try: _open_projects.remove(project) @@ -2646,13 +2946,20 @@ def close(num=None, figs=True, data=True, ds=True, remove_only=False): _scp(None, True) # set the current project to None -docstrings.delete_params('Project._register_plotter.parameters', 'plotter_cls') +docstrings.delete_params("Project._register_plotter.parameters", "plotter_cls") @docstrings.dedent -def register_plotter(identifier, module, plotter_name, plotter_cls=None, - sorter=True, plot_func=True, import_plotter=None, - **kwargs): +def register_plotter( + identifier, + module, + plotter_name, + plotter_cls=None, + sorter=True, + plot_func=True, + import_plotter=None, + **kwargs, +): """ Register a :class:`psyplot.plotter.Plotter` for the projects @@ -2682,34 +2989,50 @@ def register_plotter(identifier, module, plotter_name, plotter_cls=None, %(ProjectPlotter._register_plotter.other_parameters)s """ if plotter_cls is None: - if ((import_plotter is None and rcParams['project.auto_import']) or - import_plotter): + if ( + import_plotter is None and rcParams["project.auto_import"] + ) or import_plotter: try: plotter_cls = getattr(import_module(module), plotter_name) except Exception as e: - critical(("Could not import %s!\n" % module) + - e.message if six.PY2 else str(e)) + 
critical( + ("Could not import %s!\n" % module) + e.message + if six.PY2 + else str(e) + ) return if sorter: if hasattr(Project, identifier): raise ValueError( - "Project class already has a %s attribute" % identifier) + "Project class already has a %s attribute" % identifier + ) Project._register_plotter( - identifier, module, plotter_name, plotter_cls) + identifier, module, plotter_name, plotter_cls + ) if plot_func: if hasattr(ProjectPlotter, identifier): raise ValueError( - "Project class already has a %s attribute" % identifier) + "Project class already has a %s attribute" % identifier + ) ProjectPlotter._register_plotter( - identifier, module, plotter_name, plotter_cls, **kwargs) + identifier, module, plotter_name, plotter_cls, **kwargs + ) DatasetPlotter._register_plotter( - identifier, module, plotter_name, plotter_cls, **kwargs) + identifier, module, plotter_name, plotter_cls, **kwargs + ) DataArrayPlotter._register_plotter( - identifier, module, plotter_name, plotter_cls, **kwargs) + identifier, module, plotter_name, plotter_cls, **kwargs + ) if identifier not in registered_plotters: - kwargs.update(dict( - module=module, plotter_name=plotter_name, sorter=sorter, - plot_func=plot_func, import_plotter=import_plotter)) + kwargs.update( + dict( + module=module, + plotter_name=plotter_name, + sorter=sorter, + plot_func=plot_func, + import_plotter=import_plotter, + ) + ) registered_plotters[identifier] = kwargs return @@ -2733,22 +3056,22 @@ def unregister_plotter(identifier, sorter=True, plot_func=True): d = registered_plotters.get(identifier, {}) if sorter and hasattr(Project, identifier): delattr(Project, identifier) - d['sorter'] = False + d["sorter"] = False if plot_func and hasattr(ProjectPlotter, identifier): for cls in [ProjectPlotter, DatasetPlotter, DataArrayPlotter]: delattr(cls, identifier) try: - delattr(plot, '_' + identifier) + delattr(plot, "_" + identifier) except AttributeError: pass - d['plot_func'] = False + d["plot_func"] = False if sorter 
and plot_func: registered_plotters.pop(identifier, None) registered_plotters = {} -for _identifier, _plotter_settings in rcParams['project.plotters'].items(): +for _identifier, _plotter_settings in rcParams["project.plotters"].items(): register_plotter(_identifier, **_plotter_settings) @@ -2756,6 +3079,7 @@ def get_project_nums(): """Returns the project numbers of the open projects""" return [p.num for p in _open_projects] + #: :class:`ProjectPlotter` of the current project. See the class documentation #: for available plotting methods plot = ProjectPlotter() diff --git a/psyplot/sphinxext/__init__.py b/psyplot/sphinxext/__init__.py index 7b1b6de..02dc29c 100755 --- a/psyplot/sphinxext/__init__.py +++ b/psyplot/sphinxext/__init__.py @@ -1,24 +1,8 @@ """Sphinx extension package of the psyplot module""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only diff --git a/psyplot/sphinxext/extended_napoleon.py b/psyplot/sphinxext/extended_napoleon.py index 9be1a17..7f68c49 100755 --- a/psyplot/sphinxext/extended_napoleon.py +++ b/psyplot/sphinxext/extended_napoleon.py @@ -10,32 +10,17 @@ :mod:`sphinx.ext.napoleon` module in the extensions variable of your conf.py. This module has been tested for sphinx 1.3.1.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only from abc import ABCMeta, abstractmethod -from sphinx.ext.napoleon import ( - NumpyDocstring, GoogleDocstring, setup as napoleon_setup) + +from sphinx.ext.napoleon import GoogleDocstring, NumpyDocstring +from sphinx.ext.napoleon import setup as napoleon_setup class DocstringExtension(object): @@ -54,9 +39,9 @@ class DocstringExtension(object): >>> from sphinx.ext.napoleon import Config >>> from psyplot.sphinxext.extended_napoleon import ( - ... 
ExtendedNumpyDocstring) - >>> config = Config(napoleon_use_param=True, - ... napoleon_use_rtype=True) + ... ExtendedNumpyDocstring, + ... ) + >>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True) >>> docstring = ''' ... Possible types ... -------------- @@ -71,19 +56,20 @@ class DocstringExtension(object): Description of `type1` * *type2* -- Description of `type2`""" + __metaclass__ = ABCMeta def _parse_possible_types_section(self, section): fields = self._consume_fields(prefer_type=True) - lines = ['.. rubric:: %s' % section, ''] + lines = [".. rubric:: %s" % section, ""] multi = len(fields) > 1 for _name, _type, _desc in fields: field = self._format_field(_name, _type, _desc) if multi: - lines.extend(self._format_block('* ', field)) + lines.extend(self._format_block("* ", field)) else: lines.extend(field) - return lines + [''] + return lines + [""] @abstractmethod def _parse(self): @@ -94,14 +80,15 @@ class ExtendedNumpyDocstring(NumpyDocstring, DocstringExtension): """:class:`sphinx.ext.napoleon.NumpyDocstring` with more sections""" def _parse(self, *args, **kwargs): - self._sections['possible types'] = self._parse_possible_types_section + self._sections["possible types"] = self._parse_possible_types_section return super(ExtendedNumpyDocstring, self)._parse(*args, **kwargs) class ExtendedGoogleDocstring(GoogleDocstring, DocstringExtension): """:class:`sphinx.ext.napoleon.GoogleDocstring` with more sections""" + def _parse(self, *args, **kwargs): - self._sections['possible types'] = self._parse_possible_types_section + self._sections["possible types"] = self._parse_possible_types_section return super(ExtendedGoogleDocstring, self)._parse(*args, **kwargs) @@ -147,11 +134,13 @@ def process_docstring(app, what, name, obj, options, lines): result_lines = lines if app.config.napoleon_numpy_docstring: docstring = ExtendedNumpyDocstring( - result_lines, app.config, app, what, name, obj, options) + result_lines, app.config, app, what, name, obj, options + 
) result_lines = docstring.lines() if app.config.napoleon_google_docstring: docstring = ExtendedGoogleDocstring( - result_lines, app.config, app, what, name, obj, options) + result_lines, app.config, app, what, name, obj, options + ) result_lines = docstring.lines() lines[:] = result_lines[:] @@ -174,8 +163,9 @@ def setup(app): This function uses the setup function of the :mod:`sphinx.ext.napoleon` module""" from sphinx.application import Sphinx + if not isinstance(app, Sphinx): return # probably called by tests - app.connect('autodoc-process-docstring', process_docstring) + app.connect("autodoc-process-docstring", process_docstring) return napoleon_setup(app) diff --git a/psyplot/utils.py b/psyplot/utils.py index b508d6f..3c2cfe1 100644 --- a/psyplot/utils.py +++ b/psyplot/utils.py @@ -1,66 +1,68 @@ """Miscallaneous utility functions for the psyplot package.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import sys +import inspect import re -import six +import sys from difflib import get_close_matches -from itertools import chain -from psyplot.compat.pycompat import OrderedDict, filterfalse +from itertools import chain, filterfalse + +import six + from psyplot.docstring import dedent, docstrings +def get_default_value(func, arg): + argspec = inspect.getfullargspec(func) + return next( + default + for a, default in zip(reversed(argspec[0]), reversed(argspec.defaults)) + if a == arg + ) + + +def isstring(s): + return isinstance(s, str) + + def plugin_entrypoints(group="psyplot", name="name"): """This utility function gets the entry points of the psyplot plugins""" if sys.version_info[:2] > (3, 7): from importlib.metadata import entry_points + try: eps = entry_points(group=group, name=name) except TypeError: # python<3.10 - eps = [ep for ep in entry_points().get(group, []) - if ep.name == name] + eps = [ + ep for ep in entry_points().get(group, []) if ep.name == name + ] else: from pkg_resources import iter_entry_points + eps = iter_entry_points(group=group, name=name) return eps -class DefaultOrderedDict(OrderedDict): +class Defaultdict(dict): """An ordered :class:`collections.defaultdict` Taken from http://stackoverflow.com/a/6190500/562769""" + def __init__(self, default_factory=None, *a, **kw): - if (default_factory is not None and - not callable(default_factory)): - raise TypeError('first argument must be callable') - OrderedDict.__init__(self, *a, **kw) + if default_factory is not None and not callable(default_factory): + raise TypeError("first argument must be callable") + dict.__init__(self, *a, **kw) self.default_factory = default_factory def __getitem__(self, key): try: - return OrderedDict.__getitem__(self, key) + return dict.__getitem__(self, key) except KeyError: return self.__missing__(key) @@ -74,7 +76,7 @@ def __reduce__(self): if self.default_factory is None: args = tuple() else: - args = 
self.default_factory, + args = (self.default_factory,) return type(self), args, None, None, self.items() def copy(self): @@ -86,12 +88,14 @@ def __copy__(self): def __deepcopy__(self, memo): import copy - return type(self)(self.default_factory, - copy.deepcopy(self.items())) + + return type(self)(self.default_factory, copy.deepcopy(self.items())) def __repr__(self): - return 'DefaultOrderedDict(%s, %s)' % (self.default_factory, - OrderedDict.__repr__(self)) + return "Defaultdict(%s, %s)" % ( + self.default_factory, + dict.__repr__(self), + ) class _TempBool(object): @@ -129,9 +133,12 @@ def __exit__(self, type, value, tb): self.value = self.default if six.PY2: + def __nonzero__(self): return self.value + else: + def __bool__(self): return self.value @@ -172,12 +179,13 @@ def _temp_bool_prop(propname, doc="", default=False): The documentation of the property default: bool The default value of the _TempBool class""" + def getx(self): - if getattr(self, '_' + propname, None) is not None: - return getattr(self, '_' + propname) + if getattr(self, "_" + propname, None) is not None: + return getattr(self, "_" + propname) else: - setattr(self, '_' + propname, _TempBool(default)) - return getattr(self, '_' + propname) + setattr(self, "_" + propname, _TempBool(default)) + return getattr(self, "_" + propname) def setx(self, value): getattr(self, propname).value = bool(value) @@ -209,20 +217,25 @@ def unique_everseen(iterable, key=None): def is_remote_url(path): - patt = re.compile(r'^https?\://') + patt = re.compile(r"^https?\://") if not isinstance(path, six.string_types): - return all(map(patt.search, (s or '' for s in path))) - return bool(re.search(r'^https?\://', path)) + return all(map(patt.search, (s or "" for s in path))) + return bool(re.search(r"^https?\://", path)) -@docstrings.get_sections(base='check_key', sections=['Parameters', 'Returns', - 'Raises']) +@docstrings.get_sections( + base="check_key", sections=["Parameters", "Returns", "Raises"] +) @dedent -def 
check_key(key, possible_keys, raise_error=True, - name='formatoption keyword', - msg=("See show_fmtkeys function for possible formatopion " - "keywords"), - *args, **kwargs): +def check_key( + key, + possible_keys, + raise_error=True, + name="formatoption keyword", + msg=("See show_fmtkeys function for possible formatopion " "keywords"), + *args, + **kwargs, +): """ Checks whether the key is in a list of possible keys @@ -263,15 +276,18 @@ def check_key(key, possible_keys, raise_error=True, if key not in possible_keys: similarkeys = get_close_matches(key, possible_keys, *args, **kwargs) if similarkeys: - msg = ('Unknown %s %s! Possible similiar ' - 'frasings are %s.') % (name, key, ', '.join(similarkeys)) + msg = ("Unknown %s %s! Possible similiar " "frasings are %s.") % ( + name, + key, + ", ".join(similarkeys), + ) else: msg = ("Unknown %s %s! ") % (name, key) + msg if not raise_error: - return '', similarkeys, msg + return "", similarkeys, msg raise KeyError(msg) else: - return key, [key], '' + return key, [key], "" def sort_kwargs(kwargs, *param_lists): @@ -294,8 +310,12 @@ def sort_kwargs(kwargs, *param_lists): `kwargs` corresponding to the specified list in ``*param_lists``. 
The last dictionary contains the remaining items""" return chain( - ({key: kwargs.pop(key) for key in params.intersection(kwargs)} - for params in map(set, param_lists)), [kwargs]) + ( + {key: kwargs.pop(key) for key in params.intersection(kwargs)} + for params in map(set, param_lists) + ), + [kwargs], + ) def hashable(val): @@ -320,7 +340,7 @@ def hashable(val): return val -@docstrings.get_sections(base='join_dicts') +@docstrings.get_sections(base="join_dicts") def join_dicts(dicts, delimiter=None, keep_all=False): """Join multiple dictionaries into one diff --git a/psyplot/warning.py b/psyplot/warning.py index 7f011f1..7a3e2d4 100755 --- a/psyplot/warning.py +++ b/psyplot/warning.py @@ -10,49 +10,44 @@ PsyPlotWarning PsyPlotCritical""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import warnings import logging - +import warnings # disable a warning about "comparison to 'None' in backend_pdf which occurs # in the matplotlib.backends.backend_pdf.PdfPages class warnings.filterwarnings( - 'ignore', 'comparison', FutureWarning, 'matplotlib.backends.backend_pdf', - 2264) + "ignore", + "comparison", + FutureWarning, + "matplotlib.backends.backend_pdf", + 2264, +) # disable a warning about "np.array_split" that occurs for certain numpy # versions warnings.filterwarnings( - 'ignore', 'in the future np.array_split will retain', FutureWarning, - 'numpy.lib.shape_base', 431) + "ignore", + "in the future np.array_split will retain", + FutureWarning, + "numpy.lib.shape_base", + 431, +) # disable a warning about "elementwise comparison of a string" in the # matplotlib.collection.Collection.get_edgecolor method that occurs for certain # matplotlib and numpy versions warnings.filterwarnings( - 'ignore', 'elementwise comparison failed', FutureWarning, - 'matplotlib.collections', 590) + "ignore", + "elementwise comparison failed", + FutureWarning, + "matplotlib.collections", + 590, +) logger = logging.getLogger(__name__) @@ -60,21 +55,24 @@ class PsyPlotRuntimeWarning(RuntimeWarning): """Runtime warning that appears only ones""" + pass class PsyPlotWarning(UserWarning): """Normal UserWarning for psyplot module""" + pass class PsyPlotCritical(UserWarning): """Critical UserWarning for psyplot module""" + pass -warnings.simplefilter('always', PsyPlotWarning, append=True) -warnings.simplefilter('always', PsyPlotCritical, append=True) +warnings.simplefilter("always", PsyPlotWarning, append=True) +warnings.simplefilter("always", PsyPlotCritical, append=True) def disable_warnings(critical=False): @@ -82,9 +80,9 @@ def disable_warnings(critical=False): critical evaluates to True) related to the psyplot Module. 
Please note that you can also configure the warnings via the psyplot.warning logger (logging.getLogger(psyplot.warning)).""" - warnings.filterwarnings('ignore', '\w', PsyPlotWarning, 'psyplot', 0) + warnings.filterwarnings("ignore", r"\w", PsyPlotWarning, "psyplot", 0) if critical: - warnings.filterwarnings('ignore', '\w', PsyPlotCritical, 'psyplot', 0) + warnings.filterwarnings("ignore", r"\w", PsyPlotCritical, "psyplot", 0) def warn(message, category=PsyPlotWarning, logger=None): @@ -111,12 +109,18 @@ def customwarn(message, category, filename, lineno, *args, **kwargs): PsyPlotWarning and PsyPlotCritical and the default warnings.showwarning function for all the others.""" if category is PsyPlotWarning: - logger.warning(warnings.formatwarning( - "\n%s" % message, category, filename, lineno)) + logger.warning( + warnings.formatwarning( + "\n%s" % message, category, filename, lineno + ) + ) elif category is PsyPlotCritical: - logger.critical(warnings.formatwarning( - "\n%s" % message, category, filename, lineno), - exc_info=True) + logger.critical( + warnings.formatwarning( + "\n%s" % message, category, filename, lineno + ), + exc_info=True, + ) else: old_showwarning(message, category, filename, lineno, *args, **kwargs) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..a09b350 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + +[build-system] +build-backend = 'setuptools.build_meta' +requires = ['setuptools >= 61.0', 'versioneer[toml]'] + +[project] +name = "psyplot" +dynamic = ["version"] +description = "Python package for interactive data visualization" + +readme = "README.rst" +keywords = [ + "visualization", + + "netcdf", + + "raster", + + "cartopy", + + "earth-sciences", + ] + +authors = [ + { name = 'Philipp S. Sommer', email = 'philipp.sommer@hereon.de' }, +] +maintainers = [ + { name = 'Philipp S. 
Sommer', email = 'philipp.sommer@hereon.de' }, +] +license = { text = 'LGPL-3.0-only' } + +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Topic :: Scientific/Engineering :: Visualization", + "Topic :: Scientific/Engineering :: GIS", + "Topic :: Scientific/Engineering", + "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Operating System :: OS Independent", +] + +requires-python = '>= 3.9' +dependencies = [ + # add your dependencies here + "matplotlib", + "docrep>=0.3", + "funcargparse", + "xarray>=0.17", + "PyYAML>=4.2b", +] + +[project.urls] +Homepage = 'https://codebase.helmholtz.cloud/psyplot/psyplot' +Documentation = "https://psyplot.github.io" +Source = "https://codebase.helmholtz.cloud/psyplot/psyplot" +Tracker = "https://codebase.helmholtz.cloud/psyplot/psyplot/issues/" + + +[project.optional-dependencies] +testsite = [ + "tox", + "isort==5.12.0", + "black==23.1.0", + "blackdoc==0.3.8", + "flake8==6.0.0", + "pre-commit", + "mypy", + "pytest-cov", + "reuse", + "cffconvert", + "netCDF4", + "dask", + "scipy", + "pytest", +] +docs = [ + "autodocsumm", + "sphinx-rtd-theme", + "hereon-netcdf-sphinxext", + "sphinx-design", + "ipython", + "pickleshare", # required for IPythons savefig + "seaborn", + "dask", + "netCDF4", + "sphinx-argparse", + "cdo", +] +dev = [ + "psyplot[testsite]", + "psyplot[docs]", + "PyYAML", + "types-PyYAML", +] + +[project.scripts] +psyplot = "psyplot.__main__:main" + +[tool.mypy] +ignore_missing_imports = true + +[tool.setuptools] +zip-safe = false +license-files = ["LICENSES/*"] + +[tool.setuptools.package-data] +psyplot = [] + +[tool.setuptools.packages.find] +namespaces = false +exclude = [ + 
'docs', + 'tests*', + 'examples' +] + +[tool.pytest.ini_options] +addopts = '-v' + +[tool.versioneer] +VCS = 'git' +style = 'pep440' +versionfile_source = 'psyplot/_version.py' +versionfile_build = 'psyplot/_version.py' +tag_prefix = 'v' +parentdir_prefix = 'psyplot-' + +[tool.isort] +profile = "black" +line_length = 79 +src_paths = ["psyplot"] +float_to_top = true +known_first_party = "psyplot" + +[tool.black] +line-length = 79 +target-version = ['py39'] + +[tool.coverage.run] +omit = ["psyplot/_version.py"] diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index 72abb85..0000000 --- a/readthedocs.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: 2 - -# Build all formats -formats: all -conda: - environment: docs/environment.yml -python: - version: 3.7 - install: - - method: pip - path: . diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index cef4efc..0000000 --- a/setup.cfg +++ /dev/null @@ -1,9 +0,0 @@ -[versioneer] -VCS = git -style = pep440 -versionfile_source = psyplot/_version.py -versionfile_build = psyplot/_version.py -tag_prefix = v -parentdir_prefix = psyplot- - - diff --git a/setup.py b/setup.py index 7186afe..c6adc8a 100644 --- a/setup.py +++ b/setup.py @@ -1,106 +1,12 @@ -"""Setup script for the psyplot package.""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: CC0-1.0 -import os.path as osp -from setuptools import setup, find_packages -from setuptools.command.test import test as TestCommand -import sys +"""Setup script for the psyplot package.""" import versioneer +from setuptools import setup - -class PyTest(TestCommand): - user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")] - - def initialize_options(self): - TestCommand.initialize_options(self) - self.pytest_args = '' - - def run_tests(self): - import shlex - # import here, cause outside the eggs aren't loaded - import pytest - errno = pytest.main(shlex.split(self.pytest_args)) - sys.exit(errno) - - -version = versioneer.get_version() - - -def readme(): - with open('README.rst') as f: - return f.read() - -cmdclass = versioneer.get_cmdclass({'test': PyTest}) - -setup(name='psyplot', - version=version, - description='Python package for interactive data visualization', - long_description=readme(), - long_description_content_type="text/x-rst", - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Topic :: Scientific/Engineering :: Visualization', - 'Topic :: Scientific/Engineering :: GIS', - 'Topic :: Scientific/Engineering', - 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Operating System :: OS Independent', - ], - python_requires=">=3.6", - 
keywords='visualization netcdf raster cartopy earth-sciences', - project_urls={ - 'Documentation': 'https://psyplot.github.io', - 'Source': 'https://github.com/psyplot/psyplot', - 'Tracker': 'https://github.com/psyplot/psyplot/issues', - }, - url='https://github.com/psyplot/psyplot', - author='Philipp S. Sommer', - author_email='psyplot@hereon.de', - license="LGPL-3.0-only", - packages=find_packages(exclude=['docs', 'tests*', 'examples']), - install_requires=[ - 'matplotlib', - 'docrep>=0.3', - 'funcargparse', - 'xarray>=0.17', - 'PyYAML>=4.2b4' - ], - package_data={'psyplot': [ - osp.join('psyplot', 'plugin-template-files', '*'), - osp.join('psyplot', 'plugin-template-files', 'plugin_template', '*'), - ]}, - include_package_data=True, - tests_require=['pytest'], - cmdclass=cmdclass, - entry_points={'console_scripts': [ - 'psyplot=psyplot.__main__:main', - 'psyplot-plugin=psyplot.plugin_template:main']}, - zip_safe=False) +setup( + version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), +) diff --git a/tests/_base_testing.py b/tests/_base_testing.py index 72bd24a..8502096 100644 --- a/tests/_base_testing.py +++ b/tests/_base_testing.py @@ -1,52 +1,39 @@ """Base testing module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only import os -import sys import os.path as osp import subprocess as spr +import sys test_dir = osp.dirname(__file__) -os.environ['PSYPLOT_PLUGINS'] = 'yes:psyplot_test.plugin' +os.environ["PSYPLOT_PLUGINS"] = "yes:psyplot_test.plugin" def get_file(fname): """Get the full path to the given file name in the test directory""" return osp.join(test_dir, fname) + # check if the seaborn version is smaller than 0.8 (without actually importing # it), due to https://github.com/mwaskom/seaborn/issues/966 # If so, disable the import of it when import psyplot.project try: sns_version = spr.check_output( - [sys.executable, '-c', 'import seaborn; print(seaborn.__version__)']) + [sys.executable, "-c", "import seaborn; print(seaborn.__version__)"] + ) except spr.CalledProcessError: # seaborn is not installed pass else: - if sns_version.decode('utf-8') < '0.8': + if sns_version.decode("utf-8") < "0.8": import psyplot - psyplot.rcParams['project.import_seaborn'] = False + + psyplot.rcParams["project.import_seaborn"] = False diff --git a/tests/circumpolar_test.nc.license b/tests/circumpolar_test.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/circumpolar_test.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/conftest.py b/tests/conftest.py index f5f1dcc..b3bc92d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,39 +1,27 @@ """pytest configuration file.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 
Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only -import _base_testing as bt def pytest_addoption(parser): group = parser.getgroup("psyplot", "psyplot specific options") - group.addoption('--no-removal', help='Do not remove created test files', - action='store_true') + group.addoption( + "--no-removal", + help="Do not remove created test files", + action="store_true", + ) def pytest_configure(config): - if config.getoption('no_removal'): + if config.getoption("no_removal"): import test_project + test_project.remove_temp_files = False import test_main + test_main.remove_temp_files = False diff --git a/tests/icon_test.nc.license b/tests/icon_test.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/icon_test.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/logging.yml b/tests/logging.yml index 141ec79..c24e9cd 100755 --- a/tests/logging.yml +++ b/tests/logging.yml @@ 
-1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +# +# SPDX-License-Identifier: CC0-1.0 + --- # logging settings for the nc2map module @@ -79,4 +83,4 @@ loggers: propagate: False level: WARNING -... \ No newline at end of file +... diff --git a/tests/main.py b/tests/main.py index 392d971..0d294fc 100755 --- a/tests/main.py +++ b/tests/main.py @@ -10,38 +10,20 @@ failure of some tests in python 3.4) """ -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only +import unittest import _base_testing as bt -import os - -import unittest test_suite = unittest.defaultTestLoader.discover(bt.test_dir) -if __name__ == '__main__': +if __name__ == "__main__": test_runner = unittest.TextTestRunner(verbosity=2, failfast=True) test_runner.run(test_suite) diff --git a/tests/rotated-pole-test.nc.license b/tests/rotated-pole-test.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/rotated-pole-test.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/simple_triangular_grid_si0.nc.license b/tests/simple_triangular_grid_si0.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/simple_triangular_grid_si0.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/test-t2m-1979-01-31T18-00-00.tif.license b/tests/test-t2m-1979-01-31T18-00-00.tif.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/test-t2m-1979-01-31T18-00-00.tif.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/test-t2m-1979-02-28T18-00-00.tif.license b/tests/test-t2m-1979-02-28T18-00-00.tif.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/test-t2m-1979-02-28T18-00-00.tif.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/test-t2m-u-v.nc.license b/tests/test-t2m-u-v.nc.license new file mode 100644 index 0000000..919c9c1 --- /dev/null +++ b/tests/test-t2m-u-v.nc.license @@ -0,0 +1,3 @@ +SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH + +SPDX-License-Identifier: CC0-1.0 diff --git a/tests/test_data.py b/tests/test_data.py index e3aca3f..1a0b429 100755 
--- a/tests/test_data.py +++ b/tests/test_data.py @@ -1,43 +1,28 @@ """Test module of the :mod:`psyplot.data` module""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only import os import os.path as osp -import six +import tempfile import unittest + +import _base_testing as bt +import numpy as np import pandas as pd +import six import xarray as xr -from psyplot.compat.pycompat import range + import psyplot.data as psyd -import _base_testing as bt -import numpy as np -from collections import OrderedDict -import tempfile try: import PyNio + with_nio = True except ImportError as e: PyNio = psyd._MissingModule(e) @@ -45,6 +30,7 @@ try: import netCDF4 as nc + with_netcdf4 = True except ImportError as e: nc = psyd._MissingModule(e) @@ -52,6 +38,7 @@ try: import scipy + with_scipy = True except ImportError as e: scipy = psyd._MissingModule(e) @@ -59,6 +46,7 @@ try: from cdo import Cdo + Cdo() except Exception: with_cdo = False @@ -66,13 +54,13 @@ with_cdo = True -xr_version = tuple(map(float, xr.__version__.split('.')[:3])) +xr_version = tuple(map(float, xr.__version__.split(".")[:3])) class AlmostArrayEqualMixin(object): - - def assertAlmostArrayEqual(self, actual, desired, rtol=1e-07, atol=0, - msg=None, **kwargs): + def assertAlmostArrayEqual( + self, actual, desired, rtol=1e-07, atol=0, msg=None, **kwargs + ): """Asserts that the two given arrays are almost the same This method uses the :func:`numpy.testing.assert_allclose` function @@ -96,8 +84,14 @@ def assertAlmostArrayEqual(self, actual, desired, rtol=1e-07, atol=0, If True, the conflicting values are appended to the error message. 
""" try: - np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol, - err_msg=msg or '', **kwargs) + np.testing.assert_allclose( + actual, + desired, + rtol=rtol, + atol=atol, + err_msg=msg or "", + **kwargs, + ) except AssertionError as e: self.fail(e if six.PY3 else e.message) @@ -107,41 +101,42 @@ class DecoderTest(unittest.TestCase, AlmostArrayEqualMixin): def test_decode_grid_mapping(self): ds = xr.Dataset() - ds['var'] = (('x', 'y'), np.zeros((5, 4)), {'grid_mapping': 'crs'}) - ds['crs'] = ((), 1) + ds["var"] = (("x", "y"), np.zeros((5, 4)), {"grid_mapping": "crs"}) + ds["crs"] = ((), 1) - self.assertNotIn('crs', ds.coords) + self.assertNotIn("crs", ds.coords) ds = psyd.CFDecoder.decode_coords(ds) - self.assertIn('crs', ds.coords) + self.assertIn("crs", ds.coords) def test_1D_cf_bounds(self): """Test whether the CF Conventions for 1D bounaries are correct""" final_bounds = np.arange(-180, 181, 30) - lon = xr.Variable(('lon', ), np.arange(-165, 166, 30), - {'bounds': 'lon_bounds'}) - cf_bounds = xr.Variable(('lon', 'bnds'), np.zeros((len(lon), 2))) + lon = xr.Variable( + ("lon",), np.arange(-165, 166, 30), {"bounds": "lon_bounds"} + ) + cf_bounds = xr.Variable(("lon", "bnds"), np.zeros((len(lon), 2))) for i in range(len(lon)): - cf_bounds[i, :] = final_bounds[i:i+2] - ds = xr.Dataset(coords={'lon': lon, 'lon_bounds': cf_bounds}) + cf_bounds[i, :] = final_bounds[i : i + 2] + ds = xr.Dataset(coords={"lon": lon, "lon_bounds": cf_bounds}) decoder = psyd.CFDecoder(ds) - self.assertEqual(list(final_bounds), - list(decoder.get_plotbounds(lon))) + self.assertEqual(list(final_bounds), list(decoder.get_plotbounds(lon))) def test_1D_bounds_calculation(self): """Test whether the 1D cell boundaries are calculated correctly""" final_bounds = np.arange(-180, 181, 30) - lon = xr.Variable(('lon', ), np.arange(-165, 166, 30)) - ds = xr.Dataset(coords={'lon': lon}) + lon = xr.Variable(("lon",), np.arange(-165, 166, 30)) + ds = xr.Dataset(coords={"lon": lon}) decoder = 
psyd.CFDecoder(ds) - self.assertEqual(list(final_bounds), - list(decoder.get_plotbounds(lon))) + self.assertEqual(list(final_bounds), list(decoder.get_plotbounds(lon))) - def _test_dimname(self, func_name, name, uname=None, name2d=False, - circ_name=None): + def _test_dimname( + self, func_name, name, uname=None, name2d=False, circ_name=None + ): def check_ds(name): self.assertEqual(getattr(d, func_name)(ds.t2m), name) - self.assertEqual(getattr(d, func_name)(ds.t2m, - coords=ds.t2m.coords), name) + self.assertEqual( + getattr(d, func_name)(ds.t2m, coords=ds.t2m.coords), name + ) if name2d: self.assertEqual(getattr(d, func_name)(ds.t2m_2d), name) else: @@ -149,27 +144,28 @@ def check_ds(name): if six.PY3: # Test whether the warning is raised if the decoder finds # multiple dimensions - with self.assertWarnsRegex(RuntimeWarning, - 'multiple matches'): - coords = 'time lat lon lev x y latitude longitude'.split() - ds.t2m.attrs.pop('coordinates', None) - for dim in 'xytz': + with self.assertWarnsRegex(RuntimeWarning, "multiple matches"): + coords = "time lat lon lev x y latitude longitude".split() + ds.t2m.attrs.pop("coordinates", None) + for dim in "xytz": getattr(d, dim).update(coords) for coord in set(coords).intersection(ds.coords): - ds.coords[coord].attrs.pop('axis', None) + ds.coords[coord].attrs.pop("axis", None) getattr(d, func_name)(ds.t2m) + uname = uname or name circ_name = circ_name or name - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'test-t2m-u-v.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "test-t2m-u-v.nc")) d = psyd.CFDecoder(ds) check_ds(name) ds.close() - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'icon_test.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "icon_test.nc")) d = psyd.CFDecoder(ds) check_ds(uname) ds.close() ds = psyd.open_dataset( - os.path.join(bt.test_dir, 'circumpolar_test.nc')) + os.path.join(bt.test_dir, "circumpolar_test.nc") + ) d = psyd.CFDecoder(ds) check_ds(circ_name) ds.close() @@ -177,45 
+173,46 @@ def check_ds(name): def test_xname_no_dims(self): """Test the get_xname method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_dim('x')) + self.assertIsNone(da.psy.get_dim("x")) def test_yname_no_dims(self): """Test the get_yname method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_dim('y')) + self.assertIsNone(da.psy.get_dim("y")) def test_zname_no_dims(self): """Test the get_zname method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_dim('z')) + self.assertIsNone(da.psy.get_dim("z")) def test_tname_no_dims(self): """Test the get_tname method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_dim('t')) + self.assertIsNone(da.psy.get_dim("t")) def test_xcoord_no_dims(self): """Test the get_x method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_coord('x')) + self.assertIsNone(da.psy.get_coord("x")) def test_ycoord_no_dims(self): """Test the get_y method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_coord('y')) + self.assertIsNone(da.psy.get_coord("y")) def test_zcoord_no_dims(self): """Test the get_z method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_coord('z')) + self.assertIsNone(da.psy.get_coord("z")) def test_tcoord_no_dims(self): """Test the get_t method for a variable without dimensions""" da = xr.DataArray(1) - self.assertIsNone(da.psy.get_coord('t')) + self.assertIsNone(da.psy.get_coord("t")) - def _test_coord(self, func_name, name, uname=None, name2d=False, - circ_name=None): + def _test_coord( + self, func_name, name, uname=None, name2d=False, circ_name=None + ): def check_ds(name): self.assertEqual(getattr(d, func_name)(ds.t2m).name, name) if name2d: @@ -225,197 +222,212 @@ def check_ds(name): if six.PY3: # Test whether the warning is raised if the 
decoder finds # multiple dimensions - with self.assertWarnsRegex(RuntimeWarning, - 'multiple matches'): - coords = 'time lat lon lev x y latitude longitude'.split() - ds.t2m.attrs.pop('coordinates', None) - for dim in 'xytz': + with self.assertWarnsRegex(RuntimeWarning, "multiple matches"): + coords = "time lat lon lev x y latitude longitude".split() + ds.t2m.attrs.pop("coordinates", None) + for dim in "xytz": getattr(d, dim).update(coords) for coord in set(coords).intersection(ds.coords): - ds.coords[coord].attrs.pop('axis', None) + ds.coords[coord].attrs.pop("axis", None) getattr(d, func_name)(ds.t2m) + uname = uname or name circ_name = circ_name or name - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'test-t2m-u-v.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "test-t2m-u-v.nc")) d = psyd.CFDecoder(ds) check_ds(name) ds.close() - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'icon_test.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "icon_test.nc")) d = psyd.CFDecoder(ds) check_ds(uname) ds.close() ds = psyd.open_dataset( - os.path.join(bt.test_dir, 'circumpolar_test.nc')) + os.path.join(bt.test_dir, "circumpolar_test.nc") + ) d = psyd.CFDecoder(ds) check_ds(circ_name) ds.close() def test_tname(self): """Test CFDecoder.get_tname method""" - self._test_dimname('get_tname', 'time') + self._test_dimname("get_tname", "time") def test_zname(self): """Test CFDecoder.get_zname method""" - self._test_dimname('get_zname', 'lev') + self._test_dimname("get_zname", "lev") def test_xname(self): """Test CFDecoder.get_xname method""" - self._test_dimname('get_xname', 'lon', 'ncells', True, - circ_name='x') + self._test_dimname("get_xname", "lon", "ncells", True, circ_name="x") def test_yname(self): """Test CFDecoder.get_yname method""" - self._test_dimname('get_yname', 'lat', 'ncells', True, - circ_name='y') + self._test_dimname("get_yname", "lat", "ncells", True, circ_name="y") def test_t(self): """Test CFDecoder.get_t method""" - 
self._test_coord('get_t', 'time') + self._test_coord("get_t", "time") def test_z(self): """Test CFDecoder.get_z method""" - self._test_coord('get_z', 'lev') + self._test_coord("get_z", "lev") def test_x(self): """Test CFDecorder.get_x method""" - self._test_coord('get_x', 'lon', 'clon', True, - circ_name='longitude') + self._test_coord("get_x", "lon", "clon", True, circ_name="longitude") def test_y(self): """Test CFDecoder.get_y method""" - self._test_coord('get_y', 'lat', 'clat', True, - circ_name='latitude') + self._test_coord("get_y", "lat", "clat", True, circ_name="latitude") def test_standardization(self): """Test the :meth:`psyplot.data.CFDecoder.standardize_dims` method""" - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'test-t2m-u-v.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "test-t2m-u-v.nc")) decoder = psyd.CFDecoder(ds) - dims = {'time': 1, 'lat': 2, 'lon': 3, 'lev': 4} + dims = {"time": 1, "lat": 2, "lon": 3, "lev": 4} replaced = decoder.standardize_dims(ds.t2m, dims) - for dim, rep in [('time', 't'), ('lat', 'y'), ('lon', 'x'), - ('lev', 'z')]: + for dim, rep in [ + ("time", "t"), + ("lat", "y"), + ("lon", "x"), + ("lev", "z"), + ]: self.assertIn(rep, replaced) - self.assertEqual(replaced[rep], dims[dim], - msg="Wrong value for %s (%s-) dimension" % ( - dim, rep)) + self.assertEqual( + replaced[rep], + dims[dim], + msg="Wrong value for %s (%s-) dimension" % (dim, rep), + ) def test_idims(self): """Test the extraction of the slicers of the dimensions""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) arr = ds.t2m[1:, 1] arr.psy.init_accessor(base=ds) dims = arr.psy.idims - for dim in ['time', 'lev', 'lat', 'lon']: + for dim in ["time", "lev", "lat", "lon"]: self.assertEqual( psyd.safe_list(ds[dim][dims[dim]]), psyd.safe_list(arr.coords[dim]), - msg="Slice %s for dimension %s is wrong!" % (dims[dim], dim)) + msg="Slice %s for dimension %s is wrong!" 
% (dims[dim], dim), + ) # test with unknown dimensions if xr_version[:2] >= (0, 9): try: - ds = ds.drop_vars('time') + ds = ds.drop_vars("time") except AttributeError: # xarray <=0.13 - ds = ds.drop('time') + ds = ds.drop("time") arr = ds.t2m[1:, 1] arr.psy.init_accessor(base=ds) if not six.PY2: - with self.assertWarnsRegex(RuntimeWarning, 'time'): + with self.assertWarnsRegex(RuntimeWarning, "time"): dims = arr.psy.idims - l = psyd.ArrayList.from_dataset( - ds, name='t2m', time=slice(1, None), lev=85000., method='sel') - arr = l[0] + arrays = psyd.ArrayList.from_dataset( + ds, name="t2m", time=slice(1, None), lev=85000.0, method="sel" + ) + arr = arrays[0] dims = arr.psy.idims - for dim in ['time', 'lev', 'lat', 'lon']: - if dim == 'time': + for dim in ["time", "lev", "lat", "lon"]: + if dim == "time": self.assertEqual(dims[dim], slice(1, 5, 1)) else: self.assertEqual( psyd.safe_list(ds[dim][dims[dim]]), psyd.safe_list(arr.coords[dim]), - msg="Slice %s for dimension %s is wrong!" % (dims[dim], - dim)) + msg="Slice %s for dimension %s is wrong!" 
+ % (dims[dim], dim), + ) def test_unstructured_bounds(self): """Test the extraction of unstructured bounds""" - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'icon_test.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "icon_test.nc")) decoder = psyd.CFDecoder(ds) var = ds.t2m[0, 0] - var.attrs.pop('grid_type', None) + var.attrs.pop("grid_type", None) self.assertTrue(decoder.is_unstructured(var)) # x bounds - xbounds = decoder.get_cell_node_coord(var, axis='x') + xbounds = decoder.get_cell_node_coord(var, axis="x") self.assertIsNotNone(xbounds) self.assertEqual(xbounds.shape, ds.clon_bnds.shape) # y bounds - ybounds = decoder.get_cell_node_coord(var, axis='y') + ybounds = decoder.get_cell_node_coord(var, axis="y") self.assertIsNotNone(ybounds) self.assertEqual(ybounds.shape, ds.clon_bnds.shape) # Test for correct falsification - ds = psyd.open_dataset(os.path.join(bt.test_dir, 'test-t2m-u-v.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "test-t2m-u-v.nc")) decoder = psyd.CFDecoder(ds) var = ds.t2m[0, 0] self.assertFalse(decoder.is_unstructured(var)) - xbounds = decoder.get_cell_node_coord(var, axis='x') + xbounds = decoder.get_cell_node_coord(var, axis="x") self.assertEqual(xbounds.shape, (np.prod(var.shape), 4)) def test_is_unstructured_2D_bounds(self): """Test that 3D bounds are not interpreted as unstructured""" with psyd.open_dataset( - os.path.join(bt.test_dir, "rotated-pole-test.nc")) as ds: + os.path.join(bt.test_dir, "rotated-pole-test.nc") + ) as ds: decoder = psyd.CFDecoder(ds) self.assertFalse(decoder.is_unstructured(ds.psy["HSURF"])) def test_is_circumpolar(self): """Test whether the is_circumpolar method works""" - ds = psyd.open_dataset(os.path.join(bt.test_dir, - 'circumpolar_test.nc')) + ds = psyd.open_dataset( + os.path.join(bt.test_dir, "circumpolar_test.nc") + ) decoder = psyd.CFDecoder(ds) self.assertTrue(decoder.is_circumpolar(ds.t2m)) # test for correct falsification - ds = psyd.open_dataset(os.path.join(bt.test_dir, 
'icon_test.nc')) + ds = psyd.open_dataset(os.path.join(bt.test_dir, "icon_test.nc")) decoder = psyd.CFDecoder(ds) self.assertFalse(decoder.is_circumpolar(ds.t2m)) def test_get_variable_by_axis(self): """Test the :meth:`CFDecoder.get_variable_by_axis` method""" - ds = psyd.open_dataset(os.path.join(bt.test_dir, - 'circumpolar_test.nc')) + ds = psyd.open_dataset( + os.path.join(bt.test_dir, "circumpolar_test.nc") + ) decoder = psyd.CFDecoder(ds) arr = ds.t2m - arr.attrs.pop('coordinates', None) - arr.encoding.pop('coordinates', None) + arr.attrs.pop("coordinates", None) + arr.encoding.pop("coordinates", None) for c in ds.coords.values(): - c.attrs.pop('axis', None) - for dim in ['x', 'y', 'z', 't']: - self.assertIsNone(decoder.get_variable_by_axis(arr, dim), - msg="Accidently found coordinate %s" % dim) + c.attrs.pop("axis", None) + for dim in ["x", "y", "z", "t"]: + self.assertIsNone( + decoder.get_variable_by_axis(arr, dim), + msg="Accidently found coordinate %s" % dim, + ) # test coordinates attribute - arr.attrs['coordinates'] = 'latitude longitude' - self.assertEqual(decoder.get_variable_by_axis(arr, 'x').name, - 'longitude') - self.assertEqual(decoder.get_variable_by_axis(arr, 'y').name, - 'latitude') - self.assertIsNone(decoder.get_variable_by_axis(arr, 'z')) + arr.attrs["coordinates"] = "latitude longitude" + self.assertEqual( + decoder.get_variable_by_axis(arr, "x").name, "longitude" + ) + self.assertEqual( + decoder.get_variable_by_axis(arr, "y").name, "latitude" + ) + self.assertIsNone(decoder.get_variable_by_axis(arr, "z")) # test coordinates attribute but without specifying axis or matching # latitude or longitude - axes = {'lev': 'z', 'time': 't', 'x': 'x', 'y': 'y'} - arr.attrs['coordinates'] = 'time lev y x' + axes = {"lev": "z", "time": "t", "x": "x", "y": "y"} + arr.attrs["coordinates"] = "time lev y x" for name, axis in axes.items(): self.assertEqual( - decoder.get_variable_by_axis(arr, axis).name, name) + decoder.get_variable_by_axis(arr, 
axis).name, name + ) # test with specified axis attribute - arr.attrs['coordinates'] = 'time lev longitude latitude' - axes = {'lev': 'Z', 'time': 'T', 'latitude': 'X', 'longitude': 'Y'} + arr.attrs["coordinates"] = "time lev longitude latitude" + axes = {"lev": "Z", "time": "T", "latitude": "X", "longitude": "Y"} for name, axis in axes.items(): - ds.coords[name].attrs['axis'] = axis + ds.coords[name].attrs["axis"] = axis for name, axis in axes.items(): self.assertEqual( - decoder.get_variable_by_axis(arr, axis.lower()).name, name) + decoder.get_variable_by_axis(arr, axis.lower()).name, name + ) # close the dataset ds.close() @@ -423,19 +435,19 @@ def test_get_variable_by_axis(self): def test_get_variable_by_axis_02(self): """Test the :meth:`CFDecoder.get_variable_by_axis` method with missing coordinates, see https://github.com/psyplot/psyplot/pull/19""" - fname = os.path.join(bt.test_dir, 'icon_test.nc') + fname = os.path.join(bt.test_dir, "icon_test.nc") with psyd.open_dataset(fname) as ds: - ds['ncells'] = ('ncells', np.arange(ds.dims['ncells'])) + ds["ncells"] = ("ncells", np.arange(ds.dims["ncells"])) decoder = psyd.CFDecoder(ds) - arr = ds.psy['t2m'].psy.isel(ncells=slice(3, 10)) - del arr['clon'] - xcoord = decoder.get_variable_by_axis(arr, 'x', arr.coords) - self.assertEqual(xcoord.name, 'clon') + arr = ds.psy["t2m"].psy.isel(ncells=slice(3, 10)) + del arr["clon"] + xcoord = decoder.get_variable_by_axis(arr, "x", arr.coords) + self.assertEqual(xcoord.name, "clon") self.assertEqual(list(xcoord.ncells), list(arr.ncells)) def test_plot_bounds_1d(self): """Test to get 2d-interval breaks""" - x = xr.Variable(('x', ), np.arange(1, 5)) + x = xr.Variable(("x",), np.arange(1, 5)) d = psyd.CFDecoder() bounds = d.get_plotbounds(x) self.assertAlmostArrayEqual(bounds, np.arange(0.5, 4.51, 1.0)) @@ -454,11 +466,11 @@ def test_plot_bounds_2d(self): x2d_bnds, y2d_bnds = np.meshgrid(x_bnds, y_bnds) d = psyd.CFDecoder() # test x bounds - bounds = 
d.get_plotbounds(xr.Variable(('y', 'x'), x2d)) + bounds = d.get_plotbounds(xr.Variable(("y", "x"), x2d)) self.assertAlmostArrayEqual(bounds, x2d_bnds) # test y bounds - bounds = d.get_plotbounds(xr.Variable(('y', 'x'), y2d)) + bounds = d.get_plotbounds(xr.Variable(("y", "x"), y2d)) self.assertAlmostArrayEqual(bounds, y2d_bnds) @@ -467,7 +479,7 @@ class UGridDecoderTest(unittest.TestCase, AlmostArrayEqualMixin): def test_get_decoder(self): """Test to get the right decoder""" - ds = psyd.open_dataset(bt.get_file('simple_triangular_grid_si0.nc')) + ds = psyd.open_dataset(bt.get_file("simple_triangular_grid_si0.nc")) d = psyd.CFDecoder.get_decoder(ds, ds.Mesh2_fcvar) self.assertIsInstance(d, psyd.UGridDecoder) return ds, d @@ -476,16 +488,16 @@ def test_x(self): """Test the get_x method""" ds, d = self.test_get_decoder() x = d.get_x(ds.Mesh2_fcvar) - self.assertIn('standard_name', x.attrs) - self.assertEqual(x.attrs['standard_name'], 'longitude') + self.assertIn("standard_name", x.attrs) + self.assertEqual(x.attrs["standard_name"], "longitude") self.assertAlmostArrayEqual(x.values, [0.3, 0.56666667]) def test_y(self): """Test the get_y method""" ds, d = self.test_get_decoder() y = d.get_y(ds.Mesh2_fcvar) - self.assertIn('standard_name', y.attrs) - self.assertEqual(y.attrs['standard_name'], 'latitude') + self.assertIn("standard_name", y.attrs) + self.assertEqual(y.attrs["standard_name"], "latitude") self.assertAlmostArrayEqual(y.values, [0.4, 0.76666668]) @@ -497,7 +509,7 @@ def tearDown(self): def test_auto_update(self): """Test the :attr:`psyplot.plotter.Plotter.no_auto_update` attribute""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) arr = ds.psy.t2m.psy[0, 0, 0] arr.psy.init_accessor(auto_update=False) @@ -512,27 +524,32 @@ def test_auto_update(self): def test_update_01_isel(self): """test the update of a single array through the isel method""" - ds = 
psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) arr = ds.psy.t2m.psy[0, 0, 0] - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) self.assertIs(arr.psy.base, ds) - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) # update to next time step arr.psy.update(time=1) self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.values.tolist(), - ds.t2m[1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual( + arr.values.tolist(), ds.t2m[1, 0, 0, :].values.tolist() + ) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) @unittest.skipIf(xr_version[:2] < (0, 10), "Not implemented for xr<0.10") def test_shiftlon(self): - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) da = ds.t2m nlon = da.lon.size @@ -551,302 +568,344 @@ def test_shiftlon(self): # shift 25% to left shifted = da.psy.shiftlon(da.lon[nlon // 4]) self.assertEqual(shifted.lon[0], da.lon[-nlon // 4 + 1] - 360) - self.assertAlmostArrayEqual(shifted[..., nlon // 2 - 1], - da[..., nlon // 4]) + self.assertAlmostArrayEqual( + shifted[..., nlon // 2 - 1], da[..., nlon // 4] + ) self.assertAlmostArrayEqual(shifted[..., 0], da[..., -nlon // 4 + 1]) def test_update_02_sel(self): """test the update of a single array through the sel method""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) arr = 
ds.psy.t2m.psy[0, 0, 0] - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) self.assertIs(arr.psy.base, ds) - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) # update to next time step - arr.psy.update(time='1979-02-28T18:00', method='nearest') + arr.psy.update(time="1979-02-28T18:00", method="nearest") self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.values.tolist(), - ds.t2m[1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual( + arr.values.tolist(), ds.t2m[1, 0, 0, :].values.tolist() + ) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) def test_update_03_isel_concat(self): """test the update of a concatenated array through the isel method""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc'))[['t2m', 'u']] + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc"))[["t2m", "u"]] arr = ds.psy.to_array().psy.isel(time=0, lev=0, lat=0) - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) - arr.name = 'something' + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) + arr.name = "something" self.assertIs(arr.psy.base, ds) - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertEqual(arr.coords['variable'].values.tolist(), ['t2m', 'u']) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertEqual(arr.coords["variable"].values.tolist(), ["t2m", "u"]) # update to next time step 
arr.psy.update(time=1) self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.coords['variable'].values.tolist(), ['t2m', 'u']) - self.assertEqual(arr.values.tolist(), - ds[['t2m', 'u']].to_array()[ - :, 1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual(arr.coords["variable"].values.tolist(), ["t2m", "u"]) + self.assertEqual( + arr.values.tolist(), + ds[["t2m", "u"]].to_array()[:, 1, 0, 0, :].values.tolist(), + ) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) - self.assertEqual(arr.name, 'something') + self.assertEqual(arr.name, "something") def test_update_04_sel_concat(self): """test the update of a concatenated array through the isel method""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc'))[['t2m', 'u']] + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc"))[["t2m", "u"]] arr = ds.psy.to_array().psy.isel(time=0, lev=0, lat=0) - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) self.assertIs(arr.psy.base, ds) - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertEqual(arr.coords['variable'].values.tolist(), ['t2m', 'u']) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertEqual(arr.coords["variable"].values.tolist(), ["t2m", "u"]) # update to next time step - arr.psy.update(time='1979-02-28T18:00', method='nearest') + arr.psy.update(time="1979-02-28T18:00", method="nearest") self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.coords['variable'].values.tolist(), ['t2m', 'u']) - self.assertEqual(arr.values.tolist(), - 
ds[['t2m', 'u']].to_array()[ - :, 1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual(arr.coords["variable"].values.tolist(), ["t2m", "u"]) + self.assertEqual( + arr.values.tolist(), + ds[["t2m", "u"]].to_array()[:, 1, 0, 0, :].values.tolist(), + ) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) def test_update_05_1variable(self): """Test to change the variable""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) arr = ds.psy.t2m.psy[0, 0, 0] - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) self.assertIs(arr.psy.base, ds) - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) # update to next time step - arr.psy.update(name='u', time=1) + arr.psy.update(name="u", time=1) self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.name, 'u') - self.assertEqual(arr.values.tolist(), - ds.u[1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual(arr.name, "u") + self.assertEqual(arr.values.tolist(), ds.u[1, 0, 0, :].values.tolist()) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) def test_update_06_2variables(self): """test the change of the 
variable of a concatenated array""" - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) - arr = ds[['t2m', 'u']].to_array().isel(time=0, lev=0, lat=0) - arr.attrs['test'] = 4 - self.assertNotIn('test', ds.t2m.attrs) - arr.name = 'something' + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) + arr = ds[["t2m", "u"]].to_array().isel(time=0, lev=0, lat=0) + arr.attrs["test"] = 4 + self.assertNotIn("test", ds.t2m.attrs) + arr.name = "something" arr.psy.base = ds - self.assertEqual(dict(arr.psy.idims), {'time': 0, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertEqual(arr.coords['variable'].values.tolist(), ['t2m', 'u']) + self.assertEqual( + dict(arr.psy.idims), + {"time": 0, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertEqual(arr.coords["variable"].values.tolist(), ["t2m", "u"]) # update to next time step - arr.psy.update(time=1, name=['u', 'v']) + arr.psy.update(time=1, name=["u", "v"]) self.assertEqual(arr.time, ds.time[1]) - self.assertEqual(arr.coords['variable'].values.tolist(), ['u', 'v']) - self.assertEqual(arr.values.tolist(), - ds[['u', 'v']].to_array()[ - :, 1, 0, 0, :].values.tolist()) - self.assertEqual(dict(arr.psy.idims), {'time': 1, 'lev': 0, 'lat': 0, - 'lon': slice(None)}) - self.assertNotIn('test', ds.t2m.attrs) - self.assertIn('test', arr.attrs) + self.assertEqual(arr.coords["variable"].values.tolist(), ["u", "v"]) + self.assertEqual( + arr.values.tolist(), + ds[["u", "v"]].to_array()[:, 1, 0, 0, :].values.tolist(), + ) + self.assertEqual( + dict(arr.psy.idims), + {"time": 1, "lev": 0, "lat": 0, "lon": slice(None)}, + ) + self.assertNotIn("test", ds.t2m.attrs) + self.assertIn("test", arr.attrs) self.assertEqual(arr.test, 4) - self.assertEqual(arr.name, 'something') + self.assertEqual(arr.name, "something") def test_update_07_variable_with_new_dims(self): ds = xr.Dataset() - ds['test1'] = (tuple('ab'), np.zeros((5, 4))) - ds['test2'] = (tuple('abc'), np.zeros((5, 4, 3))) - ds['a'] = ('a', np.arange(5)) - ds['b'] = ('b', 
np.arange(4)) - ds['c'] = ('c', np.arange(3)) - - da = ds.psy['test1'].psy.isel(a=slice(1, 3)) - self.assertEqual(da.name, 'test1') + ds["test1"] = (tuple("ab"), np.zeros((5, 4))) + ds["test2"] = (tuple("abc"), np.zeros((5, 4, 3))) + ds["a"] = ("a", np.arange(5)) + ds["b"] = ("b", np.arange(4)) + ds["c"] = ("c", np.arange(3)) + + da = ds.psy["test1"].psy.isel(a=slice(1, 3)) + self.assertEqual(da.name, "test1") self.assertEqual(da.shape, (2, 4)) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None)}) + self.assertEqual(da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None)}) # update to test2 - da.psy.update(name='test2') - self.assertEqual(da.name, 'test2') + da.psy.update(name="test2") + self.assertEqual(da.name, "test2") self.assertEqual(da.shape, (2, 4, 3)) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None), - 'c': slice(None)}) + self.assertEqual( + da.psy.idims, + {"a": slice(1, 3, 1), "b": slice(None), "c": slice(None)}, + ) # update back to test1 - da.psy.update(name='test1') - self.assertEqual(da.name, 'test1') + da.psy.update(name="test1") + self.assertEqual(da.name, "test1") self.assertEqual(da.shape, (2, 4)) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None)}) + self.assertEqual(da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None)}) # update to test2 but this time with specifying a dimension for c # does not yet work with c=1 - da.psy.update(name='test2', dims=dict(c=1)) - self.assertEqual(da.name, 'test2') + da.psy.update(name="test2", dims=dict(c=1)) + self.assertEqual(da.name, "test2") self.assertEqual(da.shape, (2, 4)) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None), - 'c': 1}) - self.assertEqual(da['c'], 1) + self.assertEqual( + da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None), "c": 1} + ) + self.assertEqual(da["c"], 1) def test_update_08_2variables_with_new_dims(self): ds = xr.Dataset() - ds['test1'] = (tuple('ab'), np.zeros((5, 4))) - ds['test11'] = (tuple('ab'), 
np.zeros((5, 4))) - ds['test2'] = (tuple('abc'), np.zeros((5, 4, 3))) - ds['test22'] = (tuple('abc'), np.zeros((5, 4, 3))) - ds['a'] = ('a', np.arange(5)) - ds['b'] = ('b', np.arange(4)) - ds['c'] = ('c', np.arange(3)) - - da = ds.psy.create_list(name=[['test1', 'test11']], prefer_list=False, - a=slice(1, 3, 1))[0] + ds["test1"] = (tuple("ab"), np.zeros((5, 4))) + ds["test11"] = (tuple("ab"), np.zeros((5, 4))) + ds["test2"] = (tuple("abc"), np.zeros((5, 4, 3))) + ds["test22"] = (tuple("abc"), np.zeros((5, 4, 3))) + ds["a"] = ("a", np.arange(5)) + ds["b"] = ("b", np.arange(4)) + ds["c"] = ("c", np.arange(3)) + + da = ds.psy.create_list( + name=[["test1", "test11"]], prefer_list=False, a=slice(1, 3, 1) + )[0] self.assertEqual(da.shape, (2, 2, 4)) - self.assertEqual(list(da['variable']), ['test1', 'test11']) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None)}) + self.assertEqual(list(da["variable"]), ["test1", "test11"]) + self.assertEqual(da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None)}) # update to test2 - da.psy.update(name=['test2', 'test22']) + da.psy.update(name=["test2", "test22"]) self.assertEqual(da.shape, (2, 2, 4, 3)) - self.assertEqual(list(da['variable']), ['test2', 'test22']) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None), - 'c': slice(None)}) + self.assertEqual(list(da["variable"]), ["test2", "test22"]) + self.assertEqual( + da.psy.idims, + {"a": slice(1, 3, 1), "b": slice(None), "c": slice(None)}, + ) # update back to test1 - da.psy.update(name=['test1', 'test11']) + da.psy.update(name=["test1", "test11"]) self.assertEqual(da.shape, (2, 2, 4)) - self.assertEqual(list(da['variable']), ['test1', 'test11']) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None)}) + self.assertEqual(list(da["variable"]), ["test1", "test11"]) + self.assertEqual(da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None)}) # update to test2 but this time with specifying a dimension for c # does not yet work with 
c=1 - da.psy.update(name=['test2', 'test22'], dims=dict(c=1)) - self.assertEqual(list(da['variable']), ['test2', 'test22']) + da.psy.update(name=["test2", "test22"], dims=dict(c=1)) + self.assertEqual(list(da["variable"]), ["test2", "test22"]) self.assertEqual(da.shape, (2, 2, 4)) - self.assertEqual(da.psy.idims, {'a': slice(1, 3, 1), 'b': slice(None), - 'c': 1}) - self.assertEqual(da['c'], 1) - + self.assertEqual( + da.psy.idims, {"a": slice(1, 3, 1), "b": slice(None), "c": 1} + ) + self.assertEqual(da["c"], 1) - @unittest.skipIf(not with_cdo, 'CDOs are not installed') + @unittest.skipIf(not with_cdo, "CDOs are not installed") def test_gridweights_01_lola(self): - fname = bt.get_file('test-t2m-u-v.nc') + fname = bt.get_file("test-t2m-u-v.nc") ds = psyd.open_dataset(fname) weights = ds.psy.t2m.psy.gridweights() ds.close() - ref = Cdo().gridweights(input=fname, returnArray='cell_weights') + ref = Cdo().gridweights(input=fname, returnArray="cell_weights") self.assertAlmostArrayEqual(weights, ref, atol=1e-7) - @unittest.skipIf(not with_cdo, 'CDOs are not installed') + @unittest.skipIf(not with_cdo, "CDOs are not installed") def test_gridweights_02_icon(self): - fname = bt.get_file('icon_test.nc') + fname = bt.get_file("icon_test.nc") ds = psyd.open_dataset(fname) weights = ds.psy.t2m.psy.gridweights() ds.close() - ref = Cdo().gridweights(input=fname, returnArray='cell_weights') + ref = Cdo().gridweights(input=fname, returnArray="cell_weights") self.assertAlmostArrayEqual(weights, ref) - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldmean_01_lola(self): from psyplot.project import Cdo - fname = bt.get_file('test-t2m-u-v.nc') + + fname = bt.get_file("test-t2m-u-v.nc") ds = psyd.open_dataset(fname) - psyd.rcParams['gridweights.use_cdo'] = True + 
psyd.rcParams["gridweights.use_cdo"] = True means = ds.psy.t2m.psy.fldmean().values - ref = Cdo().fldmean(input=fname, name='t2m')[0] + ref = Cdo().fldmean(input=fname, name="t2m")[0] self.assertAlmostArrayEqual(means, ref) # try it with the self defined gridweights - psyd.rcParams['gridweights.use_cdo'] = False + psyd.rcParams["gridweights.use_cdo"] = False means = ds.psy.t2m.psy.fldmean().values self.assertAlmostArrayEqual(means, ref, rtol=1e-5) ds.close() - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldmean_02_icon(self): from psyplot.project import Cdo - fname = bt.get_file('icon_test.nc') + + fname = bt.get_file("icon_test.nc") ds = psyd.open_dataset(fname) - psyd.rcParams['gridweights.use_cdo'] = True + psyd.rcParams["gridweights.use_cdo"] = True means = ds.psy.t2m.psy.fldmean().values - ref = Cdo().fldmean(input=fname, name='t2m')[0] + ref = Cdo().fldmean(input=fname, name="t2m")[0] self.assertAlmostArrayEqual(means, ref) ds.close() - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldstd_01_lola(self): from psyplot.project import Cdo - fname = bt.get_file('test-t2m-u-v.nc') + + fname = bt.get_file("test-t2m-u-v.nc") ds = psyd.open_dataset(fname) - psyd.rcParams['gridweights.use_cdo'] = True + psyd.rcParams["gridweights.use_cdo"] = True std = ds.psy.t2m.psy.fldstd(keepdims=True).values - ref = Cdo().fldstd(input=fname, returnArray='t2m') + ref = Cdo().fldstd(input=fname, returnArray="t2m") self.assertAlmostArrayEqual(std, ref) # try it with the self defined gridweights - psyd.rcParams['gridweights.use_cdo'] = False + 
psyd.rcParams["gridweights.use_cdo"] = False std = ds.psy.t2m.psy.fldstd(keepdims=True).values self.assertAlmostArrayEqual(std, ref, rtol=1e-3) ds.close() - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldstd_02_icon(self): from psyplot.project import Cdo - fname = bt.get_file('icon_test.nc') + + fname = bt.get_file("icon_test.nc") ds = psyd.open_dataset(fname) - psyd.rcParams['gridweights.use_cdo'] = True + psyd.rcParams["gridweights.use_cdo"] = True std = ds.psy.t2m.psy.fldstd().values ds.close() - ref = Cdo().fldstd(input=fname, name='t2m')[0] + ref = Cdo().fldstd(input=fname, name="t2m")[0] self.assertAlmostArrayEqual(std, ref) - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldpctl_01_lola(self): - fname = bt.get_file('test-t2m-u-v.nc') + fname = bt.get_file("test-t2m-u-v.nc") ds = psyd.open_dataset(fname) pctl = ds.psy.t2m.psy.fldpctl(5).values self.assertEqual(pctl.shape, ds.t2m.shape[:-2]) pctl = ds.psy.t2m.psy.fldpctl([5, 95]).values - self.assertEqual(pctl.shape, (2, ) + ds.t2m.shape[:-2]) - self.assertTrue((pctl[1] >= pctl[0]).all(), - msg=('95th percentile should always be greater or ' - 'equal than the 5th percentile! %s %s') % ( - pctl[0], pctl[1])) + self.assertEqual(pctl.shape, (2,) + ds.t2m.shape[:-2]) + self.assertTrue( + (pctl[1] >= pctl[0]).all(), + msg=( + "95th percentile should always be greater or " + "equal than the 5th percentile! 
%s %s" + ) + % (pctl[0], pctl[1]), + ) ds.close() - @unittest.skipIf(not with_cdo, 'CDOs are not installed') - @unittest.skipIf(xr_version[:2] < (0, 9), 'xarray version too low') + @unittest.skipIf(not with_cdo, "CDOs are not installed") + @unittest.skipIf(xr_version[:2] < (0, 9), "xarray version too low") def test_fldpctl_02_icon(self): - fname = bt.get_file('icon_test.nc') + fname = bt.get_file("icon_test.nc") ds = psyd.open_dataset(fname) pctl = ds.psy.t2m.psy.fldpctl(5).values self.assertEqual(pctl.shape, ds.t2m.shape[:-1]) pctl = ds.psy.t2m.psy.fldpctl([5, 95]).values - self.assertEqual(pctl.shape, (2, ) + ds.t2m.shape[:-1]) - self.assertTrue((pctl[1] >= pctl[0]).all(), - msg=('95th percentile should always be greater or ' - 'equal than the 5th percentile! %s %s') % ( - pctl[0], pctl[1])) + self.assertEqual(pctl.shape, (2,) + ds.t2m.shape[:-1]) + self.assertTrue( + (pctl[1] >= pctl[0]).all(), + msg=( + "95th percentile should always be greater or " + "equal than the 5th percentile! 
%s %s" + ) + % (pctl[0], pctl[1]), + ) ds.close() @@ -870,408 +929,570 @@ def tearDown(self): def test_setup_coords(self): """Set the :func:`psyplot.data.setup_coords` function""" - coords = {'first': [1, 2]} - self.assertEqual(psyd.setup_coords(second=3, **coords), - {'arr0': {'first': 1, 'second': 3}, - 'arr1': {'first': 2, 'second': 3}}) - self.assertEqual(psyd.setup_coords(dims=coords, second=3), - {'arr0': {'first': 1, 'second': 3}, - 'arr1': {'first': 2, 'second': 3}}) - coords['third'] = [1, 2, 3] + coords = {"first": [1, 2]} + self.assertEqual( + psyd.setup_coords(second=3, **coords), + { + "arr0": {"first": 1, "second": 3}, + "arr1": {"first": 2, "second": 3}, + }, + ) + self.assertEqual( + psyd.setup_coords(dims=coords, second=3), + { + "arr0": {"first": 1, "second": 3}, + "arr1": {"first": 2, "second": 3}, + }, + ) + coords["third"] = [1, 2, 3] # test sorting - ret = psyd.setup_coords(arr_names='test{}', second=3, - sort=['third', 'first'], **coords) - self.assertEqual(ret, { - 'test0': {'third': 1, 'first': 1, 'second': 3}, - 'test1': {'third': 1, 'first': 2, 'second': 3}, - 'test2': {'third': 2, 'first': 1, 'second': 3}, - 'test3': {'third': 2, 'first': 2, 'second': 3}, - 'test4': {'third': 3, 'first': 1, 'second': 3}, - 'test5': {'third': 3, 'first': 2, 'second': 3}}) + ret = psyd.setup_coords( + arr_names="test{}", second=3, sort=["third", "first"], **coords + ) + self.assertEqual( + ret, + { + "test0": {"third": 1, "first": 1, "second": 3}, + "test1": {"third": 1, "first": 2, "second": 3}, + "test2": {"third": 2, "first": 1, "second": 3}, + "test3": {"third": 2, "first": 2, "second": 3}, + "test4": {"third": 3, "first": 1, "second": 3}, + "test5": {"third": 3, "first": 2, "second": 3}, + }, + ) @property def _filter_test_ds(self): return xr.Dataset( - {'v0': xr.Variable(('ydim', 'xdim'), np.zeros((4, 4)), - attrs={'test': 1, 'test2': 1}), - 'v1': xr.Variable(('xdim', ), np.zeros(4), attrs={'test': 2, - 'test2': 2}), - 'v2': xr.Variable(('xdim', ), 
np.zeros(4), attrs={'test': 3, - 'test2': 3})}, - {'ydim': xr.Variable(('ydim', ), np.arange(1, 5)), - 'xdim': xr.Variable(('xdim', ), np.arange(4))}) + { + "v0": xr.Variable( + ("ydim", "xdim"), + np.zeros((4, 4)), + attrs={"test": 1, "test2": 1}, + ), + "v1": xr.Variable( + ("xdim",), np.zeros(4), attrs={"test": 2, "test2": 2} + ), + "v2": xr.Variable( + ("xdim",), np.zeros(4), attrs={"test": 3, "test2": 3} + ), + }, + { + "ydim": xr.Variable(("ydim",), np.arange(1, 5)), + "xdim": xr.Variable(("xdim",), np.arange(4)), + }, + ) def test_filter_1_name(self): """Test the filtering of the ArrayList""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=0) - l.extend(self.list_class.from_dataset(ds, ydim=1, name='v0'), - new_name=True) + arrays = self.list_class.from_dataset(ds, ydim=0) + arrays.extend( + self.list_class.from_dataset(ds, ydim=1, name="v0"), new_name=True + ) # filter by name - self.assertEqual([arr.name for arr in l(name='v1')], - ['v1']) - self.assertEqual([arr.name for arr in l(name=['v1', 'v2'])], - ['v1', 'v2']) + self.assertEqual([arr.name for arr in arrays(name="v1")], ["v1"]) self.assertEqual( - [arr.psy.arr_name for arr in l( - arr_name=lambda name: name == 'arr1')], ['arr1']) + [arr.name for arr in arrays(name=["v1", "v2"])], ["v1", "v2"] + ) + self.assertEqual( + [ + arr.psy.arr_name + for arr in arrays(arr_name=lambda name: name == "arr1") + ], + ["arr1"], + ) def test_filter_2_arr_name(self): """Test the filtering of the ArrayList""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=0) - l.extend(self.list_class.from_dataset(ds, ydim=1, name='v0'), - new_name=True) + arrays = self.list_class.from_dataset(ds, ydim=0) + arrays.extend( + self.list_class.from_dataset(ds, ydim=1, name="v0"), new_name=True + ) # fillter by array name - self.assertEqual([arr.psy.arr_name for arr in l(arr_name='arr1')], - ['arr1']) - self.assertEqual([arr.psy.arr_name for arr in l(arr_name=['arr1', - 'arr2'])], - ['arr1', 
'arr2']) self.assertEqual( - [arr.psy.arr_name for arr in l( - name=lambda name: name == 'v1')], ['arr1']) + [arr.psy.arr_name for arr in arrays(arr_name="arr1")], ["arr1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(arr_name=["arr1", "arr2"])], + ["arr1", "arr2"], + ) + self.assertEqual( + [ + arr.psy.arr_name + for arr in arrays(name=lambda name: name == "v1") + ], + ["arr1"], + ) def test_filter_3_attribute(self): """Test the filtering of the ArrayList""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=0) - l.extend(self.list_class.from_dataset(ds, ydim=1, name='v0'), - new_name=True) + arrays = self.list_class.from_dataset(ds, ydim=0) + arrays.extend( + self.list_class.from_dataset(ds, ydim=1, name="v0"), new_name=True + ) # filter by attribute - self.assertEqual([arr.name for arr in l(test=2)], ['v1']) - self.assertEqual([arr.name for arr in l(test=[2, 3])], - ['v1', 'v2']) - self.assertEqual([arr.name for arr in l(test=[1, 2], test2=2)], - ['v1']) + self.assertEqual([arr.name for arr in arrays(test=2)], ["v1"]) self.assertEqual( - [arr.psy.arr_name for arr in l(test=lambda val: val == 2)], - ['arr1']) + [arr.name for arr in arrays(test=[2, 3])], ["v1", "v2"] + ) + self.assertEqual( + [arr.name for arr in arrays(test=[1, 2], test2=2)], ["v1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(test=lambda val: val == 2)], + ["arr1"], + ) def test_filter_4_coord(self): """Test the filtering of the ArrayList""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=0) - l.extend(self.list_class.from_dataset(ds, ydim=1, name='v0'), - new_name=True) + arrays = self.list_class.from_dataset(ds, ydim=0) + arrays.extend( + self.list_class.from_dataset(ds, ydim=1, name="v0"), new_name=True + ) # filter by coordinate - self.assertEqual([arr.psy.arr_name for arr in l(y=0)], ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(y=1)], ['arr3']) - self.assertEqual([arr.psy.arr_name for arr in l(y=1, 
method='sel')], - ['arr0']) + self.assertEqual([arr.psy.arr_name for arr in arrays(y=0)], ["arr0"]) + self.assertEqual([arr.psy.arr_name for arr in arrays(y=1)], ["arr3"]) self.assertEqual( - [arr.psy.arr_name for arr in l(y=lambda val: val == 0)], ['arr0']) + [arr.psy.arr_name for arr in arrays(y=1, method="sel")], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(y=lambda val: val == 0)], + ["arr0"], + ) def test_filter_5_mixed(self): """Test the filtering of the ArrayList""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=0) - l.extend(self.list_class.from_dataset(ds, ydim=1, name='v0'), - new_name=True) + arrays = self.list_class.from_dataset(ds, ydim=0) + arrays.extend( + self.list_class.from_dataset(ds, ydim=1, name="v0"), new_name=True + ) # mix criteria self.assertEqual( - [arr.psy.arr_name for arr in l(arr_name=['arr0', 'arr1'], test=1)], - ['arr0']) + [ + arr.psy.arr_name + for arr in arrays(arr_name=["arr0", "arr1"], test=1) + ], + ["arr0"], + ) def test_filter_6_ax(self): """Test the filtering of the ArrayList""" import matplotlib.pyplot as plt + from psyplot.plotter import Plotter + ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=[0, 1], name='v0') + arrays = self.list_class.from_dataset(ds, ydim=[0, 1], name="v0") axes = plt.subplots(1, 2)[1] - for i, arr in enumerate(l): + for i, arr in enumerate(arrays): Plotter(arr, ax=axes[i]) # mix criteria self.assertEqual( - [arr.psy.arr_name for arr in l(ax=axes[0])], - [l[0].psy.arr_name]) + [arr.psy.arr_name for arr in arrays(ax=axes[0])], + [arrays[0].psy.arr_name], + ) self.assertEqual( - [arr.psy.arr_name for arr in l(ax=axes[1])], - [l[1].psy.arr_name]) + [arr.psy.arr_name for arr in arrays(ax=axes[1])], + [arrays[1].psy.arr_name], + ) def test_filter_7_fig(self): """Test the filtering of the ArrayList""" import matplotlib.pyplot as plt + from psyplot.plotter import Plotter + ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, 
ydim=[0, 1], name='v0') + arrays = self.list_class.from_dataset(ds, ydim=[0, 1], name="v0") figs = [0, 0] axes = [0, 0] figs[0], axes[0] = plt.subplots() figs[1], axes[1] = plt.subplots() - for i, arr in enumerate(l): + for i, arr in enumerate(arrays): Plotter(arr, ax=axes[i]) # mix criteria self.assertEqual( - [arr.psy.arr_name for arr in l(fig=figs[0])], - [l[0].psy.arr_name]) + [arr.psy.arr_name for arr in arrays(fig=figs[0])], + [arrays[0].psy.arr_name], + ) self.assertEqual( - [arr.psy.arr_name for arr in l(fig=figs[1])], - [l[1].psy.arr_name]) + [arr.psy.arr_name for arr in arrays(fig=figs[1])], + [arrays[1].psy.arr_name], + ) def test_filter_8_fmts(self): - import matplotlib.pyplot as plt - from test_plotter import TestPlotter, SimpleFmt + from test_plotter import SimpleFmt, TestPlotter + ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, ydim=[0, 1], name='v0') + arrays = self.list_class.from_dataset(ds, ydim=[0, 1], name="v0") class TestPlotter2(TestPlotter): + fmt_test = SimpleFmt("fmt_test") - fmt_test = SimpleFmt('fmt_test') - - TestPlotter(l[0]) - TestPlotter2(l[1]) + TestPlotter(arrays[0]) + TestPlotter2(arrays[1]) - self.assertEqual(l(fmts=['fmt1']).arr_names, l.arr_names) - self.assertEqual(l(fmts=['fmt_test']).arr_names, [l[1].psy.arr_name]) + self.assertEqual(arrays(fmts=["fmt1"]).arr_names, arrays.arr_names) + self.assertEqual( + arrays(fmts=["fmt_test"]).arr_names, [arrays[1].psy.arr_name] + ) def test_list_filter_1_name(self): """Test the filtering of InteractiveList by the variable name""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name='v1', ydim=[0, 1], - prefer_list=True) - l.extend(self.list_class.from_dataset(ds, name='v2', xdim=[0, 1], - prefer_list=True), new_name=True) - self.assertEqual([arr.psy.arr_name for arr in l(name='v1')], - ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(name='v2')], - ['arr1']) - self.assertEqual( - [arr.psy.arr_name for arr in l(name=lambda n: n == 'v1')], - 
['arr0']) + arrays = self.list_class.from_dataset( + ds, name="v1", ydim=[0, 1], prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name="v2", xdim=[0, 1], prefer_list=True + ), + new_name=True, + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(name="v1")], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(name="v2")], ["arr1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(name=lambda n: n == "v1")], + ["arr0"], + ) def test_list_filter_2_arr_name(self): """Test the filtering of InteractiveList by the array name""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name='v1', ydim=[0, 1], - prefer_list=True) - l.extend(self.list_class.from_dataset(ds, name='v2', xdim=[0, 1], - prefer_list=True), new_name=True) - self.assertEqual([arr.psy.arr_name for arr in l(arr_name='arr0')], - ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(arr_name='arr1')], - ['arr1']) - self.assertEqual( - [arr.psy.arr_name for arr in l(arr_name=lambda an: an == 'arr0')], - ['arr0']) + arrays = self.list_class.from_dataset( + ds, name="v1", ydim=[0, 1], prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name="v2", xdim=[0, 1], prefer_list=True + ), + new_name=True, + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(arr_name="arr0")], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(arr_name="arr1")], ["arr1"] + ) + self.assertEqual( + [ + arr.psy.arr_name + for arr in arrays(arr_name=lambda an: an == "arr0") + ], + ["arr0"], + ) def test_list_filter_3_attribute(self): """Test the filtering of InteractiveList by attribute""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name='v1', ydim=[0, 1], - prefer_list=True) - l.extend(self.list_class.from_dataset(ds, name='v2', xdim=[0, 1], - prefer_list=True), new_name=True) - self.assertEqual([arr.psy.arr_name for arr in l(test=2)], - ['arr0']) - 
self.assertEqual([arr.psy.arr_name for arr in l(test=3)], - ['arr1']) - self.assertEqual( - [arr.psy.arr_name for arr in l(test=lambda i: i == 2)], - ['arr0']) + arrays = self.list_class.from_dataset( + ds, name="v1", ydim=[0, 1], prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name="v2", xdim=[0, 1], prefer_list=True + ), + new_name=True, + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(test=2)], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(test=3)], ["arr1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(test=lambda i: i == 2)], + ["arr0"], + ) def test_list_filter_4_coord(self): """Test the filtering of InteractiveList by the coordinate""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name=['v1', 'v2'], xdim=0, - prefer_list=True) - l.extend( - self.list_class.from_dataset(ds, name=['v1', 'v2'], xdim=1, - prefer_list=True), new_name=True) - self.assertEqual([arr.psy.arr_name for arr in l(xdim=0)], - ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(xdim=1)], - ['arr1']) - self.assertEqual([arr.psy.arr_name for arr in l(xdim=1, method='sel')], - ['arr1']) - self.assertEqual( - [arr.psy.arr_name for arr in l(xdim=lambda i: i == 0)], - ['arr0']) - self.assertEqual( - [arr.psy.arr_name for arr in l(xdim=lambda i: i == 1, - method='sel')], - ['arr1']) + arrays = self.list_class.from_dataset( + ds, name=["v1", "v2"], xdim=0, prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name=["v1", "v2"], xdim=1, prefer_list=True + ), + new_name=True, + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(xdim=0)], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(xdim=1)], ["arr1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(xdim=1, method="sel")], + ["arr1"], + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(xdim=lambda i: i == 0)], + ["arr0"], + ) + self.assertEqual( + [ + 
arr.psy.arr_name + for arr in arrays(xdim=lambda i: i == 1, method="sel") + ], + ["arr1"], + ) def test_list_filter_5_coord_list(self): - """Test the filtering of InteractiveList by the coordinate with a list - """ + """Test the filtering of InteractiveList by the coordinate with a list""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name='v0', ydim=[0, 1], - prefer_list=True) - l.extend( - self.list_class.from_dataset(ds, name='v0', ydim=[2, 3], - prefer_list=True), new_name=True) - self.assertEqual([arr.psy.arr_name for arr in l(ydim=[0, 1])], - ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(ydim=[2, 3])], - ['arr1']) - self.assertEqual([arr.psy.arr_name for arr in l(ydim=[1, 2], - method='sel')], - ['arr0']) - self.assertEqual([arr.psy.arr_name for arr in l(ydim=[3, 4], - method='sel')], - ['arr1']) + arrays = self.list_class.from_dataset( + ds, name="v0", ydim=[0, 1], prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name="v0", ydim=[2, 3], prefer_list=True + ), + new_name=True, + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(ydim=[0, 1])], ["arr0"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(ydim=[2, 3])], ["arr1"] + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(ydim=[1, 2], method="sel")], + ["arr0"], + ) + self.assertEqual( + [arr.psy.arr_name for arr in arrays(ydim=[3, 4], method="sel")], + ["arr1"], + ) def test_list_filter_6_mixed(self): """Test the filtering of InteractiveList by attribute""" ds = self._filter_test_ds - l = self.list_class.from_dataset(ds, name='v0', ydim=[0, 1], - prefer_list=True) - l.extend(self.list_class.from_dataset(ds, name='v0', ydim=[2, 3], - prefer_list=True), new_name=True) + arrays = self.list_class.from_dataset( + ds, name="v0", ydim=[0, 1], prefer_list=True + ) + arrays.extend( + self.list_class.from_dataset( + ds, name="v0", ydim=[2, 3], prefer_list=True + ), + new_name=True, + ) self.assertEqual( - [arr.psy.arr_name 
for arr in l(name='v0', ydim=[2, 3])], - ['arr1']) + [arr.psy.arr_name for arr in arrays(name="v0", ydim=[2, 3])], + ["arr1"], + ) @property def _from_dataset_test_variables(self): """The variables and coords needed for the from_dataset tests""" variables = { - # 3d-variable - 'v0': xr.Variable(('time', 'ydim', 'xdim'), np.zeros((4, 4, 4))), - # 2d-variable with time and x - 'v1': xr.Variable(('time', 'xdim', ), np.zeros((4, 4))), - # 2d-variable with y and x - 'v2': xr.Variable(('ydim', 'xdim', ), np.zeros((4, 4))), - # 1d-variable - 'v3': xr.Variable(('xdim', ), np.zeros(4))} + # 3d-variable + "v0": xr.Variable(("time", "ydim", "xdim"), np.zeros((4, 4, 4))), + # 2d-variable with time and x + "v1": xr.Variable( + ( + "time", + "xdim", + ), + np.zeros((4, 4)), + ), + # 2d-variable with y and x + "v2": xr.Variable( + ( + "ydim", + "xdim", + ), + np.zeros((4, 4)), + ), + # 1d-variable + "v3": xr.Variable(("xdim",), np.zeros(4)), + } coords = { - 'ydim': xr.Variable(('ydim', ), np.arange(1, 5)), - 'xdim': xr.Variable(('xdim', ), np.arange(4)), - 'time': xr.Variable( - ('time', ), - pd.date_range('1999-01-01', '1999-05-01', freq='M').values)} + "ydim": xr.Variable(("ydim",), np.arange(1, 5)), + "xdim": xr.Variable(("xdim",), np.arange(4)), + "time": xr.Variable( + ("time",), + pd.date_range("1999-01-01", "1999-05-01", freq="M").values, + ), + } return variables, coords def test_from_dataset_01_basic(self): """test creation without any additional information""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds) - self.assertEqual(len(l), 4) - self.assertEqual(set(l.names), set(variables)) - for arr in l: - self.assertEqual(arr.dims, variables[arr.name].dims, - msg="Wrong dimensions for variable " + arr.name) - self.assertEqual(arr.shape, variables[arr.name].shape, - msg="Wrong shape for variable " + arr.name) + arrays = self.list_class.from_dataset(ds) + self.assertEqual(len(arrays), 4) + 
self.assertEqual(set(arrays.names), set(variables)) + for arr in arrays: + self.assertEqual( + arr.dims, + variables[arr.name].dims, + msg="Wrong dimensions for variable " + arr.name, + ) + self.assertEqual( + arr.shape, + variables[arr.name].shape, + msg="Wrong shape for variable " + arr.name, + ) def test_from_dataset_02_name(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, name="v2") - self.assertEqual(len(l), 1) - self.assertEqual(set(l.names), {"v2"}) - for arr in l: - self.assertEqual(arr.dims, variables[arr.name].dims, - msg="Wrong dimensions for variable " + arr.name) - self.assertEqual(arr.shape, variables[arr.name].shape, - msg="Wrong shape for variable " + arr.name) + arrays = self.list_class.from_dataset(ds, name="v2") + self.assertEqual(len(arrays), 1) + self.assertEqual(set(arrays.names), {"v2"}) + for arr in arrays: + self.assertEqual( + arr.dims, + variables[arr.name].dims, + msg="Wrong dimensions for variable " + arr.name, + ) + self.assertEqual( + arr.shape, + variables[arr.name].shape, + msg="Wrong shape for variable " + arr.name, + ) def test_from_dataset_03_simple_selection(self): """Test the from_dataset creation method with x- and t-selection""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, x=0, t=0) - self.assertEqual(len(l), 4) - self.assertEqual(set(l.names), set(variables)) - for arr in l: - self.assertEqual(arr.xdim.ndim, 0, - msg="Wrong x dimension for " + arr.name) - if 'time' in arr.dims: - self.assertEqual(arr.time, coords['time'], - msg="Wrong time dimension for " + arr.name) + arrays = self.list_class.from_dataset(ds, x=0, t=0) + self.assertEqual(len(arrays), 4) + self.assertEqual(set(arrays.names), set(variables)) + for arr in arrays: + self.assertEqual( + arr.xdim.ndim, 0, msg="Wrong x dimension for " 
+ arr.name + ) + if "time" in arr.dims: + self.assertEqual( + arr.time, + coords["time"], + msg="Wrong time dimension for " + arr.name, + ) def test_from_dataset_04_exact_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, ydim=2, method=None, - name=['v0', 'v2']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v2'}) - for arr in l: - self.assertEqual(arr.ydim, 2, - msg="Wrong ydim slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, ydim=2, method=None, name=["v0", "v2"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v2"}) + for arr in arrays: + self.assertEqual( + arr.ydim, 2, msg="Wrong ydim slice for " + arr.name + ) def test_from_dataset_05_exact_array_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, ydim=[[2, 3]], method=None, - name=['v0', 'v2']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v2'}) - for arr in l: - self.assertEqual(arr.ydim.values.tolist(), [2, 3], - msg="Wrong ydim slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, ydim=[[2, 3]], method=None, name=["v0", "v2"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v2"}) + for arr in arrays: + self.assertEqual( + arr.ydim.values.tolist(), + [2, 3], + msg="Wrong ydim slice for " + arr.name, + ) def test_from_dataset_06_nearest_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, ydim=1.7, method='nearest', - name=['v0', 'v2']) - self.assertEqual(len(l), 2) - 
self.assertEqual(set(l.names), {'v0', 'v2'}) - for arr in l: - self.assertEqual(arr.ydim, 2, - msg="Wrong ydim slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, ydim=1.7, method="nearest", name=["v0", "v2"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v2"}) + for arr in arrays: + self.assertEqual( + arr.ydim, 2, msg="Wrong ydim slice for " + arr.name + ) def test_from_dataset_07_time_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, t='1999-02-28', method=None, - name=['v0', 'v1']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v1'}) - for arr in l: - self.assertEqual(arr.time, coords['time'][1], - msg="Wrong time slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, t="1999-02-28", method=None, name=["v0", "v1"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v1"}) + for arr in arrays: + self.assertEqual( + arr.time, + coords["time"][1], + msg="Wrong time slice for " + arr.name, + ) def test_from_dataset_08_time_array_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) # test with array of time - l = self.list_class.from_dataset(ds, t=[coords['time'][1:3]], - method=None, name=['v0', 'v1']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v1'}) - for arr in l: - self.assertEqual(arr.time.values.tolist(), - coords['time'][1:3].values.tolist(), - msg="Wrong time slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, t=[coords["time"][1:3]], method=None, name=["v0", "v1"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v1"}) + for arr in arrays: + self.assertEqual( + 
arr.time.values.tolist(), + coords["time"][1:3].values.tolist(), + msg="Wrong time slice for " + arr.name, + ) def test_from_dataset_09_nearest_time_selection(self): """Test the from_dataset creation method with selected names""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, t='1999-02-20', method='nearest', - name=['v0', 'v1']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v1'}) - for arr in l: - self.assertEqual(arr.time, coords['time'][1], - msg="Wrong time slice for " + arr.name) + arrays = self.list_class.from_dataset( + ds, t="1999-02-20", method="nearest", name=["v0", "v1"] + ) + self.assertEqual(len(arrays), 2) + self.assertEqual(set(arrays.names), {"v0", "v1"}) + for arr in arrays: + self.assertEqual( + arr.time, + coords["time"][1], + msg="Wrong time slice for " + arr.name, + ) def test_from_dataset_10_2_vars(self): """Test the creation of arrays out of two variables""" variables, coords = self._from_dataset_test_variables - variables['v4'] = variables['v3'].copy() + variables["v4"] = variables["v3"].copy() ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, name=[['v3', 'v4'], 'v2'], - xdim=[[2]], squeeze=False) - self.assertEqual(len(l), 2) - self.assertIn('variable', l[0].dims) - self.assertEqual(l[0].coords['variable'].values.tolist(), ['v3', 'v4']) - self.assertEqual(l[0].ndim, 2) + arrays = self.list_class.from_dataset( + ds, name=[["v3", "v4"], "v2"], xdim=[[2]], squeeze=False + ) + self.assertEqual(len(arrays), 2) + self.assertIn("variable", arrays[0].dims) + self.assertEqual( + arrays[0].coords["variable"].values.tolist(), ["v3", "v4"] + ) + self.assertEqual(arrays[0].ndim, 2) - self.assertEqual(l[1].name, 'v2') - self.assertEqual(l[1].ndim, variables['v2'].ndim) + self.assertEqual(arrays[1].name, "v2") + self.assertEqual(arrays[1].ndim, variables["v2"].ndim) def test_from_dataset_11_list(self): """Test the creation of 
a list of InteractiveLists""" @@ -1280,33 +1501,35 @@ def test_from_dataset_11_list(self): # Create two lists, each containing two arrays of variables v1 and v2. # In the first list, the xdim dimensions are 0 and 1. # In the second, the xdim dimensions are both 2 - l = self.list_class.from_dataset( - ds, name=[['v1', 'v2']], xdim=[[0, 1], 2], prefer_list=True) - - self.assertEqual(len(l), 2) - self.assertIsInstance(l[0], psyd.InteractiveList) - self.assertIsInstance(l[1], psyd.InteractiveList) - self.assertEqual(len(l[0]), 2) - self.assertEqual(len(l[1]), 2) - self.assertEqual(l[0][0].xdim, 0) - self.assertEqual(l[0][1].xdim, 1) - self.assertEqual(l[1][0].xdim, 2) - self.assertEqual(l[1][1].xdim, 2) + arrays = self.list_class.from_dataset( + ds, name=[["v1", "v2"]], xdim=[[0, 1], 2], prefer_list=True + ) + + self.assertEqual(len(arrays), 2) + self.assertIsInstance(arrays[0], psyd.InteractiveList) + self.assertIsInstance(arrays[1], psyd.InteractiveList) + self.assertEqual(len(arrays[0]), 2) + self.assertEqual(len(arrays[1]), 2) + self.assertEqual(arrays[0][0].xdim, 0) + self.assertEqual(arrays[0][1].xdim, 1) + self.assertEqual(arrays[1][0].xdim, 2) + self.assertEqual(arrays[1][1].xdim, 2) def test_from_dataset_12_list_and_2_vars(self): """Test the creation of a list of Interactive lists with one array out of 2 variables""" variables, coords = self._from_dataset_test_variables - variables['v4'] = variables['v3'].copy() + variables["v4"] = variables["v3"].copy() ds = xr.Dataset(variables, coords) - l = ds.psy.create_list( - ds, name=[['v1', ['v3', 'v4']], ['v1', 'v2']], prefer_list=True) + arrays = ds.psy.create_list( + ds, name=[["v1", ["v3", "v4"]], ["v1", "v2"]], prefer_list=True + ) - self.assertEqual(len(l), 2) - self.assertIsInstance(l[0], psyd.InteractiveList) - self.assertIsInstance(l[1], psyd.InteractiveList) - self.assertEqual(len(l[0]), 2) - self.assertEqual(len(l[1]), 2) + self.assertEqual(len(arrays), 2) + self.assertIsInstance(arrays[0], 
psyd.InteractiveList) + self.assertIsInstance(arrays[1], psyd.InteractiveList) + self.assertEqual(len(arrays[0]), 2) + self.assertEqual(len(arrays[1]), 2) def test_from_dataset_13_decoder_class(self): ds = xr.Dataset(*self._from_dataset_test_variables) @@ -1314,8 +1537,8 @@ def test_from_dataset_13_decoder_class(self): class MyDecoder(psyd.CFDecoder): pass - l = self.list_class.from_dataset(ds, name="v2", decoder=MyDecoder) - self.assertIsInstance(l[0].psy.decoder, MyDecoder) + arrays = self.list_class.from_dataset(ds, name="v2", decoder=MyDecoder) + self.assertIsInstance(arrays[0].psy.decoder, MyDecoder) def test_from_dataset_14_decoder_instance(self): ds = xr.Dataset(*self._from_dataset_test_variables) @@ -1325,123 +1548,206 @@ class MyDecoder(psyd.CFDecoder): decoder = MyDecoder(ds) - l = self.list_class.from_dataset(ds, name="v2", decoder=decoder) - self.assertIs(l[0].psy.decoder, decoder) + arrays = self.list_class.from_dataset(ds, name="v2", decoder=decoder) + self.assertIs(arrays[0].psy.decoder, decoder) def test_from_dataset_15_decoder_kws(self): ds = xr.Dataset(*self._from_dataset_test_variables) - l = self.list_class.from_dataset(ds, name="v2", - decoder=dict(x={'myx'})) - self.assertEqual(l[0].psy.decoder.x, {'myx'}) + arrays = self.list_class.from_dataset( + ds, name="v2", decoder=dict(x={"myx"}) + ) + self.assertEqual(arrays[0].psy.decoder.x, {"myx"}) def test_from_dataset_16_default_slice(self): """Test selection with default_slice=0""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - l = self.list_class.from_dataset(ds, ydim=2, default_slice=0, method=None, - name=['v0', 'v2']) - self.assertEqual(len(l), 2) - self.assertEqual(set(l.names), {'v0', 'v2'}) - for arr in l: - self.assertEqual(arr.ydim, 2, - msg="Wrong ydim slice for " + arr.name) - + arrays = self.list_class.from_dataset( + ds, ydim=2, default_slice=0, method=None, name=["v0", "v2"] + ) + self.assertEqual(len(arrays), 2) + 
self.assertEqual(set(arrays.names), {"v0", "v2"}) + for arr in arrays: + self.assertEqual( + arr.ydim, 2, msg="Wrong ydim slice for " + arr.name + ) def test_array_info(self): variables, coords = self._from_dataset_test_variables - variables['v4'] = variables['v3'].copy() + variables["v4"] = variables["v3"].copy() ds = xr.Dataset(variables, coords) - fname = osp.relpath(bt.get_file('test-t2m-u-v.nc'), '.') + fname = osp.relpath(bt.get_file("test-t2m-u-v.nc"), ".") ds2 = xr.open_dataset(fname) - l = ds.psy.create_list( - name=[['v1', ['v3', 'v4']], ['v1', 'v2']], prefer_list=True) - l.extend(ds2.psy.create_list(name=['t2m'], x=0, t=1), - new_name=True) + arrays = ds.psy.create_list( + name=[["v1", ["v3", "v4"]], ["v1", "v2"]], prefer_list=True + ) + arrays.extend( + ds2.psy.create_list(name=["t2m"], x=0, t=1), new_name=True + ) if xr_version < (0, 17): - nc_store = ('xarray.backends.netCDF4_', 'NetCDF4DataStore') + nc_store = ("xarray.backends.netCDF4_", "NetCDF4DataStore") else: nc_store = (None, None) - self.assertEqual(l.array_info(engine='netCDF4'), OrderedDict([ - # first list contating an array with two variables - ('arr0', OrderedDict([ - ('arr0', {'dims': {'t': slice(None), 'x': slice(None)}, - 'attrs': OrderedDict(), 'store': (None, None), - 'name': 'v1', 'fname': None}), - ('arr1', {'dims': {'y': slice(None)}, - 'attrs': OrderedDict(), 'store': (None, None), - 'name': [['v3', 'v4']], 'fname': None}), - ('attrs', OrderedDict())])), - # second list with two arrays containing each one variable - ('arr1', OrderedDict([ - ('arr0', {'dims': {'t': slice(None), 'x': slice(None)}, - 'attrs': OrderedDict(), 'store': (None, None), - 'name': 'v1', 'fname': None}), - ('arr1', {'dims': {'y': slice(None), 'x': slice(None)}, - 'attrs': OrderedDict(), 'store': (None, None), - 'name': 'v2', 'fname': None}), - ('attrs', OrderedDict())])), - # last array from real dataset - ('arr2', {'dims': {'z': slice(None), 'y': slice(None), - 't': 1, 'x': 0}, - 'attrs': ds2.t2m.attrs, - 
'store': nc_store, - 'name': 't2m', 'fname': fname}), - ('attrs', OrderedDict())])) - return l + self.assertEqual( + arrays.array_info(engine="netCDF4"), + dict( + [ + # first list contating an array with two variables + ( + "arr0", + dict( + [ + ( + "arr0", + { + "dims": { + "t": slice(None), + "x": slice(None), + }, + "attrs": dict(), + "store": (None, None), + "name": "v1", + "fname": None, + }, + ), + ( + "arr1", + { + "dims": {"y": slice(None)}, + "attrs": dict(), + "store": (None, None), + "name": [["v3", "v4"]], + "fname": None, + }, + ), + ("attrs", dict()), + ] + ), + ), + # second list with two arrays containing each one variable + ( + "arr1", + dict( + [ + ( + "arr0", + { + "dims": { + "t": slice(None), + "x": slice(None), + }, + "attrs": dict(), + "store": (None, None), + "name": "v1", + "fname": None, + }, + ), + ( + "arr1", + { + "dims": { + "y": slice(None), + "x": slice(None), + }, + "attrs": dict(), + "store": (None, None), + "name": "v2", + "fname": None, + }, + ), + ("attrs", dict()), + ] + ), + ), + # last array from real dataset + ( + "arr2", + { + "dims": { + "z": slice(None), + "y": slice(None), + "t": 1, + "x": 0, + }, + "attrs": ds2.t2m.attrs, + "store": nc_store, + "name": "t2m", + "fname": fname, + }, + ), + ("attrs", dict()), + ] + ), + ) + return arrays def test_from_dict_01(self): """Test the creation from a dictionary""" - l = self.test_array_info() - d = l.array_info(engine='netCDF4') - self.assertEqual(self.list_class.from_dict(d).array_info(), - l[-1:].array_info()) - d = l.array_info(ds_description={'ds'}) - self.assertEqual(self.list_class.from_dict(d).array_info(), - l.array_info()) + arrays = self.test_array_info() + d = arrays.array_info(engine="netCDF4") + self.assertEqual( + self.list_class.from_dict(d).array_info(), arrays[-1:].array_info() + ) + d = arrays.array_info(ds_description={"ds"}) + self.assertEqual( + self.list_class.from_dict(d).array_info(), arrays.array_info() + ) def test_from_dict_02_only(self): """Test the 
only keyword""" - l = self.test_array_info() - d = l.array_info(ds_description={'ds'}) + arrays = self.test_array_info() + d = arrays.array_info(ds_description={"ds"}) # test to use only the first 2 - self.assertEqual(self.list_class.from_dict( - d, only=l.arr_names[1:]).array_info(), - l[1:].array_info()) + self.assertEqual( + self.list_class.from_dict( + d, only=arrays.arr_names[1:] + ).array_info(), + arrays[1:].array_info(), + ) # test to a pattern - self.assertEqual(self.list_class.from_dict( - d, only='|'.join(l.arr_names[1:])).array_info(), - l[1:].array_info()) + self.assertEqual( + self.list_class.from_dict( + d, only="|".join(arrays.arr_names[1:]) + ).array_info(), + arrays[1:].array_info(), + ) # test to a function self.assertEqual( self.list_class.from_dict( - d, only=lambda n, info: ( - n in l.arr_names[1:] and 'name' not in 'info') - ).array_info(), - l[1:].array_info()) + d, + only=lambda n, info: ( + n in arrays.arr_names[1:] and "name" not in "info" + ), + ).array_info(), + arrays[1:].array_info(), + ) def test_from_dict_03_mfdataset(self): """Test opening a multifile dataset""" ds = xr.Dataset(*self._from_dataset_test_variables) ds1 = ds.isel(time=slice(0, 2)) ds2 = ds.isel(time=slice(2, None)) - fname1 = tempfile.NamedTemporaryFile(suffix='.nc', - prefix='tmp_psyplot_').name + fname1 = tempfile.NamedTemporaryFile( + suffix=".nc", prefix="tmp_psyplot_" + ).name ds1.to_netcdf(fname1) self._created_files.add(fname1) - fname2 = tempfile.NamedTemporaryFile(suffix='.nc', - prefix='tmp_psyplot_').name + fname2 = tempfile.NamedTemporaryFile( + suffix=".nc", prefix="tmp_psyplot_" + ).name ds2.to_netcdf(fname2) self._created_files.add(fname2) # now open the mfdataset ds = psyd.open_mfdataset([fname1, fname2]) - l = self.list_class.from_dataset(ds, name=['v0'], time=[0, 3]) + arrays = self.list_class.from_dataset(ds, name=["v0"], time=[0, 3]) if xr_version >= (0, 18): ds.psy.filename = [fname1, fname2] self.assertEqual( - 
self.list_class.from_dict(l.array_info()).array_info(), - l.array_info()) + self.list_class.from_dict(arrays.array_info()).array_info(), + arrays.array_info(), + ) ds.close() def test_from_dict_04_concat_dim(self): @@ -1449,28 +1755,33 @@ def test_from_dict_04_concat_dim(self): ds = xr.Dataset(*self._from_dataset_test_variables) ds1 = ds.isel(time=0) ds2 = ds.isel(time=1) - fname1 = tempfile.NamedTemporaryFile(suffix='.nc', - prefix='tmp_psyplot_').name + fname1 = tempfile.NamedTemporaryFile( + suffix=".nc", prefix="tmp_psyplot_" + ).name ds1.to_netcdf(fname1) self._created_files.add(fname1) - fname2 = tempfile.NamedTemporaryFile(suffix='.nc', - prefix='tmp_psyplot_').name + fname2 = tempfile.NamedTemporaryFile( + suffix=".nc", prefix="tmp_psyplot_" + ).name ds2.to_netcdf(fname2) self._created_files.add(fname2) # now open the mfdataset - ds = psyd.open_mfdataset([fname1, fname2], concat_dim='time', - combine='nested') - l = self.list_class.from_dataset(ds, name=['v0'], time=[0, 1]) + ds = psyd.open_mfdataset( + [fname1, fname2], concat_dim="time", combine="nested" + ) + arrays = self.list_class.from_dataset(ds, name=["v0"], time=[0, 1]) self.assertEqual( - self.list_class.from_dict(l.array_info()).array_info(), - l.array_info()) + self.list_class.from_dict(arrays.array_info()).array_info(), + arrays.array_info(), + ) def test_logger(self): """Test whether one can access the logger""" import logging - l = self.test_array_info() - self.assertIsInstance(l.logger, logging.Logger) + + arrays = self.test_array_info() + self.assertIsInstance(arrays.logger, logging.Logger) class TestInteractiveList(TestArrayList): @@ -1480,27 +1791,37 @@ class TestInteractiveList(TestArrayList): def test_to_dataframe(self): variables, coords = self._from_dataset_test_variables - variables['v1'][:] = np.arange(variables['v1'].size).reshape( - variables['v1'].shape) + variables["v1"][:] = np.arange(variables["v1"].size).reshape( + variables["v1"].shape + ) ds = xr.Dataset(variables, coords) 
- l = psyd.InteractiveList.from_dataset(ds, name='v1', t=[0, 1]) - l.extend(psyd.InteractiveList.from_dataset(ds, name='v1', t=2, - x=slice(1, 3)), - new_name=True) - self.assertEqual(len(l), 3) - self.assertTrue(all(arr.ndim == 1 for arr in l), msg=l) - df = l.to_dataframe() + arrays = psyd.InteractiveList.from_dataset(ds, name="v1", t=[0, 1]) + arrays.extend( + psyd.InteractiveList.from_dataset( + ds, name="v1", t=2, x=slice(1, 3) + ), + new_name=True, + ) + self.assertEqual(len(arrays), 3) + self.assertTrue(all(arr.ndim == 1 for arr in arrays), msg=arrays) + df = arrays.to_dataframe() self.assertEqual(df.shape, (ds.xdim.size, 3)) self.assertEqual(df.index.values.tolist(), ds.xdim.values.tolist()) - self.assertEqual(df[l[0].psy.arr_name].values.tolist(), - ds.v1[0].values.tolist()) - self.assertEqual(df[l[1].psy.arr_name].values.tolist(), - ds.v1[1].values.tolist()) - self.assertEqual(df[l[2].psy.arr_name].notnull().sum(), 2) self.assertEqual( - df[l[2].psy.arr_name].values[ - df[l[2].psy.arr_name].notnull().values].tolist(), - ds.v1[2, 1:3].values.tolist()) + df[arrays[0].psy.arr_name].values.tolist(), + ds.v1[0].values.tolist(), + ) + self.assertEqual( + df[arrays[1].psy.arr_name].values.tolist(), + ds.v1[1].values.tolist(), + ) + self.assertEqual(df[arrays[2].psy.arr_name].notnull().sum(), 2) + self.assertEqual( + df[arrays[2].psy.arr_name] + .values[df[arrays[2].psy.arr_name].notnull().values] + .tolist(), + ds.v1[2, 1:3].values.tolist(), + ) class AbsoluteTimeTest(unittest.TestCase, AlmostArrayEqualMixin): @@ -1521,29 +1842,41 @@ def tearDown(self): @property def _test_ds(self): - import xarray as xr import pandas as pd - time = xr.Variable('time', pd.to_datetime( - ['1979-01-01T12:00:00', '1979-01-01T18:00:00', - '1979-01-01T18:30:00']), - encoding={'units': 'day as %Y%m%d.%f'}) - var = xr.Variable(('time', 'x'), np.zeros((len(time), 5))) - return xr.Dataset({'test': var}, {'time': time}) + import xarray as xr + + time = xr.Variable( + "time", + 
pd.to_datetime( + [ + "1979-01-01T12:00:00", + "1979-01-01T18:00:00", + "1979-01-01T18:30:00", + ] + ), + encoding={"units": "day as %Y%m%d.%f"}, + ) + var = xr.Variable(("time", "x"), np.zeros((len(time), 5))) + return xr.Dataset({"test": var}, {"time": time}) def test_to_netcdf(self): """Test whether the data is stored correctly""" import netCDF4 as nc + ds = self._test_ds - fname = tempfile.NamedTemporaryFile(suffix='.nc', - prefix='tmp_psyplot_').name + fname = tempfile.NamedTemporaryFile( + suffix=".nc", prefix="tmp_psyplot_" + ).name self._created_files.add(fname) psyd.to_netcdf(ds, fname) with nc.Dataset(fname) as nco: self.assertAlmostArrayEqual( - nco.variables['time'][:], [19790101.5, 19790101.75, - 19790101.75 + 30.0 / (24.0 * 60.)], - rtol=0, atol=1e-5) - self.assertEqual(nco.variables['time'].units, 'day as %Y%m%d.%f') + nco.variables["time"][:], + [19790101.5, 19790101.75, 19790101.75 + 30.0 / (24.0 * 60.0)], + rtol=0, + atol=1e-5, + ) + self.assertEqual(nco.variables["time"].units, "day as %Y%m%d.%f") return fname def test_open_dataset(self): @@ -1552,7 +1885,8 @@ def test_open_dataset(self): ds = psyd.open_dataset(fname) self.assertEqual( pd.to_datetime(ds.time.values).tolist(), - pd.to_datetime(ref_ds.time.values).tolist()) + pd.to_datetime(ref_ds.time.values).tolist(), + ) class FilenamesTest(unittest.TestCase): @@ -1560,10 +1894,11 @@ class FilenamesTest(unittest.TestCase): @property def fname(self): - return osp.join(osp.dirname(__file__), 'test-t2m-u-v.nc') + return osp.join(osp.dirname(__file__), "test-t2m-u-v.nc") def _test_engine(self, engine): from importlib import import_module + fname = self.fname ds = psyd.open_dataset(fname, engine=engine).load() self.assertEqual(ds.psy.filename, fname) @@ -1578,10 +1913,10 @@ def _test_engine(self, engine): ds2.close() ds.psy.filename = None dumped_fname, dumped_store_mod, dumped_store = psyd.get_filename_ds( - ds, dump=True, engine=engine, paths=True) + ds, dump=True, engine=engine, paths=True + ) 
self.assertTrue(dumped_fname) - self.assertTrue(osp.exists(dumped_fname), - msg='Missing %s' % fname) + self.assertTrue(osp.exists(dumped_fname), msg="Missing %s" % fname) self.assertEqual(dumped_store_mod, store_mod) self.assertEqual(dumped_store, store) ds.close() @@ -1589,30 +1924,30 @@ def _test_engine(self, engine): os.remove(dumped_fname) dumped_fname, dumped_store_mod, dumped_store = psyd.get_filename_ds( - ds, dump=True, engine=engine, paths=dumped_fname) + ds, dump=True, engine=engine, paths=dumped_fname + ) self.assertTrue(dumped_fname) - self.assertTrue(osp.exists(dumped_fname), - msg='Missing %s' % fname) + self.assertTrue(osp.exists(dumped_fname), msg="Missing %s" % fname) self.assertEqual(dumped_store_mod, store_mod) self.assertEqual(dumped_store, store) ds.close() os.remove(dumped_fname) @unittest.skipIf(xr_version >= (0, 17), "Not supported for xarray>=0.18") - @unittest.skipIf(not with_nio, 'Nio module not installed') + @unittest.skipIf(not with_nio, "Nio module not installed") def test_nio(self): - self._test_engine('pynio') + self._test_engine("pynio") @unittest.skipIf(xr_version >= (0, 17), "Not supported for xarray>=0.18") - @unittest.skipIf(not with_netcdf4, 'netCDF4 module not installed') + @unittest.skipIf(not with_netcdf4, "netCDF4 module not installed") def test_netcdf4(self): - self._test_engine('netcdf4') + self._test_engine("netcdf4") @unittest.skipIf(xr_version >= (0, 17), "Not supported for xarray>=0.18") - @unittest.skipIf(not with_scipy, 'scipy module not installed') + @unittest.skipIf(not with_scipy, "scipy module not installed") def test_scipy(self): - self._test_engine('scipy') + self._test_engine("scipy") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_gdalstore.py b/tests/test_gdalstore.py index f1463b4..bb1fe74 100644 --- a/tests/test_gdalstore.py +++ b/tests/test_gdalstore.py @@ -1,32 +1,19 @@ """Module to test the :mod:`psyplot.gdal_store` module.""" -# Disclaimer -# ---------- 
-# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only import unittest + import _base_testing as bt import pandas as pd + import psyplot.data as psyd + try: import gdal except ImportError: @@ -39,24 +26,30 @@ class TestGdalStore(unittest.TestCase): @unittest.skipIf(not gdal, "GDAL module not installed") def test_open_geotiff(self): """Test to open a GeoTiff file""" - ds_ref = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) - ds_tiff = psyd.open_dataset(bt.get_file( - 'test-t2m-1979-01-31T18-00-00.tif'), engine='gdal') + ds_ref = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) + ds_tiff = psyd.open_dataset( + bt.get_file("test-t2m-1979-01-31T18-00-00.tif"), engine="gdal" + ) self.assertListEqual( ds_tiff.Band1.values.tolist(), - ds_ref.isel(time=0, lev=0).t2m.values.tolist()) + ds_ref.isel(time=0, lev=0).t2m.values.tolist(), + ) @unittest.skipIf(not gdal, "GDAL module not installed") def test_open_mf_geotiff(self): """Test to open multiple GeoTiff files and extract the time from the file name""" - ds_ref = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) - ds_tiff = psyd.open_mfdataset(bt.get_file('test-t2m-*.tif'), - engine='gdal', - t_format='test-t2m-%Y-%m-%dT%H-%M-%S') + ds_ref = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) + ds_tiff = psyd.open_mfdataset( + bt.get_file("test-t2m-*.tif"), + engine="gdal", + t_format="test-t2m-%Y-%m-%dT%H-%M-%S", + ) self.assertListEqual( ds_ref.isel(time=[0, 1], lev=0).t2m.values.tolist(), - ds_tiff.Band1.values.tolist()) + ds_tiff.Band1.values.tolist(), + ) self.assertListEqual( pd.to_datetime(ds_tiff.time.values).tolist(), - pd.to_datetime(ds_ref.time[:2].values).tolist()) + pd.to_datetime(ds_ref.time[:2].values).tolist(), + ) diff --git a/tests/test_main.py b/tests/test_main.py index ea563c0..35be0d3 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1,45 +1,31 @@ """Test the :mod:`psyplot.__main__` module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 
Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only -import sys +import inspect import os import os.path as osp -import subprocess as spr -import yaml import shutil +import subprocess as spr +import sys import tempfile import unittest from itertools import product -import six + import _base_testing as bt -import psyplot.__main__ as main -import psyplot.project as psy -import psyplot import matplotlib.pyplot as plt +import six import test_plotter as tp -import inspect +import yaml + +import psyplot +import psyplot.__main__ as main +import psyplot.project as psy remove_temp_files = True @@ -50,15 +36,15 @@ class TestCommandLine(unittest.TestCase): _created_files = set() def setUp(self): - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") self._created_files = set() def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") tp.results.clear() if 
remove_temp_files: for f in self._created_files: @@ -69,13 +55,16 @@ def tearDown(self): self._created_files.clear() def _create_and_save_test_project(self): - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), - name=['t2m', 'u'], time=[0, 1]) + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name=["t2m", "u"], time=[0, 1] + ) self.assertEqual(len(sp), 4, sp) fname = tempfile.NamedTemporaryFile( - suffix='.pkl', prefix='test_psyplot_').name + suffix=".pkl", prefix="test_psyplot_" + ).name self._created_files.add(fname) sp.save_project(fname, use_rel_paths=False) return sp, fname @@ -84,89 +73,135 @@ def test_get_parser(self): parser = main.get_parser() args = inspect.getfullargspec(main.make_plot)[0] for arg in args: - self.assertIn(arg, parser.unfinished_arguments, - msg='Missing ' + arg) + self.assertIn( + arg, parser.unfinished_arguments, msg="Missing " + arg + ) def test_main_01_from_project(self): """Test the :func:`psyplot.__main__.main` function""" if not six.PY2: - with self.assertRaisesRegex(ValueError, 'filename'): - main.main(['-o', 'test.pdf']) + with self.assertRaisesRegex(ValueError, "filename"): + main.main(["-o", "test.pdf"]) sp, fname1 = self._create_and_save_test_project() fname2 = tempfile.NamedTemporaryFile( - suffix='.pdf', prefix='test_psyplot_').name + suffix=".pdf", prefix="test_psyplot_" + ).name self._created_files.add(fname2) sp.save_project(fname1, use_rel_paths=False) - psy.close('all') + psy.close("all") if six.PY2: - main.main(['-p', fname1, '-o', fname2]) + main.main(["-p", fname1, "-o", fname2]) else: - with self.assertWarnsRegex(UserWarning, 'ignored'): - main.main(['-p', fname1, '-o', fname2, '-n', 't2m']) - self.assertTrue(osp.exists(fname2), msg='Missing ' + fname2) + with self.assertWarnsRegex(UserWarning, "ignored"): + 
main.main(["-p", fname1, "-o", fname2, "-n", "t2m"]) + self.assertTrue(osp.exists(fname2), msg="Missing " + fname2) self.assertEqual(len(psy.gcp(True)), 4) def test_main_02_alternative_ds(self): - sp, fname1 = self._create_and_save_test_project() fname2 = tempfile.NamedTemporaryFile( - suffix='.pdf', prefix='test_psyplot_').name + suffix=".pdf", prefix="test_psyplot_" + ).name self._created_files.add(fname2) sp.save_project(fname1, use_rel_paths=False) - psy.close('all') - main.main([bt.get_file('circumpolar_test.nc'), '-p', fname1, - '-o', fname2]) - self.assertTrue(osp.exists(fname2), msg='Missing ' + fname2) + psy.close("all") + main.main( + [bt.get_file("circumpolar_test.nc"), "-p", fname1, "-o", fname2] + ) + self.assertTrue(osp.exists(fname2), msg="Missing " + fname2) mp = psy.gcp(True) self.assertEqual(len(mp), 4) self.assertEqual( - set(t[0] for t in mp._get_dsnames(mp.array_info( - dump=False, use_rel_paths=False))), - {bt.get_file('circumpolar_test.nc')}) + set( + t[0] + for t in mp._get_dsnames( + mp.array_info(dump=False, use_rel_paths=False) + ) + ), + {bt.get_file("circumpolar_test.nc")}, + ) def test_main_03_dims(self): import yaml - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') + + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) fname2 = tempfile.NamedTemporaryFile( - suffix='.pdf', prefix='test_psyplot_').name + suffix=".pdf", prefix="test_psyplot_" + ).name self._created_files.add(fname2) # create a formatoptions file fmt_file = tempfile.NamedTemporaryFile( - suffix='.yml', prefix='test_psyplot_').name + suffix=".yml", prefix="test_psyplot_" + ).name self._created_files.add(fmt_file) - with open(fmt_file, 'w') as f: - yaml.dump({'fmt1': 'fmt1', 'fmt2': 'fmt2'}, f) + with open(fmt_file, "w") as f: + yaml.dump({"fmt1": "fmt1", "fmt2": "fmt2"}, f) if not six.PY2: - with self.assertRaisesRegex(ValueError, 'plotting method'): - 
main.main([bt.get_file('test-t2m-u-v.nc'), '-o', fname2, - '-d', 'time,1,2', 'y,3,4', '-n', 'u', 'v']) - main.main([bt.get_file('test-t2m-u-v.nc'), '-o', fname2, - '-d', 'time,1,2', 'y,3,4', '-n', 'u', 'v', - '-pm', 'test_plotter', '-fmt', fmt_file]) + with self.assertRaisesRegex(ValueError, "plotting method"): + main.main( + [ + bt.get_file("test-t2m-u-v.nc"), + "-o", + fname2, + "-d", + "time,1,2", + "y,3,4", + "-n", + "u", + "v", + ] + ) + main.main( + [ + bt.get_file("test-t2m-u-v.nc"), + "-o", + fname2, + "-d", + "time,1,2", + "y,3,4", + "-n", + "u", + "v", + "-pm", + "test_plotter", + "-fmt", + fmt_file, + ] + ) mp = psy.gcp(True) - self.assertEqual(len(mp), 2*2*2, msg=mp) - all_dims = set(product((1, 2), (3, 4), ('u', 'v'))) + self.assertEqual(len(mp), 2 * 2 * 2, msg=mp) + all_dims = set(product((1, 2), (3, 4), ("u", "v"))) for arr in mp: idims = arr.psy.idims - all_dims -= {(idims['time'], idims['lat'], arr.name)} + all_dims -= {(idims["time"], idims["lat"], arr.name)} self.assertFalse(all_dims) for i, plotter in enumerate(mp.plotters): - self.assertEqual(plotter['fmt1'], 'fmt1', - msg='Wrong value for fmt1 of plotter %i!' % i) - self.assertEqual(plotter['fmt2'], 'fmt2', - msg='Wrong value for fmt2 of plotter %i!' % i) + self.assertEqual( + plotter["fmt1"], + "fmt1", + msg="Wrong value for fmt1 of plotter %i!" % i, + ) + self.assertEqual( + plotter["fmt2"], + "fmt2", + msg="Wrong value for fmt2 of plotter %i!" 
% i, + ) def test_all_versions(self): """Test to display all versions""" ref = psyplot.get_versions() - proc = spr.Popen([sys.executable, '-m', 'psyplot', '-aV'], - stdout=spr.PIPE, stderr=spr.PIPE) + proc = spr.Popen( + [sys.executable, "-m", "psyplot", "-aV"], + stdout=spr.PIPE, + stderr=spr.PIPE, + ) proc.wait() self.assertFalse(proc.poll(), msg=proc.stderr.read()) d = yaml.load(proc.stdout.read(), yaml.Loader) - d.pop('psyplot_gui', None) - ref.pop('psyplot_gui', None) + d.pop("psyplot_gui", None) + ref.pop("psyplot_gui", None) # make sure the version does not end with .dirty d["psyplot"]["version"] = d["psyplot"]["version"].replace(".dirty", "") ref["psyplot"]["version"] = ref["psyplot"]["version"].replace( @@ -177,8 +212,11 @@ def test_all_versions(self): def test_list_plugins(self): """Test to display all versions""" ref = psyplot.rcParams._plugins - proc = spr.Popen([sys.executable, '-m', 'psyplot', '-lp'], - stdout=spr.PIPE, stderr=spr.PIPE) + proc = spr.Popen( + [sys.executable, "-m", "psyplot", "-lp"], + stdout=spr.PIPE, + stderr=spr.PIPE, + ) proc.wait() self.assertFalse(proc.poll(), msg=proc.stderr.read()) d = yaml.load(proc.stdout.read(), yaml.Loader) @@ -186,15 +224,19 @@ def test_list_plugins(self): def test_list_plot_methods(self): """Test to display all versions""" - proc = spr.Popen([sys.executable, '-m', 'psyplot', '-lpm'], - stdout=spr.PIPE, stderr=spr.PIPE) + proc = spr.Popen( + [sys.executable, "-m", "psyplot", "-lpm"], + stdout=spr.PIPE, + stderr=spr.PIPE, + ) proc.wait() self.assertFalse(proc.poll(), msg=proc.stderr.read()) import psyplot.project as psy - for pm, d in psyplot.rcParams['project.plotters'].items(): + + for pm, d in psyplot.rcParams["project.plotters"].items(): try: psy.register_plotter(pm, **d) - except: + except Exception: pass ref = psy.plot._plot_methods d = yaml.load(proc.stdout.read(), yaml.Loader) diff --git a/tests/test_plotter.py b/tests/test_plotter.py index 74263dd..1066bf5 100644 --- a/tests/test_plotter.py +++ 
b/tests/test_plotter.py @@ -1,59 +1,47 @@ """Test module of the :mod:`psyplot.plotter` module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import unittest -import six import os.path as osp +import unittest +from itertools import repeat + import _base_testing as bt -from psyplot.compat.pycompat import OrderedDict -import psyplot.data as psyd import pandas as pd +import six import xarray as xr + +import psyplot.config as psyc +import psyplot.data as psyd import psyplot.plotter as psyp -from itertools import repeat from psyplot import rcParams -import psyplot.config as psyc + try: from textwrap import indent except ImportError: + def indent(text, prefix, predicate=None): # python2 - return '\n'.join(prefix + s if predicate is None or predicate(s) else s - for s in text.splitlines()) + return "\n".join( + prefix + s if predicate is None or predicate(s) else s + for s in text.splitlines() + ) docstrings = psyp.docstrings -psyc.setup_logging(osp.join(osp.dirname(__file__), 'logging.yml')) +psyc.setup_logging(osp.join(osp.dirname(__file__), "logging.yml")) -results = OrderedDict() +results = dict() class TestFormatoption(psyp.Formatoption): - removed = False @property @@ -61,12 +49,12 @@ def default(self): try: return super(TestFormatoption, self).default except KeyError: - return '' + return "" _validate = str def update(self, value): - key = '%s.%s' % (self.plotter.data.psy.arr_name, self.key) + key = "%s.%s" % (self.plotter.data.psy.arr_name, self.key) if not value: results.pop(key, None) else: @@ -76,8 +64,8 @@ def remove(self): self.removed = True -@docstrings.get_docstring(base='_testing.SimpleFmt') -@docstrings.get_sections(base='_testing.SimpleFmt') +@docstrings.get_docstring(base="_testing.SimpleFmt") +@docstrings.get_sections(base="_testing.SimpleFmt") class SimpleFmt(TestFormatoption): """ Just a simple formatoption to check the sharing possibility @@ -87,17 +75,17 @@ class SimpleFmt(TestFormatoption): str The string to use in the text""" - group = 'labels' + group = "labels" - children = ['fmt2'] + children = ["fmt2"] - dependencies = ['fmt3'] + 
dependencies = ["fmt3"] class SimpleFmt2(SimpleFmt): """%(_testing.SimpleFmt)s""" - children = ['fmt3'] + children = ["fmt3"] dependencies = [] @@ -110,7 +98,7 @@ class SimpleFmt3(SimpleFmt): -------------- %(_testing.SimpleFmt.possible_types)s""" - group = 'something' + group = "something" children = dependencies = [] @@ -126,9 +114,9 @@ class SimpleFmt3(SimpleFmt): class TestPlotter(psyp.Plotter): """A simple Plotter for testing the plotter-formatoption framework""" - fmt1 = SimpleFmt('fmt1') - fmt2 = SimpleFmt2('fmt2') - fmt3 = SimpleFmt3('fmt3') + fmt1 = SimpleFmt("fmt1") + fmt2 = SimpleFmt2("fmt2") + fmt3 = SimpleFmt3("fmt3") class TestPostFormatoption(unittest.TestCase): @@ -138,39 +126,39 @@ def test_timing(self): plotter = TestPlotter(xr.DataArray([]), enable_post=True) # test attribute for the formatoption plotter.post.test = [] - plotter.update( - post='self.test.append(1)') + plotter.update(post="self.test.append(1)") # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1]) - plotter.update(fmt1='something') + plotter.update(fmt1="something") # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1]) # -- test replot timing - plotter.update(post_timing='replot') - plotter.update(fmt1='something else') + plotter.update(post_timing="replot") + plotter.update(fmt1="something else") # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1]) - plotter.update(fmt2='test', replot=True) + plotter.update(fmt2="test", replot=True) # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1, 1]) # -- test always timing - plotter.update(post_timing='always') + plotter.update(post_timing="always") # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1, 1, 1]) - plotter.update(fmt1='okay') + plotter.update(fmt1="okay") # check if the post fmt has been updated self.assertEqual(plotter.post.test, [1, 1, 1, 1]) def test_enable(self): """Test if the warning 
is raised""" - plotter = TestPlotter(xr.DataArray([]), - post='self.ax.set_title("test")') - self.assertEqual(plotter.ax.get_title(), '') + plotter = TestPlotter( + xr.DataArray([]), post='self.ax.set_title("test")' + ) + self.assertEqual(plotter.ax.get_title(), "") plotter.enable_post = True plotter.update(post=plotter.post.value, force=True) - self.assertEqual(plotter.ax.get_title(), 'test') + self.assertEqual(plotter.ax.get_title(), "test") class PlotterTest(unittest.TestCase): @@ -195,60 +183,66 @@ def test_shared(self): """Testing the sharing of formatoptions""" plotter1 = TestPlotter(xr.DataArray([])) plotter2 = TestPlotter(xr.DataArray([])) - plotter1.data.psy.arr_name = 'test1' - plotter2.data.psy.arr_name = 'test2' + plotter1.data.psy.arr_name = "test1" + plotter2.data.psy.arr_name = "test2" results.clear() # test sharing of two formatoptions - plotter1.share(plotter2, ['fmt1', 'fmt3']) - plotter1.update(fmt1='okay', fmt3='okay2') + plotter1.share(plotter2, ["fmt1", "fmt3"]) + plotter1.update(fmt1="okay", fmt3="okay2") # check source - self.assertIn('test1.fmt1', results) - self.assertEqual(results['test1.fmt1'], 'okay') - self.assertIn('test1.fmt3', results) - self.assertEqual(results['test1.fmt3'], 'okay2') + self.assertIn("test1.fmt1", results) + self.assertEqual(results["test1.fmt1"], "okay") + self.assertIn("test1.fmt3", results) + self.assertEqual(results["test1.fmt3"], "okay2") # checked shared - self.assertIn('test2.fmt1', results) - self.assertEqual(results['test2.fmt1'], 'okay') - self.assertIn('test2.fmt3', results) - self.assertEqual(results['test2.fmt3'], 'okay2') + self.assertIn("test2.fmt1", results) + self.assertEqual(results["test2.fmt1"], "okay") + self.assertIn("test2.fmt3", results) + self.assertEqual(results["test2.fmt3"], "okay2") # unshare the formatoptions plotter1.unshare(plotter2) # check source - self.assertIn('test1.fmt1', results) - self.assertEqual(results['test1.fmt1'], 'okay') - self.assertIn('test1.fmt3', results) - 
self.assertEqual(results['test1.fmt3'], 'okay2') + self.assertIn("test1.fmt1", results) + self.assertEqual(results["test1.fmt1"], "okay") + self.assertIn("test1.fmt3", results) + self.assertEqual(results["test1.fmt3"], "okay2") # check (formerly) shared - self.assertNotIn('test2.fmt1', results, - msg='Value of fmt1: %s, in results: %s' % ( - plotter2.fmt1.value, results.get('test2.fmt1'))) - self.assertNotIn('test2.fmt3', results, - msg='Value of fmt3: %s, in results: %s' % ( - plotter2.fmt3.value, results.get('test2.fmt3'))) + self.assertNotIn( + "test2.fmt1", + results, + msg="Value of fmt1: %s, in results: %s" + % (plotter2.fmt1.value, results.get("test2.fmt1")), + ) + self.assertNotIn( + "test2.fmt3", + results, + msg="Value of fmt3: %s, in results: %s" + % (plotter2.fmt3.value, results.get("test2.fmt3")), + ) # test sharing of a group of formatoptions - plotter1.share(plotter2, 'labels') - plotter1.update(fmt1='okay', fmt2='okay2') + plotter1.share(plotter2, "labels") + plotter1.update(fmt1="okay", fmt2="okay2") # check source - self.assertIn('test1.fmt1', results) - self.assertEqual(results['test1.fmt1'], 'okay') - self.assertIn('test1.fmt2', results) - self.assertEqual(results['test1.fmt2'], 'okay2') + self.assertIn("test1.fmt1", results) + self.assertEqual(results["test1.fmt1"], "okay") + self.assertIn("test1.fmt2", results) + self.assertEqual(results["test1.fmt2"], "okay2") # check shared - self.assertIn('test2.fmt1', results) - self.assertEqual(results['test2.fmt1'], 'okay') - self.assertIn('test2.fmt2', results) - self.assertEqual(results['test2.fmt2'], 'okay2') - self.assertNotIn('test2.fmt3', results) + self.assertIn("test2.fmt1", results) + self.assertEqual(results["test2.fmt1"], "okay") + self.assertIn("test2.fmt2", results) + self.assertEqual(results["test2.fmt2"], "okay2") + self.assertNotIn("test2.fmt3", results) # unshare the plotter - plotter2.unshare_me('fmt1') - self.assertNotIn('test2.fmt1', results) - self.assertIn('test2.fmt2', results) - 
plotter2.unshare_me('labels') - self.assertNotIn('test2.fmt2', results) + plotter2.unshare_me("fmt1") + self.assertNotIn("test2.fmt1", results) + self.assertIn("test2.fmt2", results) + plotter2.unshare_me("labels") + self.assertNotIn("test2.fmt2", results) def test_auto_update(self): """Test the :attr:`psyplot.plotter.Plotter.no_auto_update` attribute""" @@ -260,11 +254,11 @@ def test_auto_update(self): self.assertTrue(plotter.no_auto_update) plotter.update(fmt1=1) - self.assertEqual(plotter['fmt1'], '') - self.assertEqual(plotter._registered_updates['fmt1'], 1) + self.assertEqual(plotter["fmt1"], "") + self.assertEqual(plotter._registered_updates["fmt1"], 1) plotter.start_update() - self.assertEqual(plotter['fmt1'], '1') + self.assertEqual(plotter["fmt1"], "1") self.assertFalse(plotter._registered_updates) data.psy.no_auto_update = False @@ -272,46 +266,48 @@ def test_auto_update(self): self.assertFalse(plotter.no_auto_update) def test_rc(self): - """Test the default values and validation - """ + """Test the default values and validation""" + def validate(s): - return s + 'okay' + return s + "okay" + rcParams.defaultParams = rcParams.defaultParams.copy() - rcParams.defaultParams['plotter.test1.fmt1'] = ('test1', validate) - rcParams.defaultParams['plotter.test1.fmt2'] = ('test2', validate) - rcParams.defaultParams['plotter.test1.fmt3'] = ('test3', validate) - rcParams.defaultParams['plotter.test2.fmt3'] = ('test3.2', validate) - rcParams.update(**{ - key: val[0] for key, val in rcParams.defaultParams.items()}) + rcParams.defaultParams["plotter.test1.fmt1"] = ("test1", validate) + rcParams.defaultParams["plotter.test1.fmt2"] = ("test2", validate) + rcParams.defaultParams["plotter.test1.fmt3"] = ("test3", validate) + rcParams.defaultParams["plotter.test2.fmt3"] = ("test3.2", validate) + rcParams.update( + **{key: val[0] for key, val in rcParams.defaultParams.items()} + ) class ThisTestPlotter(TestPlotter): - _rcparams_string = ['plotter.test1.'] + _rcparams_string = 
["plotter.test1."] class ThisTestPlotter2(ThisTestPlotter): - _rcparams_string = ['plotter.test2.'] + _rcparams_string = ["plotter.test2."] plotter1 = ThisTestPlotter(xr.DataArray([])) plotter2 = ThisTestPlotter2(xr.DataArray([])) # plotter1 - self.assertEqual(plotter1.fmt1.value, 'test1okay') - self.assertEqual(plotter1.fmt2.value, 'test2okay') - self.assertEqual(plotter1.fmt3.value, 'test3okay') + self.assertEqual(plotter1.fmt1.value, "test1okay") + self.assertEqual(plotter1.fmt2.value, "test2okay") + self.assertEqual(plotter1.fmt3.value, "test3okay") # plotter2 - self.assertEqual(plotter2.fmt1.value, 'test1okay') - self.assertEqual(plotter2.fmt2.value, 'test2okay') - self.assertEqual(plotter2.fmt3.value, 'test3.2okay') + self.assertEqual(plotter2.fmt1.value, "test1okay") + self.assertEqual(plotter2.fmt2.value, "test2okay") + self.assertEqual(plotter2.fmt3.value, "test3.2okay") def test_fmt_connections(self): """Test the order of the updates""" arr = xr.DataArray([]) - arr.psy.arr_name = 'arr0' - plotter = TestPlotter(arr, - fmt1='test', fmt2='test2', fmt3='test3') + arr.psy.arr_name = "arr0" + plotter = TestPlotter(arr, fmt1="test", fmt2="test2", fmt3="test3") # check the initialization order - self.assertEqual(list(results.keys()), - ['arr0.fmt3', 'arr0.fmt2', 'arr0.fmt1']) + self.assertEqual( + list(results.keys()), ["arr0.fmt3", "arr0.fmt2", "arr0.fmt1"] + ) # check the connection properties self.assertIs(plotter.fmt1.fmt2, plotter.fmt2) @@ -320,20 +316,27 @@ def test_fmt_connections(self): # check the update results.clear() - plotter.update(fmt2='something', fmt3='else') - self.assertEqual(list(results.keys()), - ['arr0.fmt3', 'arr0.fmt2', 'arr0.fmt1']) - self.assertEqual(plotter.fmt1.value, 'test') - self.assertEqual(plotter.fmt2.value, 'something') - self.assertEqual(plotter.fmt3.value, 'else') - - self.assertEqual(list(plotter._sorted_by_priority( - [plotter.fmt1, plotter.fmt2, plotter.fmt3])), - [plotter.fmt3, plotter.fmt2, plotter.fmt1]) + 
plotter.update(fmt2="something", fmt3="else") + self.assertEqual( + list(results.keys()), ["arr0.fmt3", "arr0.fmt2", "arr0.fmt1"] + ) + self.assertEqual(plotter.fmt1.value, "test") + self.assertEqual(plotter.fmt2.value, "something") + self.assertEqual(plotter.fmt3.value, "else") + + self.assertEqual( + list( + plotter._sorted_by_priority( + [plotter.fmt1, plotter.fmt2, plotter.fmt3] + ) + ), + [plotter.fmt3, plotter.fmt2, plotter.fmt1], + ) if six.PY3: with self.assertRaisesRegex( - TypeError, "got an unexpected keyword argument 'wrong'"): - SimpleFmt('fmt1', wrong='something') + TypeError, "got an unexpected keyword argument 'wrong'" + ): + SimpleFmt("fmt1", wrong="something") def test_data_props_array(self): """Test the data properties of Formatoptions with a DataArray""" @@ -349,8 +352,7 @@ def test_data_props_list(self): """Test the data properties of Formatoptions with an InteractiveList""" data = psyd.InteractiveList([xr.DataArray([]), xr.DataArray([])]) plot_data = data.copy(True) - plot_data.extend([xr.DataArray([]), xr.DataArray([])], - new_name=True) + plot_data.extend([xr.DataArray([]), xr.DataArray([])], new_name=True) plotter = TestPlotter(data) plotter.plot_data = plot_data plot_data = plotter.plot_data # the data might have been copied @@ -387,8 +389,7 @@ def test_decoder_list(self): """Test the decoder property with an InteractiveList""" data = psyd.InteractiveList([xr.DataArray([]), xr.DataArray([])]) plot_data = data.copy(True) - plot_data.extend([xr.DataArray([]), xr.DataArray([])], - new_name=True) + plot_data.extend([xr.DataArray([]), xr.DataArray([])], new_name=True) for arr in data: arr.psy.init_accessor(decoder=psyd.CFDecoder(arr.psy.base)) plotter = TestPlotter(data) @@ -407,8 +408,9 @@ def test_decoder_list(self): decoder = psyd.CFDecoder(data[0].psy.base) plotter.fmt2.decoder = decoder for i, d2 in enumerate(plotter.plot_data_decoder): - self.assertIs(d2, decoder, - msg='Decoder %i has been set wrong!' 
% i) + self.assertIs( + d2, decoder, msg="Decoder %i has been set wrong!" % i + ) self.assertEqual(plotter.fmt2.decoder, plotter.plot_data_decoder) # test with index in list of plot_data outside raw_data @@ -416,8 +418,11 @@ def test_decoder_list(self): decoder2 = psyd.CFDecoder(data[0].psy.base) plotter.fmt1.decoder = decoder2 for i, d2 in enumerate(plotter.plot_data_decoder): - self.assertIs(d2, decoder if i != 3 else decoder2, - msg='Decoder %i has been set wrong!' % i) + self.assertIs( + d2, + decoder if i != 3 else decoder2, + msg="Decoder %i has been set wrong!" % i, + ) self.assertIsInstance(plotter.fmt1.decoder, psyd.CFDecoder) self.assertIs(plotter.fmt1.decoder, plotter.plot_data_decoder[3]) @@ -425,8 +430,7 @@ def test_any_decoder(self): """Test the decoder property with an InteractiveList""" data = psyd.InteractiveList([xr.DataArray([]), xr.DataArray([])]) plot_data = data.copy(True) - plot_data.extend([xr.DataArray([]), xr.DataArray([])], - new_name=True) + plot_data.extend([xr.DataArray([]), xr.DataArray([])], new_name=True) for arr in data: arr.psy.init_accessor(decoder=psyd.CFDecoder(arr.psy.base)) plotter = TestPlotter(data) @@ -437,177 +441,244 @@ def test_any_decoder(self): decoder = psyd.CFDecoder(data[0].psy.base) plotter.fmt2.decoder = decoder for i, d2 in enumerate(plotter.plot_data_decoder): - self.assertIs(d2, decoder, - msg='Decoder %i has been set wrong!' % i) + self.assertIs( + d2, decoder, msg="Decoder %i has been set wrong!" 
% i + ) self.assertEqual(plotter.fmt2.decoder, plotter.plot_data_decoder) self.assertIs(plotter.fmt2.any_decoder, decoder) def test_get_enhanced_attrs_01_arr(self): - """Test the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs` method - """ - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) + """Test the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs` method""" + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) plotter = TestPlotter(ds.t2m) attrs = ds.t2m.attrs.copy() for key, val in ds.lon.attrs.items(): - attrs['x' + key] = val + attrs["x" + key] = val for key, val in ds.lat.attrs.items(): - attrs['y' + key] = val + attrs["y" + key] = val for key, val in ds.lev.attrs.items(): - attrs['z' + key] = val + attrs["z" + key] = val for key, val in ds.time.attrs.items(): - attrs['t' + key] = val - attrs['xname'] = 'lon' - attrs['yname'] = 'lat' - attrs['zname'] = 'lev' - attrs['tname'] = 'time' - attrs['name'] = 't2m' - self.assertEqual(dict(plotter.get_enhanced_attrs(plotter.plot_data)), - dict(attrs)) + attrs["t" + key] = val + attrs["xname"] = "lon" + attrs["yname"] = "lat" + attrs["zname"] = "lev" + attrs["tname"] = "time" + attrs["name"] = "t2m" + self.assertEqual( + dict(plotter.get_enhanced_attrs(plotter.plot_data)), dict(attrs) + ) def test_get_enhanced_attrs_02_list(self): - """Test the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs` method - """ - ds = psyd.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plotter = TestPlotter(psyd.InteractiveList( - ds.psy.create_list(name=['t2m', 'u'], x=0, t=0))) + """Test the :meth:`psyplot.plotter.Plotter.get_enhanced_attrs` method""" + ds = psyd.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plotter = TestPlotter( + psyd.InteractiveList( + ds.psy.create_list(name=["t2m", "u"], x=0, t=0) + ) + ) attrs = {} for key, val in ds.t2m.attrs.items(): - attrs['t2m' + key] = val + attrs["t2m" + key] = val for key, val in ds.u.attrs.items(): - attrs['u' + key] = val + attrs["u" + key] = val for key, val in 
ds.lon.attrs.items(): - attrs['x' + key] = val + attrs["x" + key] = val for key, val in ds.lat.attrs.items(): - attrs['y' + key] = val - attrs['x' + key] = val # overwrite the longitude information + attrs["y" + key] = val + attrs["x" + key] = val # overwrite the longitude information # the plot_data has priority over the base variable, therefore we # the plotter should replace the y information with the z information for key, val in ds.lev.attrs.items(): - attrs['z' + key] = val - attrs['y' + key] = val # overwrite the latitude information + attrs["z" + key] = val + attrs["y" + key] = val # overwrite the latitude information for key, val in ds.time.attrs.items(): - attrs['t' + key] = val + attrs["t" + key] = val for key in set(ds.t2m.attrs) & set(ds.u.attrs): if ds.t2m.attrs[key] == ds.u.attrs[key]: attrs[key] = ds.t2m.attrs[key] - attrs['zname'] = attrs['yname'] = 'lev' - attrs['xname'] = 'lat' - attrs['tname'] = 'time' - attrs['lon'] = attrs['x'] = ds.lon.values[0] - attrs['time'] = attrs['t'] = pd.to_datetime( - ds.time.values[0]).isoformat() + attrs["zname"] = attrs["yname"] = "lev" + attrs["xname"] = "lat" + attrs["tname"] = "time" + attrs["lon"] = attrs["x"] = ds.lon.values[0] + attrs["time"] = attrs["t"] = pd.to_datetime( + ds.time.values[0] + ).isoformat() self.maxDiff = None - self.assertEqual(dict(plotter.get_enhanced_attrs(plotter.plot_data)), - dict(attrs)) + self.assertEqual( + dict(plotter.get_enhanced_attrs(plotter.plot_data)), dict(attrs) + ) def test_show_keys(self): """Test the :meth:`psyplot.plotter.Plotter.show_keys` method""" plotter = TestPlotter(xr.DataArray([])) - s = plotter.show_keys(['fmt1', 'fmt2', 'fmt3'], func=str) - self.assertEqual(s, - '+------+------+------+\n' - '| fmt1 | fmt2 | fmt3 |\n' - '+------+------+------+') - s = plotter.show_keys(['fmt1', 'fmt2', 'fmt3'], func=str, grouped=True) - title = psyp.groups['labels'] - self.assertEqual(s, - '*' * len(title) + '\n' + - title + '\n' + - '*' * len(title) + '\n' - 
'+------+------+\n' - '| fmt1 | fmt2 |\n' - '+------+------+\n' - '\n' - '*********\n' - 'something\n' - '*********\n' - '+------+\n' - '| fmt3 |\n' - '+------+') - s = plotter.show_keys(['fmt1', 'something'], func=str) - self.assertEqual(s, - '+------+------+\n' - '| fmt1 | fmt3 |\n' - '+------+------+') + s = plotter.show_keys(["fmt1", "fmt2", "fmt3"], func=str) + self.assertEqual( + s, + "+------+------+------+\n" + "| fmt1 | fmt2 | fmt3 |\n" + "+------+------+------+", + ) + s = plotter.show_keys(["fmt1", "fmt2", "fmt3"], func=str, grouped=True) + title = psyp.groups["labels"] + self.assertEqual( + s, + "*" * len(title) + "\n" + title + "\n" + "*" * len(title) + "\n" + "+------+------+\n" + "| fmt1 | fmt2 |\n" + "+------+------+\n" + "\n" + "*********\n" + "something\n" + "*********\n" + "+------+\n" + "| fmt3 |\n" + "+------+", + ) + s = plotter.show_keys(["fmt1", "something"], func=str) + self.assertEqual( + s, "+------+------+\n" "| fmt1 | fmt3 |\n" "+------+------+" + ) if six.PY3: - with self.assertWarnsRegex(UserWarning, - '(?i)unknown formatoption keyword'): - s = plotter.show_keys(['fmt1', 'wrong', 'something'], func=str) - self.assertEqual(s, - '+------+------+\n' - '| fmt1 | fmt3 |\n' - '+------+------+') + with self.assertWarnsRegex( + UserWarning, "(?i)unknown formatoption keyword" + ): + s = plotter.show_keys(["fmt1", "wrong", "something"], func=str) + self.assertEqual( + s, + "+------+------+\n" "| fmt1 | fmt3 |\n" "+------+------+", + ) def test_show_docs(self): """Test the :meth:`psyplot.plotter.Plotter.show_docs` method""" plotter = TestPlotter(xr.DataArray([])) s = plotter.show_docs(func=str) self.maxDiff = None - self.assertEqual(s, '\n'.join([ - 'fmt1', '====', SimpleFmt.__doc__, '', - 'fmt2', '====', SimpleFmt2.__doc__, '', - 'fmt3', '====', SimpleFmt3.__doc__, '', - 'post', '====', psyp.PostProcessing.__doc__, '', - 'post_timing', '===========', psyp.PostTiming.__doc__, ''])) - s = plotter.show_docs(['fmt1', 'fmt2', 'fmt3'], func=str, 
grouped=True) - title = psyp.groups['labels'] - self.assertEqual(s, '\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', '====', SimpleFmt.__doc__, '', - 'fmt2', '====', SimpleFmt2.__doc__, '', '', - '*********', - 'something', - '*********', - 'fmt3', '====', SimpleFmt3.__doc__])) + self.assertEqual( + s, + "\n".join( + [ + "fmt1", + "====", + SimpleFmt.__doc__, + "", + "fmt2", + "====", + SimpleFmt2.__doc__, + "", + "fmt3", + "====", + SimpleFmt3.__doc__, + "", + "post", + "====", + psyp.PostProcessing.__doc__, + "", + "post_timing", + "===========", + psyp.PostTiming.__doc__, + "", + ] + ), + ) + s = plotter.show_docs(["fmt1", "fmt2", "fmt3"], func=str, grouped=True) + title = psyp.groups["labels"] + self.assertEqual( + s, + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + "====", + SimpleFmt.__doc__, + "", + "fmt2", + "====", + SimpleFmt2.__doc__, + "", + "", + "*********", + "something", + "*********", + "fmt3", + "====", + SimpleFmt3.__doc__, + ] + ), + ) def test_show_summaries(self): """Test the :meth:`psyplot.plotter.Plotter.show_summaries` method""" plotter = TestPlotter(xr.DataArray([])) s = plotter.show_summaries(func=str) - self.assertEqual(s, '\n'.join([ - 'fmt1', indent(SimpleFmt.__doc__.splitlines()[0], ' '), - 'fmt2', indent(SimpleFmt2.__doc__.splitlines()[0], ' '), - 'fmt3', indent(SimpleFmt3.__doc__.splitlines()[0], ' '), - 'post', indent(psyp.PostProcessing.__doc__.splitlines()[0], - ' '), - 'post_timing', indent(psyp.PostTiming.__doc__.splitlines()[0], - ' ')])) - s = plotter.show_summaries(['fmt1', 'fmt2', 'fmt3'], func=str, - grouped=True) - title = psyp.groups['labels'] - self.assertEqual(s, '\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', indent(SimpleFmt.__doc__.splitlines()[0], ' '), - 'fmt2', indent(SimpleFmt2.__doc__.splitlines()[0], ' '), '', - '*********', - 'something', - '*********', - 'fmt3', indent(SimpleFmt3.__doc__.splitlines()[0], ' ')] - )) + self.assertEqual( 
+ s, + "\n".join( + [ + "fmt1", + indent(SimpleFmt.__doc__.splitlines()[0], " "), + "fmt2", + indent(SimpleFmt2.__doc__.splitlines()[0], " "), + "fmt3", + indent(SimpleFmt3.__doc__.splitlines()[0], " "), + "post", + indent( + psyp.PostProcessing.__doc__.splitlines()[0], " " + ), + "post_timing", + indent(psyp.PostTiming.__doc__.splitlines()[0], " "), + ] + ), + ) + s = plotter.show_summaries( + ["fmt1", "fmt2", "fmt3"], func=str, grouped=True + ) + title = psyp.groups["labels"] + self.assertEqual( + s, + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + indent(SimpleFmt.__doc__.splitlines()[0], " "), + "fmt2", + indent(SimpleFmt2.__doc__.splitlines()[0], " "), + "", + "*********", + "something", + "*********", + "fmt3", + indent(SimpleFmt3.__doc__.splitlines()[0], " "), + ] + ), + ) def test_has_changed(self): """Test the :meth:`psyplot.plotter.Plotter.show_summaries` method""" - plotter = TestPlotter(xr.DataArray([]), fmt1='something') - self.assertEqual(plotter['fmt1'], 'something') + plotter = TestPlotter(xr.DataArray([]), fmt1="something") + self.assertEqual(plotter["fmt1"], "something") for i in range(1, 4): - key = 'fmt%i' % i + key = "fmt%i" % i fmto = getattr(plotter, key) - self.assertEqual(plotter.has_changed(key), - [fmto.default, plotter[key]], - msg="Wrong value for " + key) + self.assertEqual( + plotter.has_changed(key), + [fmto.default, plotter[key]], + msg="Wrong value for " + key, + ) plotter.update() - self.assertIsNone(plotter.has_changed('fmt1')) - plotter.update(fmt1='test', fmt3=plotter.fmt3.default, force=True) - self.assertEqual(plotter.has_changed('fmt1'), - ['something', 'test']) - self.assertIsNone(plotter.has_changed('fmt2')) - self.assertIsNone(plotter.has_changed('fmt3', include_last=False)) - self.assertEqual(plotter.has_changed('fmt3'), - [plotter.fmt3.default, plotter.fmt3.default]) + self.assertIsNone(plotter.has_changed("fmt1")) + plotter.update(fmt1="test", fmt3=plotter.fmt3.default, force=True) + 
self.assertEqual(plotter.has_changed("fmt1"), ["something", "test"]) + self.assertIsNone(plotter.has_changed("fmt2")) + self.assertIsNone(plotter.has_changed("fmt3", include_last=False)) + self.assertEqual( + plotter.has_changed("fmt3"), + [plotter.fmt3.default, plotter.fmt3.default], + ) def test_insert_additionals(self): """Test whether the right formatoptions are inserted""" @@ -624,7 +695,7 @@ class PlotFmt(TestFormatoption): plot_fmt = True def make_plot(self): - results['plot_made'] = True + results["plot_made"] = True class DataDependentFmt(TestFormatoption): priority = psyp.START @@ -636,79 +707,94 @@ class DataDependentFmt2(TestFormatoption): data_dependent = True class ThisTestPlotter(TestPlotter): - fmt_start = StartFormatoption('fmt_start') - fmt_plot = PlotFmt('fmt_plot') - fmt_plot1 = BeforePlottingFmt('fmt_plot1') - fmt_plot2 = BeforePlottingFmt('fmt_plot2') - fmt_data1 = DataDependentFmt('fmt_data1') - fmt_data2 = DataDependentFmt2('fmt_data2') + fmt_start = StartFormatoption("fmt_start") + fmt_plot = PlotFmt("fmt_plot") + fmt_plot1 = BeforePlottingFmt("fmt_plot1") + fmt_plot2 = BeforePlottingFmt("fmt_plot2") + fmt_data1 = DataDependentFmt("fmt_data1") + fmt_data2 = DataDependentFmt2("fmt_data2") def key_name(key): return "%s.%s" % (aname, key) plotter = ThisTestPlotter(xr.DataArray([])) - for key in set(plotter) - {'post', 'post_timing'}: + for key in set(plotter) - {"post", "post_timing"}: plotter[key] = 999 aname = plotter.data.psy.arr_name results.clear() # test whether everything is updated plotter.update(fmt_start=1) - self.assertTrue(results.pop('plot_made')) - self.assertEqual(list(results), - [key_name('fmt_start'), key_name('fmt_plot'), - key_name('fmt_data2')]) + self.assertTrue(results.pop("plot_made")) + self.assertEqual( + list(results), + [ + key_name("fmt_start"), + key_name("fmt_plot"), + key_name("fmt_data2"), + ], + ) results.clear() # test whether the plot is updated plotter.update(fmt_plot1=1) - self.assertEqual(list(results), 
[key_name('fmt_plot1'), 'plot_made']) + self.assertEqual(list(results), [key_name("fmt_plot1"), "plot_made"]) results.clear() # test whether the data dependent formatoptions are updated plotter.update(replot=True) - self.assertTrue(results.pop('plot_made')) - self.assertEqual(list(results), - [key_name('fmt_plot'), key_name('fmt_data2')]) + self.assertTrue(results.pop("plot_made")) + self.assertEqual( + list(results), [key_name("fmt_plot"), key_name("fmt_data2")] + ) results.clear() depend = 1 plotter.update(replot=True) - self.assertTrue(results.pop('plot_made')) - self.assertEqual(sorted(list(results)), - sorted([key_name('fmt_plot'), key_name('fmt_data1'), - key_name('fmt_data2')])) + self.assertTrue(results.pop("plot_made")) + self.assertEqual( + sorted(list(results)), + sorted( + [ + key_name("fmt_plot"), + key_name("fmt_data1"), + key_name("fmt_data2"), + ] + ), + ) def test_reinit(self): """Test the reinitialization of a plotter""" - class ClearingFormatoption(SimpleFmt): + class ClearingFormatoption(SimpleFmt): def remove(self): - results['removed'] = True + results["removed"] = True requires_clearing = True class AnotherFormatoption(SimpleFmt): - def remove(self): - results['removed2'] = True + results["removed2"] = True class ThisTestPlotter(TestPlotter): - fmt_clear = ClearingFormatoption('fmt_clear') - fmt_remove = AnotherFormatoption('fmt_remove') + fmt_clear = ClearingFormatoption("fmt_clear") + fmt_remove = AnotherFormatoption("fmt_remove") + import matplotlib.pyplot as plt + ax = plt.axes(label="new axis") ax.plot([6, 7]) plotter = ThisTestPlotter() - keys = list(set(plotter) - {'post', 'post_timing'}) - plotter = ThisTestPlotter(xr.DataArray([]), ax=ax, - **dict(zip(keys, repeat(1)))) + keys = list(set(plotter) - {"post", "post_timing"}) + plotter = ThisTestPlotter( + xr.DataArray([]), ax=ax, **dict(zip(keys, repeat(1))) + ) - self.assertNotIn('removed', results) - self.assertNotIn('removed2', results) + self.assertNotIn("removed", results) + 
self.assertNotIn("removed2", results) arr_name = plotter.data.psy.arr_name for key in keys: self.assertIn("%s.%s" % (arr_name, key), results) @@ -717,8 +803,8 @@ class ThisTestPlotter(TestPlotter): results.clear() plotter.reinit() - self.assertIn('removed', results) - self.assertIn('removed2', results) + self.assertIn("removed", results) + self.assertIn("removed2", results) for key in keys: self.assertIn("%s.%s" % (arr_name, key), results) self.assertFalse(ax.lines) # axes should be cleared @@ -726,10 +812,11 @@ class ThisTestPlotter(TestPlotter): results.clear() ax.plot([6, 7]) - keys.remove('fmt_clear') - keys.remove('fmt_remove') - plotter = TestPlotter(xr.DataArray([]), ax=ax, - **dict(zip(keys, repeat(1)))) + keys.remove("fmt_clear") + keys.remove("fmt_remove") + plotter = TestPlotter( + xr.DataArray([]), ax=ax, **dict(zip(keys, repeat(1))) + ) for key in keys: self.assertIn("%s.%s" % (arr_name, key), results) self.assertTrue(ax.lines) # axes should not be cleared @@ -743,18 +830,19 @@ class ThisTestPlotter(TestPlotter): def test_check_data(self): """Tests the :meth:`psyplot.plotter.Plotter.check_data` method""" - self.assertEqual(TestPlotter.check_data('test', ('dim1', ), True), - ([True], [''])) + self.assertEqual( + TestPlotter.check_data("test", ("dim1",), True), ([True], [""]) + ) checks, messages = TestPlotter.check_data( - ['test1', 'test2'], [('dim1', )], [False, False]) + ["test1", "test2"], [("dim1",)], [False, False] + ) self.assertEqual(checks, [False, False]) - self.assertIn('not the same', messages[0]) - self.assertIn('not the same', messages[1]) + self.assertIn("not the same", messages[0]) + self.assertIn("not the same", messages[1]) class FormatoptionTest(unittest.TestCase): - """A class to test the :class:`psyplot.plotter.Formatoption` class - """ + """A class to test the :class:`psyplot.plotter.Formatoption` class""" def setUp(self): results.clear() @@ -768,8 +856,9 @@ def tearDown(self): def test_data(self): """Test the 
:attr:`psyplot.plotter.Formatoption.data` attribute""" + class OtherTestPlotter(TestPlotter): - fmt4 = SimpleFmt3('fmt4', index_in_list=2) + fmt4 = SimpleFmt3("fmt4", index_in_list=2) raw_data = psyd.InteractiveList([xr.DataArray([]) for _ in range(4)]) plotter = OtherTestPlotter(raw_data) @@ -788,8 +877,9 @@ class OtherTestPlotter(TestPlotter): plotter.fmt3.data = raw_data for i, arr in enumerate(plotter.plot_data): - self.assertIs(arr, raw_data[i], - msg='Wrong array at position %i' % i) + self.assertIs( + arr, raw_data[i], msg="Wrong array at position %i" % i + ) # undo the setting plotter.fmt3.data = plot_data @@ -797,13 +887,17 @@ class OtherTestPlotter(TestPlotter): # plot data it_data = plotter.fmt3.iter_data for i, arr in enumerate(plotter.fmt3.data): - self.assertIs(next(it_data), arr, - msg='Wrong array at position %i' % i) + self.assertIs( + next(it_data), arr, msg="Wrong array at position %i" % i + ) # raw data it_data = plotter.fmt3.iter_raw_data for i, arr in enumerate(plotter.fmt3.raw_data): - self.assertIs(next(it_data), arr, - msg='Wrong raw data array at position %i' % i) + self.assertIs( + next(it_data), + arr, + msg="Wrong raw data array at position %i" % i, + ) self.assertIs(next(plotter.fmt4.iter_data), plot_data[2]) self.assertIs(next(plotter.fmt4.iter_raw_data), raw_data[2]) @@ -821,62 +915,70 @@ def validate_false(val): return val try: + class RcTestPlotter(TestPlotter): - _rcparams_string = ['plotter.test.data.'] + _rcparams_string = ["plotter.test.data."] + # delete the validation del TestFormatoption._validate - rcParams.defaultParams['plotter.test.data.fmt1'] = (1, validate) - rcParams.defaultParams['plotter.test.data.fmt3'] = ( - 3, validate_false) + rcParams.defaultParams["plotter.test.data.fmt1"] = (1, validate) + rcParams.defaultParams["plotter.test.data.fmt3"] = ( + 3, + validate_false, + ) rcParams.update_from_defaultParams() plotter = RcTestPlotter(xr.DataArray([])) - self.assertEqual(checks, [1], - msg='Validation function has 
not been called!') + self.assertEqual( + checks, [1], msg="Validation function has not been called!" + ) # test general functionality - self.assertEqual(plotter.fmt1.default_key, - 'plotter.test.data.fmt1') - self.assertEqual(plotter.fmt3.default_key, - 'plotter.test.data.fmt3') + self.assertEqual( + plotter.fmt1.default_key, "plotter.test.data.fmt1" + ) + self.assertEqual( + plotter.fmt3.default_key, "plotter.test.data.fmt3" + ) if six.PY3: - with self.assertRaisesRegex(KeyError, 'fmt2'): + with self.assertRaisesRegex(KeyError, "fmt2"): plotter.fmt2.default_key - self.assertEqual(plotter['fmt1'], 1) + self.assertEqual(plotter["fmt1"], 1) self.assertEqual(plotter.fmt1.default, 1) self.assertFalse(plotter.fmt2.value) self.assertIs(plotter.fmt1.validate, validate) # test after update plotter.update(fmt1=8) - self.assertEqual(checks, [1, 8], - msg='Validation function has not been called!') + self.assertEqual( + checks, [1, 8], msg="Validation function has not been called!" + ) self.assertEqual(plotter.fmt1.value, 8) - self.assertEqual(plotter['fmt1'], 8) + self.assertEqual(plotter["fmt1"], 8) self.assertEqual(plotter.fmt1.default, 1) # test false validation if six.PY3: - with self.assertWarnsRegex(RuntimeWarning, - "Could not find a validation " - "function"): + with self.assertWarnsRegex( + RuntimeWarning, "Could not find a validation " "function" + ): plotter.fmt2.validate - with self.assertRaisesRegex(ValueError, 'Expected ValueError'): + with self.assertRaisesRegex(ValueError, "Expected ValueError"): plotter.update(fmt3=4) plotter.update(fmt3=3) plotter.fmt2.validate = validate plotter.update(fmt2=9) self.assertEqual(checks, [1, 8, 9]) self.assertEqual(plotter.fmt2.value, 9) - except: + except Exception: raise finally: TestFormatoption._validate = str def test_groupname(self): if not six.PY2: - with self.assertWarnsRegex(RuntimeWarning, - 'Unknown formatoption group'): - self.assertEqual(TestPlotter.fmt3.groupname, 'something') - 
self.assertEqual(TestPlotter.fmt1.groupname, - psyp.groups['labels']) + with self.assertWarnsRegex( + RuntimeWarning, "Unknown formatoption group" + ): + self.assertEqual(TestPlotter.fmt3.groupname, "something") + self.assertEqual(TestPlotter.fmt1.groupname, psyp.groups["labels"]) class TestDictFormatoption(unittest.TestCase): @@ -884,7 +986,6 @@ class TestDictFormatoption(unittest.TestCase): def test_update(self): class TestDictFormatoption(psyp.DictFormatoption): - @property def default(self): try: @@ -898,8 +999,7 @@ def update(self, value): pass class ThisTestPlotter(TestPlotter): - - fmt4 = TestDictFormatoption('fmt4') + fmt4 = TestDictFormatoption("fmt4") plotter = ThisTestPlotter(xr.DataArray([])) @@ -917,5 +1017,6 @@ class ThisTestPlotter(TestPlotter): plotter.update(fmt4=None) self.assertEqual(plotter.fmt4.value, {}) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/tests/test_plugin/psyplot_test/__init__.py b/tests/test_plugin/psyplot_test/__init__.py index 5d8c7fe..d4af897 100644 --- a/tests/test_plugin/psyplot_test/__init__.py +++ b/tests/test_plugin/psyplot_test/__init__.py @@ -1,24 +1,8 @@ """Dummy psyplot plugin test.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. 
+# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only diff --git a/tests/test_plugin/psyplot_test/plotter.py b/tests/test_plugin/psyplot_test/plotter.py index 3735d21..bc93a7a 100644 --- a/tests/test_plugin/psyplot_test/plotter.py +++ b/tests/test_plugin/psyplot_test/plotter.py @@ -3,31 +3,14 @@ # The plotter in this module has been registered by the rcParams in the plugin # package -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -from psyplot.plotter import Plotter, Formatoption -from psyplot_test.plugin import rcParams +from psyplot.plotter import Formatoption, Plotter class TestFmt(Formatoption): @@ -40,5 +23,4 @@ def update(self, value): class TestPlotter(Plotter): - - fmt1 = TestFmt('fmt1') + fmt1 = TestFmt("fmt1") diff --git a/tests/test_plugin/psyplot_test/plugin.py b/tests/test_plugin/psyplot_test/plugin.py index 132a491..c88e40e 100644 --- a/tests/test_plugin/psyplot_test/plugin.py +++ b/tests/test_plugin/psyplot_test/plugin.py @@ -1,41 +1,32 @@ """Dummy plugin file.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only from psyplot.config.rcsetup import RcParams, validate_dict - -plugin_version = '1.0.0' - - -rcParams = RcParams(defaultParams={ - 'test': [1, lambda i: int(i)], - 'project.plotters': [ - {'test_plotter': { - 'module': 'psyplot_test.plotter', - 'plotter_name': 'TestPlotter', 'import_plotter': True}}, - validate_dict]}) +plugin_version = "1.0.0" + + +rcParams = RcParams( + defaultParams={ + "test": [1, lambda i: int(i)], + "project.plotters": [ + { + "test_plotter": { + "module": "psyplot_test.plotter", + "plotter_name": "TestPlotter", + "import_plotter": True, + } + }, + validate_dict, + ], + } +) rcParams.update_from_defaultParams() @@ -47,7 +38,7 @@ def test_patch(plotter_d, versions): if not checking_patch: raise ValueError("Accidently applied the patch!") - patch_check.append({'plotter': plotter_d, 'versions': versions}) + patch_check.append({"plotter": plotter_d, "versions": versions}) -patches = {('psyplot_test.plotter', 'TestPlotter'): test_patch} +patches = {("psyplot_test.plotter", "TestPlotter"): test_patch} diff --git a/tests/test_plugin/setup.py b/tests/test_plugin/setup.py index 1ac6805..62e821c 100644 --- a/tests/test_plugin/setup.py +++ b/tests/test_plugin/setup.py @@ -1,32 +1,22 @@ -from setuptools import setup, find_packages +from setuptools import find_packages, setup -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only -setup(name='psyplot_test', - version='1.0.0', - license="GPLv2", - packages=find_packages(exclude=['docs', 'tests*', 'examples']), - entry_points={'psyplot': ['plugin=psyplot_test.plugin', - 'patches=psyplot_test.plugin:patches']}, - zip_safe=False) +setup( + name="psyplot_test", + version="1.0.0", + license="GPLv2", + packages=find_packages(exclude=["docs", "tests*", "examples"]), + entry_points={ + "psyplot": [ + "plugin=psyplot_test.plugin", + "patches=psyplot_test.plugin:patches", + ] + }, + zip_safe=False, +) diff --git a/tests/test_plugin_template.py b/tests/test_plugin_template.py deleted file mode 100644 index 4425d05..0000000 --- a/tests/test_plugin_template.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Test script for the psyplot.plugin_template module.""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. -# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . - -import _base_testing as bt -import os.path as osp -import unittest -import tempfile -import psyplot.plugin_template as pp - - -class TestPluginTemplate(unittest.TestCase): - """Test case for the psyplot.plugin_template module""" - - def test_main(self): - tempdir = tempfile.mkdtemp() - target = osp.join(tempdir, 'test-plugin') - pp.main([target]) - self.assertTrue(osp.exists(target), msg=target + ' is missing!') - setup_script = osp.join(target, 'setup.py') - self.assertTrue(osp.exists(setup_script), - msg=setup_script + ' is missing!') - plugin_file = osp.join(target, 'test_plugin', 'plugin.py') - self.assertTrue(osp.exists(plugin_file), - msg=plugin_file + ' is missing!') diff --git a/tests/test_project.py b/tests/test_project.py index c77d536..ec6ed21 100644 --- a/tests/test_project.py +++ b/tests/test_project.py @@ -1,48 +1,35 @@ """Test module of the :mod:`psyplot.project` module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only import os import os.path as osp import shutil -import six -import pytest import unittest -import yaml from itertools import chain + import _base_testing as bt +import matplotlib.pyplot as plt +import pytest +import six import test_data as td import test_plotter as tp import xarray as xr +import yaml + import psyplot.data as psyd import psyplot.plotter as psyp import psyplot.project as psy -import matplotlib.pyplot as plt from psyplot.config.rcsetup import get_configdir try: from cdo import Cdo + Cdo() except Exception: with_cdo = False @@ -69,66 +56,80 @@ def get_row_num(ax): @pytest.fixture def project(): try: - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) except ValueError: pass yield psy.Project() for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) + @pytest.mark.parametrize( - "preset,path", [("test", osp.join(get_configdir(), 'presets', 'test.yml')), - ("test.yml", osp.join(get_configdir(), 'presets', - 'test.yml')), - ("test.yml", "test.yml")]) + "preset,path", + [ + ("test", osp.join(get_configdir(), "presets", "test.yml")), + ("test.yml", osp.join(get_configdir(), "presets", "test.yml")), + ("test.yml", "test.yml"), + ], +) def test_load_preset(project, preset, 
path): if osp.dirname(path): os.makedirs(osp.dirname(path), exist_ok=True) - with open(path, 'w') as f: + with open(path, "w") as f: yaml.dump({"fmt1": "test", "fmt2": "this should be ignored"}, f) try: - sp = project.plot.test_plotter(xr.Dataset({"x": (('a'), [1])})) + sp = project.plot.test_plotter(xr.Dataset({"x": (("a"), [1])})) sp.load_preset(preset) plotter = sp.plotters[0] - assert plotter.fmt1.value == 'test' + assert plotter.fmt1.value == "test" finally: os.remove(path) def test_safe_load_preset(project, tmpdir): import matplotlib.pyplot as plt - preset = osp.join(tmpdir, 'test.yml') - with open(preset, 'w') as f: - yaml.dump({'cmap': plt.get_cmap('Reds')}, f) + + preset = osp.join(tmpdir, "test.yml") + with open(preset, "w") as f: + yaml.dump({"cmap": plt.get_cmap("Reds")}, f) with pytest.raises(yaml.constructor.ConstructorError): project.load_preset(preset) with psy.rcParams.catch(): - psy.rcParams['presets.trusted'].append(preset) + psy.rcParams["presets.trusted"].append(preset) project.load_preset(preset) def test_extract_preset(project): - preset = {"fmt1": "test1", "test_plotter": {"fmt2": "test2"}, - "not_existent": 1} + preset = { + "fmt1": "test1", + "test_plotter": {"fmt2": "test2"}, + "not_existent": 1, + } fmts = project.extract_fmts_from_preset(preset, "test_plotter") assert fmts == {"fmt1": "test1", "fmt2": "test2"} def test_save_preset(project): - sp = project.plot.test_plotter(xr.Dataset({"x": (('a'), [1])}), - name=['x', 'x']) + sp = project.plot.test_plotter( + xr.Dataset({"x": (("a"), [1])}), name=["x", "x"] + ) assert sp.save_preset() == {} - assert sp.save_preset(include_defaults=True)['fmt1'] == \ - sp.plotters[0]['fmt1'] + assert ( + sp.save_preset(include_defaults=True)["fmt1"] == sp.plotters[0]["fmt1"] + ) - sp[1].psy.update(fmt1='changed') + sp[1].psy.update(fmt1="changed") assert sp.save_preset() == {} - sp[0].psy.update(fmt1='changed') - assert sp.save_preset() == {'fmt1': 'changed'} + sp[0].psy.update(fmt1="changed") + assert 
sp.save_preset() == {"fmt1": "changed"} class TestProject(td.TestArrayList): @@ -139,15 +140,15 @@ class TestProject(td.TestArrayList): def setUp(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") self._created_files = set() def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") tp.results.clear() if remove_temp_files: for f in self._created_files: @@ -162,11 +163,15 @@ def tearDown(self): def test_with(self): """Test __enter__ and __exit__ methods""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) self.assertFalse(psy.gcp(True)) self.assertFalse(psy.gcp()) - with psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc')) as sp: + with psy.plot.test_plotter(bt.get_file("test-t2m-u-v.nc")): self.assertTrue(psy.gcp(True)) self.assertTrue(psy.gcp()) @@ -175,27 +180,38 @@ def test_with(self): def test_save_and_load_01_simple(self): """Test the saving and loading of a Project""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=0, y=4, - ax=(2, 2, 1), fmt1='test') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") + sp = psy.plot.test_plotter( + ds, name=["t2m", "u"], x=0, y=4, ax=(2, 2, 1), fmt1="test" + ) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) 
self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) arr_names = sp.arr_names - self.assertEqual(tp.results[arr_names[0] + '.fmt1'], 'test') - self.assertEqual(tp.results[arr_names[1] + '.fmt1'], 'test') - fname = 'test.pkl' + self.assertEqual(tp.results[arr_names[0] + ".fmt1"], "test") + self.assertEqual(tp.results[arr_names[1] + ".fmt1"], "test") + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) psy.close() @@ -205,38 +221,54 @@ def test_save_and_load_01_simple(self): self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) def test_save_and_load_02_alternative_axes(self): - """Test the saving and loading of a Project providing alternative axes - """ - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = 
psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=0, y=4, - ax=(2, 2, 1), fmt1='test') + """Test the saving and loading of a Project providing alternative axes""" + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") + sp = psy.plot.test_plotter( + ds, name=["t2m", "u"], x=0, y=4, ax=(2, 2, 1), fmt1="test" + ) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) arr_names = sp.arr_names - self.assertEqual(tp.results[arr_names[0] + '.fmt1'], 'test') - self.assertEqual(tp.results[arr_names[1] + '.fmt1'], 'test') - fname = 'test.pkl' + self.assertEqual(tp.results[arr_names[0] + ".fmt1"], "test") + self.assertEqual(tp.results[arr_names[1] + ".fmt1"], "test") + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) psy.close() @@ -247,206 +279,294 @@ def test_save_and_load_02_alternative_axes(self): self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 1) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 
2) + self.assertEqual(gs.nrows, 1) self.assertEqual(sp[1].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 1) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 1) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 1) def test_save_and_load_03_alternative_ds(self): - """Test the saving and loading of a Project providing alternative axes - """ - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=0, y=4, - ax=(2, 2, 1), fmt1='test') + """Test the saving and loading of a Project providing alternative axes""" + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") + sp = psy.plot.test_plotter( + ds, name=["t2m", "u"], x=0, y=4, ax=(2, 2, 1), fmt1="test" + ) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) arr_names = sp.arr_names - self.assertEqual(tp.results[arr_names[0] + '.fmt1'], 'test') - self.assertEqual(tp.results[arr_names[1] + '.fmt1'], 
'test') - fname = 'test.pkl' + self.assertEqual(tp.results[arr_names[0] + ".fmt1"], "test") + self.assertEqual(tp.results[arr_names[1] + ".fmt1"], "test") + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) psy.close() tp.results.clear() fig, axes = plt.subplots(1, 2) - ds = psy.open_dataset(bt.get_file('circumpolar_test.nc')) + ds = psy.open_dataset(bt.get_file("circumpolar_test.nc")) sp = psy.Project.load_project(fname, datasets=[ds], new_fig=False) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertIs(sp[0].psy.base, ds) self.assertIs(sp[1].psy.base, ds) def test_save_and_load_04_alternative_fname(self): - """Test the saving and loading of a Project providing alternative axes - """ - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=0, y=4, - ax=(2, 2, 1), fmt1='test') + """Test the saving and loading of a Project providing alternative axes""" + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") + sp = psy.plot.test_plotter( + ds, name=["t2m", "u"], x=0, 
y=4, ax=(2, 2, 1), fmt1="test" + ) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) arr_names = sp.arr_names - self.assertEqual(tp.results[arr_names[0] + '.fmt1'], 'test') - self.assertEqual(tp.results[arr_names[1] + '.fmt1'], 'test') - fname = 'test.pkl' + self.assertEqual(tp.results[arr_names[0] + ".fmt1"], "test") + self.assertEqual(tp.results[arr_names[1] + ".fmt1"], "test") + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) psy.close() tp.results.clear() fig, axes = plt.subplots(1, 2) sp = psy.Project.load_project( - fname, alternative_paths=[bt.get_file('circumpolar_test.nc')], - new_fig=False) + fname, + alternative_paths=[bt.get_file("circumpolar_test.nc")], + new_fig=False, + ) self.assertEqual(len(sp), 2) self.assertEqual(sp[0].psy.ax.get_figure().number, 1) self.assertEqual(get_row_num(sp[0].psy.ax), 0) self.assertEqual(get_col_num(sp[0].psy.ax), 0) - self.assertEqual(sp[0].psy.ax.numCols, 2) - self.assertEqual(sp[0].psy.ax.numRows, 2) + + gs = sp[0].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) self.assertEqual(sp[1].psy.ax.get_figure().number, 2) self.assertEqual(get_row_num(sp[1].psy.ax), 0) self.assertEqual(get_col_num(sp[1].psy.ax), 0) - self.assertEqual(sp[1].psy.ax.numCols, 2) - self.assertEqual(sp[1].psy.ax.numRows, 2) - 
self.assertEqual(psyd.get_filename_ds(sp[0].psy.base)[0], - bt.get_file('circumpolar_test.nc')) - self.assertEqual(psyd.get_filename_ds(sp[1].psy.base)[0], - bt.get_file('circumpolar_test.nc')) + + gs = sp[1].psy.ax.get_gridspec() + + self.assertEqual(gs.ncols, 2) + self.assertEqual(gs.nrows, 2) + self.assertEqual( + psyd.get_filename_ds(sp[0].psy.base)[0], + bt.get_file("circumpolar_test.nc"), + ) + self.assertEqual( + psyd.get_filename_ds(sp[1].psy.base)[0], + bt.get_file("circumpolar_test.nc"), + ) def test_save_and_load_05_pack(self): import tempfile - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - tempdir1 = tempfile.mkdtemp(prefix='psyplot_test_') - tempdir2 = tempfile.mkdtemp(prefix='psyplot_test_') - tempdir3 = tempfile.mkdtemp(prefix='psyplot_test_') - outdir = tempfile.mkdtemp(prefix='psyplot_test_') + + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + tempdir1 = tempfile.mkdtemp(prefix="psyplot_test_") + tempdir2 = tempfile.mkdtemp(prefix="psyplot_test_") + tempdir3 = tempfile.mkdtemp(prefix="psyplot_test_") + outdir = tempfile.mkdtemp(prefix="psyplot_test_") self._created_files.update([tempdir1, tempdir2, tempdir3, outdir]) # first test file - shutil.copyfile(bt.get_file('test-t2m-u-v.nc'), - osp.join(tempdir1, 'test-t2m-u-v.nc')) - psy.plot.test_plotter(osp.join(tempdir1, 'test-t2m-u-v.nc'), - name='t2m', t=[1, 2]) + shutil.copyfile( + bt.get_file("test-t2m-u-v.nc"), + osp.join(tempdir1, "test-t2m-u-v.nc"), + ) + psy.plot.test_plotter( + osp.join(tempdir1, "test-t2m-u-v.nc"), name="t2m", t=[1, 2] + ) # second test file - shutil.copyfile(bt.get_file('test-t2m-u-v.nc'), - osp.join(tempdir2, 'test-t2m-u-v.nc')) - psy.plot.test_plotter(osp.join(tempdir2, 'test-t2m-u-v.nc'), - name='t2m', t=[3, 4]) + shutil.copyfile( + bt.get_file("test-t2m-u-v.nc"), + osp.join(tempdir2, "test-t2m-u-v.nc"), + ) + psy.plot.test_plotter( 
+ osp.join(tempdir2, "test-t2m-u-v.nc"), name="t2m", t=[3, 4] + ) # third test file - shutil.copyfile(bt.get_file('test-t2m-u-v.nc'), - osp.join(tempdir3, 'test-t2m-u-v.nc')) - psy.plot.test_plotter(osp.join(tempdir3, 'test-t2m-u-v.nc'), - name='t2m', t=[3, 4]) + shutil.copyfile( + bt.get_file("test-t2m-u-v.nc"), + osp.join(tempdir3, "test-t2m-u-v.nc"), + ) + psy.plot.test_plotter( + osp.join(tempdir3, "test-t2m-u-v.nc"), name="t2m", t=[3, 4] + ) # fourth test file with different name - psy.plot.test_plotter(bt.get_file('circumpolar_test.nc'), name='t2m', - t=[0, 1]) + psy.plot.test_plotter( + bt.get_file("circumpolar_test.nc"), name="t2m", t=[0, 1] + ) mp = psy.gcp(True) - mp.save_project(osp.join(outdir, 'test.pkl'), pack=True) - files = {'test-t2m-u-v.nc', 'test-t2m-u-v-1.nc', - 'test-t2m-u-v-2.nc', 'test.pkl', 'circumpolar_test.nc'} + mp.save_project(osp.join(outdir, "test.pkl"), pack=True) + files = { + "test-t2m-u-v.nc", + "test-t2m-u-v-1.nc", + "test-t2m-u-v-2.nc", + "test.pkl", + "circumpolar_test.nc", + } self.assertEqual(set(os.listdir(outdir)), files) psy.close(mp) # move the directory to check whether it is still working - outdir2 = tempfile.mkdtemp(prefix='psyplot_test_') + outdir2 = tempfile.mkdtemp(prefix="psyplot_test_") self._created_files.add(outdir2) for f in files: shutil.move(osp.join(outdir, f), osp.join(outdir2, f)) - mp = psy.Project.load_project(osp.join(outdir2, 'test.pkl'), main=True, - ) + mp = psy.Project.load_project( + osp.join(outdir2, "test.pkl"), + main=True, + ) self.assertEqual(len(mp), 8, msg=mp) - paths = {osp.join(outdir2, 'test-t2m-u-v.nc'), - osp.join(outdir2, 'test-t2m-u-v-1.nc'), - osp.join(outdir2, 'test-t2m-u-v-2.nc')} + paths = { + osp.join(outdir2, "test-t2m-u-v.nc"), + osp.join(outdir2, "test-t2m-u-v-1.nc"), + osp.join(outdir2, "test-t2m-u-v-2.nc"), + } found = set() for i in range(6): found.add(psyd.get_filename_ds(mp[i].psy.base)[0]) - self.assertFalse(paths - found, - msg='expected %s\n%s\nfound %s' % (paths, '-' * 
80, - found)) - self.assertEqual(psyd.get_filename_ds(mp[6].psy.base)[0], - osp.join(outdir2, 'circumpolar_test.nc')) - self.assertEqual(psyd.get_filename_ds(mp[7].psy.base)[0], - osp.join(outdir2, 'circumpolar_test.nc')) + self.assertFalse( + paths - found, + msg="expected %s\n%s\nfound %s" % (paths, "-" * 80, found), + ) + self.assertEqual( + psyd.get_filename_ds(mp[6].psy.base)[0], + osp.join(outdir2, "circumpolar_test.nc"), + ) + self.assertEqual( + psyd.get_filename_ds(mp[7].psy.base)[0], + osp.join(outdir2, "circumpolar_test.nc"), + ) def test_save_and_load_06_post_fmt(self): """Test whether the :attr:`psyplot.plotter.Plotter.post` fmt works""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=0, y=4, - ax=(2, 2, 1), fmt1='test', - post='self.ax.set_title("test")') - self.assertEqual(sp.plotters[0].ax.get_title(), 'test') - fname = 'test.pkl' + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") + sp = psy.plot.test_plotter( + ds, + name=["t2m", "u"], + x=0, + y=4, + ax=(2, 2, 1), + fmt1="test", + post='self.ax.set_title("test")', + ) + self.assertEqual(sp.plotters[0].ax.get_title(), "test") + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) - psy.close('all') + psy.close("all") # test without enabled post sp = psy.Project.load_project(fname) - self.assertEqual(sp.plotters[0].ax.get_title(), '') - psy.close('all') + self.assertEqual(sp.plotters[0].ax.get_title(), "") + psy.close("all") # test with enabled post sp = psy.Project.load_project(fname, enable_post=True) - self.assertEqual(sp.plotters[0].ax.get_title(), 'test') + self.assertEqual(sp.plotters[0].ax.get_title(), "test") def 
test_save_and_load_07_sharedx(self): """Test whether shared x- and y-axis are restored correctly""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") fig, axes = plt.subplots(1, 3, sharex=True) - sp = psy.plot.test_plotter(ds, name=['t2m', 'u', 'v'], x=0, y=4, - ax=axes) + sp = psy.plot.test_plotter( + ds, name=["t2m", "u", "v"], x=0, y=4, ax=axes + ) axes[0].set_xlim(5, 10) self.assertEqual(list(axes[1].get_xlim()), [5, 10]) # save the project - fname = 'test.pkl' + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) - psy.close('all') + psy.close("all") # load the project sp = psy.Project.load_project(fname) @@ -456,7 +576,7 @@ def test_save_and_load_07_sharedx(self): # now we test, if it still works, if we remove the source axes names2use = sp.arr_names[1:] - psy.close('all') + psy.close("all") sp = psy.Project.load_project(fname, only=names2use) self.assertEqual(len(sp.axes), 2, msg=sp.axes) sp[0].psy.ax.set_xlim(10, 15) @@ -464,20 +584,25 @@ def test_save_and_load_07_sharedx(self): def test_save_and_load_08_sharedy(self): """Test whether shared x- and y-axis are restored correctly""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plt.close('all') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plt.close("all") fig, axes = plt.subplots(1, 3, sharey=True) - sp = psy.plot.test_plotter(ds, name=['t2m', 'u', 'v'], x=0, y=4, - ax=axes) + sp = 
psy.plot.test_plotter( + ds, name=["t2m", "u", "v"], x=0, y=4, ax=axes + ) axes[0].set_ylim(5, 10) self.assertEqual(list(axes[1].get_ylim()), [5, 10]) # save the project - fname = 'test.pkl' + fname = "test.pkl" self._created_files.add(fname) sp.save_project(fname) - psy.close('all') + psy.close("all") # load the project sp = psy.Project.load_project(fname) @@ -487,7 +612,7 @@ def test_save_and_load_08_sharedy(self): # now we test, if it still works, if we remove the source axes names2use = sp.arr_names[1:] - psy.close('all') + psy.close("all") sp = psy.Project.load_project(fname, only=names2use) self.assertEqual(len(sp.axes), 2, msg=sp.axes) sp[0].psy.ax.set_ylim(10, 15) @@ -495,6 +620,7 @@ def test_save_and_load_08_sharedy(self): def test_versions_and_patch(self): import warnings + try: import psyplot_test.plugin as test_plugin except ImportError: @@ -502,27 +628,35 @@ def test_versions_and_patch(self): return rc = psyd.rcParams with warnings.catch_warnings(): - warnings.simplefilter('ignore') + warnings.simplefilter("ignore") rc.load_plugins() psy._versions.clear() - psy.register_plotter('test_plotter', - **rc['project.plotters']['test_plotter']) - psy.register_plotter('test_plotter2', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - - psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - t=[1, 2]) - psy.plot.test_plotter2(bt.get_file('test-t2m-u-v.nc'), name='t2m', - t=[1, 2]) + psy.register_plotter( + "test_plotter", **rc["project.plotters"]["test_plotter"] + ) + psy.register_plotter( + "test_plotter2", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + + psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", t=[1, 2] + ) + psy.plot.test_plotter2( + bt.get_file("test-t2m-u-v.nc"), name="t2m", t=[1, 2] + ) mp = psy.gcp(True) self.assertEqual(len(mp), 4, msg=mp) d = mp.save_project() - self.assertIn('versions', d) - self.assertEqual(len(d['versions']), 2, msg=d['versions']) - 
self.assertIn('psyplot', d['versions']) - self.assertIn('psyplot_test.plugin', d['versions']) - self.assertEqual(d['versions']['psyplot_test.plugin']['version'], - '1.0.0') + self.assertIn("versions", d) + self.assertEqual(len(d["versions"]), 2, msg=d["versions"]) + self.assertIn("psyplot", d["versions"]) + self.assertIn("psyplot_test.plugin", d["versions"]) + self.assertEqual( + d["versions"]["psyplot_test.plugin"]["version"], "1.0.0" + ) # test the patch self.assertEqual(test_plugin.patch_check, []) @@ -531,218 +665,347 @@ def test_versions_and_patch(self): mp.close(True, True, True) mp = psy.Project.load_project(d) self.assertEqual(len(test_plugin.patch_check), 2) - self.assertIs(test_plugin.patch_check[0]['plotter'], - d['arrays']['arr0']['plotter']) - self.assertIs(test_plugin.patch_check[1]['plotter'], - d['arrays']['arr1']['plotter']) - self.assertIs(test_plugin.patch_check[0]['versions'], - d['versions']) - self.assertIs(test_plugin.patch_check[1]['versions'], - d['versions']) + self.assertIs( + test_plugin.patch_check[0]["plotter"], + d["arrays"]["arr0"]["plotter"], + ) + self.assertIs( + test_plugin.patch_check[1]["plotter"], + d["arrays"]["arr1"]["plotter"], + ) + self.assertIs( + test_plugin.patch_check[0]["versions"], d["versions"] + ) + self.assertIs( + test_plugin.patch_check[1]["versions"], d["versions"] + ) finally: test_plugin.checking_patch = False def test_keys(self): """Test the :meth:`psyplot.project.Project.keys` method""" import test_plotter as tp + import psyplot.plotter as psyp - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') + + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) class TestPlotter2(tp.TestPlotter): fmt2 = None - psy.register_plotter('test_plotter2', module='something', - plotter_name='anyway', plotter_cls=TestPlotter2) + psy.register_plotter( + "test_plotter2", + module="something", + 
plotter_name="anyway", + plotter_cls=TestPlotter2, + ) variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - sp1 = psy.plot.test_plotter(ds, name='v0') + sp1 = psy.plot.test_plotter(ds, name="v0") # add a second project without a fmt2 formatoption - sp2 = psy.plot.test_plotter2(ds, name='v1') + sp2 = psy.plot.test_plotter2(ds, name="v1") mp = sp1 + sp2 - self.assertEqual(sp1.keys(['fmt1', 'fmt2', 'fmt3'], func=str), - '+------+------+------+\n' - '| fmt1 | fmt2 | fmt3 |\n' - '+------+------+------+') - self.assertEqual(mp.keys(['fmt1', 'fmt2', 'fmt3'], func=str), - '+------+------+\n' - '| fmt1 | fmt3 |\n' - '+------+------+') - title = psyp.groups['labels'] - self.assertEqual(sp1.keys(['fmt1', 'fmt2', 'fmt3'], func=str, - grouped=True), - '*' * len(title) + '\n' + - title + '\n' + - '*' * len(title) + '\n' - '+------+------+\n' - '| fmt1 | fmt2 |\n' - '+------+------+\n' - '\n' - '*********\n' - 'something\n' - '*********\n' - '+------+\n' - '| fmt3 |\n' - '+------+') - self.assertEqual(mp.keys(['fmt1', 'fmt2', 'fmt3'], func=str, - grouped=True), - '*' * len(title) + '\n' + - title + '\n' + - '*' * len(title) + '\n' - '+------+\n' - '| fmt1 |\n' - '+------+\n' - '\n' - '*********\n' - 'something\n' - '*********\n' - '+------+\n' - '| fmt3 |\n' - '+------+') - self.assertEqual(sp1.keys(['fmt1', 'something'], func=str), - '+------+------+\n' - '| fmt1 | fmt3 |\n' - '+------+------+') + self.assertEqual( + sp1.keys(["fmt1", "fmt2", "fmt3"], func=str), + "+------+------+------+\n" + "| fmt1 | fmt2 | fmt3 |\n" + "+------+------+------+", + ) + self.assertEqual( + mp.keys(["fmt1", "fmt2", "fmt3"], func=str), + "+------+------+\n" "| fmt1 | fmt3 |\n" "+------+------+", + ) + title = psyp.groups["labels"] + self.assertEqual( + sp1.keys(["fmt1", "fmt2", "fmt3"], func=str, grouped=True), + "*" * len(title) + "\n" + title + "\n" + "*" * len(title) + "\n" + "+------+------+\n" + "| fmt1 | fmt2 |\n" + "+------+------+\n" + "\n" + 
"*********\n" + "something\n" + "*********\n" + "+------+\n" + "| fmt3 |\n" + "+------+", + ) + self.assertEqual( + mp.keys(["fmt1", "fmt2", "fmt3"], func=str, grouped=True), + "*" * len(title) + "\n" + title + "\n" + "*" * len(title) + "\n" + "+------+\n" + "| fmt1 |\n" + "+------+\n" + "\n" + "*********\n" + "something\n" + "*********\n" + "+------+\n" + "| fmt3 |\n" + "+------+", + ) + self.assertEqual( + sp1.keys(["fmt1", "something"], func=str), + "+------+------+\n" "| fmt1 | fmt3 |\n" "+------+------+", + ) if six.PY3: - with self.assertWarnsRegex(UserWarning, - '(?i)unknown formatoption keyword'): + with self.assertWarnsRegex( + UserWarning, "(?i)unknown formatoption keyword" + ): self.assertEqual( - sp1.keys(['fmt1', 'wrong', 'something'], func=str), - '+------+------+\n' - '| fmt1 | fmt3 |\n' - '+------+------+') + sp1.keys(["fmt1", "wrong", "something"], func=str), + "+------+------+\n" "| fmt1 | fmt3 |\n" "+------+------+", + ) def test_docs(self): """Test the :meth:`psyplot.project.Project.docs` method""" import test_plotter as tp + import psyplot.plotter as psyp - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') + + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) class TestPlotter2(tp.TestPlotter): fmt2 = None - psy.register_plotter('test_plotter2', module='something', - plotter_name='anyway', plotter_cls=TestPlotter2) + psy.register_plotter( + "test_plotter2", + module="something", + plotter_name="anyway", + plotter_cls=TestPlotter2, + ) variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - sp1 = psy.plot.test_plotter(ds, name='v0') + sp1 = psy.plot.test_plotter(ds, name="v0") # add a second project without a fmt2 formatoption - sp2 = psy.plot.test_plotter2(ds, name='v1') + sp2 = psy.plot.test_plotter2(ds, name="v1") mp = sp1 + sp2 - self.assertEqual(sp1.docs(func=str), 
'\n'.join([ - 'fmt1', '====', tp.SimpleFmt.__doc__, '', - 'fmt2', '====', tp.SimpleFmt2.__doc__, '', - 'fmt3', '====', tp.SimpleFmt3.__doc__, '', - 'post', '====', psyp.PostProcessing.__doc__, '', - 'post_timing', '===========', psyp.PostTiming.__doc__, ''])) + self.assertEqual( + sp1.docs(func=str), + "\n".join( + [ + "fmt1", + "====", + tp.SimpleFmt.__doc__, + "", + "fmt2", + "====", + tp.SimpleFmt2.__doc__, + "", + "fmt3", + "====", + tp.SimpleFmt3.__doc__, + "", + "post", + "====", + psyp.PostProcessing.__doc__, + "", + "post_timing", + "===========", + psyp.PostTiming.__doc__, + "", + ] + ), + ) # test summed project - self.assertEqual(mp.docs(func=str), '\n'.join([ - 'fmt1', '====', tp.SimpleFmt.__doc__, '', - 'fmt3', '====', tp.SimpleFmt3.__doc__, '', - 'post', '====', psyp.PostProcessing.__doc__, '', - 'post_timing', '===========', psyp.PostTiming.__doc__, ''])) - title = psyp.groups['labels'] self.assertEqual( - sp1.docs(['fmt1', 'fmt2', 'fmt3'], func=str, grouped=True), - '\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', '====', tp.SimpleFmt.__doc__, '', - 'fmt2', '====', tp.SimpleFmt2.__doc__, '', '', - '*********', - 'something', - '*********', - 'fmt3', '====', tp.SimpleFmt3.__doc__])) + mp.docs(func=str), + "\n".join( + [ + "fmt1", + "====", + tp.SimpleFmt.__doc__, + "", + "fmt3", + "====", + tp.SimpleFmt3.__doc__, + "", + "post", + "====", + psyp.PostProcessing.__doc__, + "", + "post_timing", + "===========", + psyp.PostTiming.__doc__, + "", + ] + ), + ) + title = psyp.groups["labels"] + self.assertEqual( + sp1.docs(["fmt1", "fmt2", "fmt3"], func=str, grouped=True), + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + "====", + tp.SimpleFmt.__doc__, + "", + "fmt2", + "====", + tp.SimpleFmt2.__doc__, + "", + "", + "*********", + "something", + "*********", + "fmt3", + "====", + tp.SimpleFmt3.__doc__, + ] + ), + ) # test summed project self.assertEqual( - mp.docs(['fmt1', 'fmt3'], func=str, grouped=True), - 
'\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', '====', tp.SimpleFmt.__doc__, '', '', - '*********', - 'something', - '*********', - 'fmt3', '====', tp.SimpleFmt3.__doc__])) + mp.docs(["fmt1", "fmt3"], func=str, grouped=True), + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + "====", + tp.SimpleFmt.__doc__, + "", + "", + "*********", + "something", + "*********", + "fmt3", + "====", + tp.SimpleFmt3.__doc__, + ] + ), + ) def test_summaries(self): """Test the :meth:`psyplot.project.Project.summaries` method""" import test_plotter as tp + import psyplot.plotter as psyp - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') + + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) class TestPlotter2(tp.TestPlotter): fmt2 = None - psy.register_plotter('test_plotter2', module='something', - plotter_name='anyway', plotter_cls=TestPlotter2) + psy.register_plotter( + "test_plotter2", + module="something", + plotter_name="anyway", + plotter_cls=TestPlotter2, + ) variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - sp1 = psy.plot.test_plotter(ds, name='v0') + sp1 = psy.plot.test_plotter(ds, name="v0") # add a second project without a fmt2 formatoption - sp2 = psy.plot.test_plotter2(ds, name='v1') + sp2 = psy.plot.test_plotter2(ds, name="v1") mp = sp1 + sp2 - self.assertEqual(sp1.summaries(func=str), '\n'.join([ - 'fmt1', tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], ' '), - 'fmt2', tp.indent(tp.SimpleFmt2.__doc__.splitlines()[0], ' '), - 'fmt3', tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], ' '), - 'post', tp.indent(psyp.PostProcessing.__doc__.splitlines()[0], - ' '), - 'post_timing', tp.indent(psyp.PostTiming.__doc__.splitlines()[0], - ' ')])) + self.assertEqual( + sp1.summaries(func=str), + "\n".join( + [ + "fmt1", + 
tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], " "), + "fmt2", + tp.indent(tp.SimpleFmt2.__doc__.splitlines()[0], " "), + "fmt3", + tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], " "), + "post", + tp.indent( + psyp.PostProcessing.__doc__.splitlines()[0], " " + ), + "post_timing", + tp.indent(psyp.PostTiming.__doc__.splitlines()[0], " "), + ] + ), + ) # test summed project - self.assertEqual(mp.summaries(func=str), '\n'.join([ - 'fmt1', tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], ' '), - 'fmt3', tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], ' '), - 'post', tp.indent(psyp.PostProcessing.__doc__.splitlines()[0], - ' '), - 'post_timing', tp.indent(psyp.PostTiming.__doc__.splitlines()[0], - ' ')])) - title = psyp.groups['labels'] self.assertEqual( - sp1.summaries(['fmt1', 'fmt2', 'fmt3'], func=str, grouped=True), - '\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', tp.indent( - tp.SimpleFmt.__doc__.splitlines()[0], ' '), - 'fmt2', tp.indent( - tp.SimpleFmt2.__doc__.splitlines()[0], ' '), - '', - '*********', - 'something', - '*********', - 'fmt3', tp.indent( - tp.SimpleFmt3.__doc__.splitlines()[0], ' ')] - )) + mp.summaries(func=str), + "\n".join( + [ + "fmt1", + tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], " "), + "fmt3", + tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], " "), + "post", + tp.indent( + psyp.PostProcessing.__doc__.splitlines()[0], " " + ), + "post_timing", + tp.indent(psyp.PostTiming.__doc__.splitlines()[0], " "), + ] + ), + ) + title = psyp.groups["labels"] + self.assertEqual( + sp1.summaries(["fmt1", "fmt2", "fmt3"], func=str, grouped=True), + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], " "), + "fmt2", + tp.indent(tp.SimpleFmt2.__doc__.splitlines()[0], " "), + "", + "*********", + "something", + "*********", + "fmt3", + tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], " "), + ] + ), + ) # test summed project self.assertEqual( - 
mp.summaries(['fmt1', 'fmt3'], func=str, grouped=True), - '\n'.join([ - '*' * len(title), - title, - '*' * len(title), - 'fmt1', tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], - ' '), - '', - '*********', - 'something', - '*********', - 'fmt3', tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], - ' ')]) - ) + mp.summaries(["fmt1", "fmt3"], func=str, grouped=True), + "\n".join( + [ + "*" * len(title), + title, + "*" * len(title), + "fmt1", + tp.indent(tp.SimpleFmt.__doc__.splitlines()[0], " "), + "", + "*********", + "something", + "*********", + "fmt3", + tp.indent(tp.SimpleFmt3.__doc__.splitlines()[0], " "), + ] + ), + ) def test_figs(self): """Test the :attr:`psyplot.project.Project.figs` attribute""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name='t2m', time=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name="t2m", time=[1, 2]) self.assertEqual(sp[0].psy.ax.figure.number, 1) self.assertEqual(sp[1].psy.ax.figure.number, 2) figs = sp.figs @@ -753,10 +1016,14 @@ def test_figs(self): def test_axes(self): """Test the :attr:`psyplot.project.Project.axes` attribute""" - psy.register_plotter('test_plotter', import_plotter=True, - module='test_plotter', plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name='t2m', time=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name="t2m", time=[1, 2]) self.assertIsNot(sp[0].psy.ax, sp[1].psy.ax) axes = sp.axes self.assertIn(sp[0].psy.ax, axes) @@ -766,12 +1033,13 @@ 
def test_axes(self): def test_close(self): """Test the :meth:`psyplot.project.Project.close` method""" - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp0 = psy.plot.test_plotter(ds, name='t2m', time=[1]) - sp1 = psy.plot.test_plotter(ds, name='t2m', time=[1, 2]) - sp2 = psy.plot.test_plotter(ds, name='t2m', time=[3, 4]) + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp0 = psy.plot.test_plotter(ds, name="t2m", time=[1]) + sp1 = psy.plot.test_plotter(ds, name="t2m", time=[1, 2]) + sp2 = psy.plot.test_plotter(ds, name="t2m", time=[3, 4]) mp = psy.gcp(True) names0 = sp0.arr_names names1 = sp1.arr_names @@ -805,21 +1073,26 @@ def test_close(self): def test_close_global(self): """Test the :func:`psyplot.project.close` function""" - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - with psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) as ds: + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + with psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) as ds: time = ds.time.values lev = ds.lev.values - mp0 = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - lev=[0, 1]).main + mp0 = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", lev=[0, 1] + ).main mp1 = psy.project() - psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - time=[1, 2]) + psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", time=[1, 2] + ) mp2 = psy.project() - sp1 = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - time=[3, 4]) - sp2 = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - lev=[2, 3]) + sp1 = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", time=[3, 4] + ) + sp2 = 
psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", lev=[2, 3] + ) # some checks in the beginning self.assertEqual(len(mp0), 2) self.assertEqual(len(mp1), 2) @@ -857,7 +1130,7 @@ def test_close_global(self): ds0.v.values # check that the data can be loaded ds1 = mp1[0].psy.base ds1.v.values # check that the data can be loaded - psy.close('all') + psy.close("all") self.assertEqual(mp0, []) self.assertEqual(mp1, []) self.assertEqual(psy.gcp(), []) @@ -866,8 +1139,9 @@ def test_close_global(self): def test_oncpchange_signal(self): """Test whether the correct signal is fired""" - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) check_mains = [] projects = [] @@ -876,8 +1150,8 @@ def check(p): projects.append(p) psy.Project.oncpchange.connect(check) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')).load() - sp = psy.plot.test_plotter(ds, name='t2m', lev=[0, 1]) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")).load() + sp = psy.plot.test_plotter(ds, name="t2m", lev=[0, 1]) # the signal should have been fired 2 times, one times from the # subproject, one times from the project self.assertEqual(len(check_mains), 2) @@ -891,16 +1165,18 @@ def check(p): projects = [] p = sp[1:] psy.scp(p) - self.assertEqual(check_mains, [False], - msg="projects: %s" % (projects, )) + self.assertEqual( + check_mains, [False], msg="projects: %s" % (projects,) + ) self.assertIs(projects[0], p) # test appending check_mains = [] projects = [] p.append(sp[0]) - self.assertEqual(check_mains, [False], - msg="projects: %s" % (projects, )) + self.assertEqual( + check_mains, [False], msg="projects: %s" % (projects,) + ) self.assertIs(projects[0], p) p.pop(1) @@ -915,8 +1191,9 @@ def check(p): check_mains = [] projects = [] psy.close() - self.assertEqual(len(check_mains), 2, - msg="%s, %s" % (check_mains, projects)) + self.assertEqual( + 
len(check_mains), 2, msg="%s, %s" % (check_mains, projects) + ) self.assertIn(False, check_mains) self.assertIn(True, check_mains) self.assertEqual(len(projects[0]), 0, msg=str(projects[0])) @@ -926,118 +1203,129 @@ def check(p): def test_share_01_on_creation(self): """Test the sharing within a project when creating it""" - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - time=[0, 1, 2], share='something') + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), + name="t2m", + time=[0, 1, 2], + share="something", + ) self.assertEqual(len(sp), 3, msg=sp) - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[1].fmt3, sp.plotters[2].fmt3}) - sp[0].psy.update(fmt3='test3') - self.assertEqual(sp.plotters[1].fmt3.value, 'test3') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') + self.assertEqual( + sp.plotters[0].fmt3.shared, + {sp.plotters[1].fmt3, sp.plotters[2].fmt3}, + ) + sp[0].psy.update(fmt3="test3") + self.assertEqual(sp.plotters[1].fmt3.value, "test3") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") def test_share_02_method(self): """Test the :meth:`psyplot.project.Project.share` method""" - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - time=[0, 1, 2]) + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", time=[0, 1, 2] + ) # share within the project - sp.share(keys='something') + sp.share(keys="something") self.assertEqual(len(sp), 3, msg=sp) - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[1].fmt3, sp.plotters[2].fmt3}) - sp[0].psy.update(fmt3='test3') - 
self.assertEqual(sp.plotters[1].fmt3.value, 'test3') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') + self.assertEqual( + sp.plotters[0].fmt3.shared, + {sp.plotters[1].fmt3, sp.plotters[2].fmt3}, + ) + sp[0].psy.update(fmt3="test3") + self.assertEqual(sp.plotters[1].fmt3.value, "test3") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") sp.unshare() self.assertFalse(sp.plotters[0].fmt3.shared) # share from outside the project - sp[::2].share(sp[1], keys='something') - self.assertEqual(sp.plotters[1].fmt3.shared, - {sp.plotters[0].fmt3, sp.plotters[2].fmt3}) - sp[1].psy.update(fmt3='test3') - self.assertEqual(sp.plotters[0].fmt3.value, 'test3') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') + sp[::2].share(sp[1], keys="something") + self.assertEqual( + sp.plotters[1].fmt3.shared, + {sp.plotters[0].fmt3, sp.plotters[2].fmt3}, + ) + sp[1].psy.update(fmt3="test3") + self.assertEqual(sp.plotters[0].fmt3.value, "test3") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") sp.unshare() self.assertFalse(sp.plotters[1].fmt3.shared) def test_share_03_method_by(self): - """Test the :meth:`psyplot.project.Project.share` method by axes/figure - """ + """Test the :meth:`psyplot.project.Project.share` method by axes/figure""" import matplotlib.pyplot as plt - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') + + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) fig1, ax1 = plt.subplots() fig2, axes = plt.subplots(1, 2) ax2, ax3 = axes - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), name='t2m', - time=range(4), ax=[ax1, ax2, ax1, ax3]) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), + name="t2m", + time=range(4), + ax=[ax1, ax2, ax1, ax3], + ) self.assertEqual(len(sp), 4, msg=sp) # share by axes - sp.share(by='axes', keys='something') - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[2].fmt3}) + sp.share(by="axes", keys="something") + 
self.assertEqual(sp.plotters[0].fmt3.shared, {sp.plotters[2].fmt3}) self.assertFalse(sp.plotters[1].fmt3.shared) self.assertFalse(sp.plotters[3].fmt3.shared) - sp[0].psy.update(fmt3='test3') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') + sp[0].psy.update(fmt3="test3") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") sp.unshare() self.assertFalse(sp.plotters[0].fmt3.shared) # share by figure - sp.share(by='fig', keys='something') - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[2].fmt3}) - self.assertEqual(sp.plotters[1].fmt3.shared, - {sp.plotters[3].fmt3}) - sp[0].psy.update(fmt3='test3') - sp[1].psy.update(fmt3='test4') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') - self.assertEqual(sp.plotters[3].fmt3.value, 'test4') + sp.share(by="fig", keys="something") + self.assertEqual(sp.plotters[0].fmt3.shared, {sp.plotters[2].fmt3}) + self.assertEqual(sp.plotters[1].fmt3.shared, {sp.plotters[3].fmt3}) + sp[0].psy.update(fmt3="test3") + sp[1].psy.update(fmt3="test4") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") + self.assertEqual(sp.plotters[3].fmt3.value, "test4") sp.unshare() self.assertFalse(sp.plotters[0].fmt3.shared) self.assertFalse(sp.plotters[1].fmt3.shared) # share with provided bases by figure - sp[2:].share(sp[:2], keys='something', by='fig') + sp[2:].share(sp[:2], keys="something", by="fig") - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[2].fmt3}) - self.assertEqual(sp.plotters[1].fmt3.shared, - {sp.plotters[3].fmt3}) - sp[0].psy.update(fmt3='test3') - sp[1].psy.update(fmt3='test4') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') - self.assertEqual(sp.plotters[3].fmt3.value, 'test4') + self.assertEqual(sp.plotters[0].fmt3.shared, {sp.plotters[2].fmt3}) + self.assertEqual(sp.plotters[1].fmt3.shared, {sp.plotters[3].fmt3}) + sp[0].psy.update(fmt3="test3") + sp[1].psy.update(fmt3="test4") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") + self.assertEqual(sp.plotters[3].fmt3.value, 
"test4") sp.unshare() self.assertFalse(sp.plotters[0].fmt3.shared) self.assertFalse(sp.plotters[1].fmt3.shared) # share with provided bases by axes - sp[2:].share(sp[:2], keys='something', by='axes') - self.assertEqual(sp.plotters[0].fmt3.shared, - {sp.plotters[2].fmt3}) + sp[2:].share(sp[:2], keys="something", by="axes") + self.assertEqual(sp.plotters[0].fmt3.shared, {sp.plotters[2].fmt3}) self.assertFalse(sp.plotters[1].fmt3.shared) self.assertFalse(sp.plotters[3].fmt3.shared) - sp[0].psy.update(fmt3='test3') - self.assertEqual(sp.plotters[2].fmt3.value, 'test3') + sp[0].psy.update(fmt3="test3") + self.assertEqual(sp.plotters[2].fmt3.value, "test3") sp.unshare() self.assertFalse(sp.plotters[0].fmt3.shared) def _register_export_plotter(self): class SimplePlotFormatoption(tp.TestFormatoption): - plot_fmt = True priority = psyp.BEFOREPLOTTING @@ -1048,39 +1336,50 @@ def make_plot(self): self.data.plot(ax=self.ax) class TestPlotter(psyp.Plotter): + fmt1 = SimplePlotFormatoption("fmt1") - fmt1 = SimplePlotFormatoption('fmt1') - - psy.register_plotter('test_plotter', module='something', - plotter_name='irrelevant', - plotter_cls=TestPlotter) + psy.register_plotter( + "test_plotter", + module="something", + plotter_name="irrelevant", + plotter_cls=TestPlotter, + ) def test_export_01_replacement(self): """Test exporting a project""" + from tempfile import NamedTemporaryFile + import matplotlib.pyplot as plt - from matplotlib.testing.compare import compare_images - import pandas as pd import numpy as np - from tempfile import NamedTemporaryFile + import pandas as pd + from matplotlib.testing.compare import compare_images self._register_export_plotter() - with psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) as ds: + with psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) as ds: time = ds.time time.values # make sure the data is loaded ds = xr.Dataset( - {"v0": xr.Variable(('x', 'y'), np.arange(3 * 5).reshape(3, 5)), - "v1": xr.Variable(('time', 'y'), np.arange(5 * 
5).reshape(5, 5))}, - {"x": xr.Variable(('x', ), [4, 5, 6]), - "y": xr.Variable(('y', ), [6, 7, 8, 9, 10]), - 'time': time}) + { + "v0": xr.Variable(("x", "y"), np.arange(3 * 5).reshape(3, 5)), + "v1": xr.Variable( + ("time", "y"), np.arange(5 * 5).reshape(5, 5) + ), + }, + { + "x": xr.Variable(("x",), [4, 5, 6]), + "y": xr.Variable(("y",), [6, 7, 8, 9, 10]), + "time": time, + }, + ) # create reference plots reffiles = [] fig, ax = plt.subplots() ds.v0[1].plot(ax=ax) reffiles.append( - NamedTemporaryFile(prefix='psyplot_', suffix='.png').name) + NamedTemporaryFile(prefix="psyplot_", suffix=".png").name + ) self._created_files.update(reffiles) fig.savefig(reffiles[-1]) @@ -1089,32 +1388,43 @@ def test_export_01_replacement(self): ds.v0.plot(ax=axes[0]) ds.v0[1:].plot(ax=axes[1]) reffiles.append( - NamedTemporaryFile(prefix='psyplot_', suffix='.png').name) + NamedTemporaryFile(prefix="psyplot_", suffix=".png").name + ) self._created_files.update(reffiles) fig.savefig(reffiles[-1]) - plt.close('all') + plt.close("all") # create project - psy.plot.test_plotter(ds, name='v0', x=1, attrs={'test': 7}, - ax=plt.subplots()[1]) - psy.plot.test_plotter(ds, name='v0', x=[slice(None), slice(1, None)], - attrs={'test': 3}, ax=plt.subplots(1, 2)[1]) + psy.plot.test_plotter( + ds, name="v0", x=1, attrs={"test": 7}, ax=plt.subplots()[1] + ) + psy.plot.test_plotter( + ds, + name="v0", + x=[slice(None), slice(1, None)], + attrs={"test": 3}, + ax=plt.subplots(1, 2)[1], + ) mp = psy.gcp(True) self.assertEqual(len(mp), 3, msg=mp) - base_name = NamedTemporaryFile(prefix='psyplot_').name - mp.export(base_name + '%i_%(test)s.png') + base_name = NamedTemporaryFile(prefix="psyplot_").name + mp.export(base_name + "%i_%(test)s.png") # compare reference files and exported files - self.assertTrue(osp.exists(base_name + '1_7.png'), - msg="Missing " + base_name + '1_7.png') - self._created_files.add(base_name + '1_7.png') - self.assertTrue(osp.exists(base_name + '2_3.png'), - msg="Missing " + 
base_name + '2_3.png') - self._created_files.add(base_name + '2_3.png') - results = compare_images(reffiles[0], base_name + '1_7.png', 1) + self.assertTrue( + osp.exists(base_name + "1_7.png"), + msg="Missing " + base_name + "1_7.png", + ) + self._created_files.add(base_name + "1_7.png") + self.assertTrue( + osp.exists(base_name + "2_3.png"), + msg="Missing " + base_name + "2_3.png", + ) + self._created_files.add(base_name + "2_3.png") + results = compare_images(reffiles[0], base_name + "1_7.png", 1) self.assertIsNone(results, msg=results) - results = compare_images(reffiles[1], base_name + '2_3.png', 1) + results = compare_images(reffiles[1], base_name + "2_3.png", 1) self.assertIsNone(results, msg=results) # check time formatting @@ -1123,7 +1433,8 @@ def test_export_01_replacement(self): fig, ax = plt.subplots() ds.v1[1].plot(ax=ax) reffiles.append( - NamedTemporaryFile(prefix='psyplot_', suffix='.png').name) + NamedTemporaryFile(prefix="psyplot_", suffix=".png").name + ) self._created_files.update(reffiles) fig.savefig(reffiles[-1]) @@ -1131,58 +1442,77 @@ def test_export_01_replacement(self): ds.v1[2, :2].plot(ax=axes[0]) ds.v1[2, 2:].plot(ax=axes[1]) reffiles.append( - NamedTemporaryFile(prefix='psyplot_', suffix='.png').name) + NamedTemporaryFile(prefix="psyplot_", suffix=".png").name + ) self._created_files.update(reffiles) fig.savefig(reffiles[-1]) - plt.close('all') + plt.close("all") # create project - psy.plot.test_plotter(ds, name='v1', time=1, attrs={'test': 3}, - ax=plt.subplots()[1]) - psy.plot.test_plotter(ds, name='v1', time=2, attrs={'test': 5}, - y=[slice(0, 2), slice(2, None)], - ax=plt.subplots(1, 2)[1]) + psy.plot.test_plotter( + ds, name="v1", time=1, attrs={"test": 3}, ax=plt.subplots()[1] + ) + psy.plot.test_plotter( + ds, + name="v1", + time=2, + attrs={"test": 5}, + y=[slice(0, 2), slice(2, None)], + ax=plt.subplots(1, 2)[1], + ) mp = psy.gcp(True) self.assertEqual(len(mp), 3, msg=mp) - mp.export(base_name + '%%i_%m_%%(test)s.png', 
use_time=True) + mp.export(base_name + "%%i_%m_%%(test)s.png", use_time=True) # compare reference files and exported files - t1 = pd.to_datetime(time.values[1]).strftime('%m') - t2 = pd.to_datetime(time.values[2]).strftime('%m') - self.assertTrue(osp.exists(base_name + ('1_%s_3.png' % t1)), - msg="Missing " + base_name + ('1_%s_3.png' % t1)) - self._created_files.add(base_name + ('1_%s_3.png' % t1)) - self.assertTrue(osp.exists(base_name + ('2_%s_5.png' % t2)), - msg="Missing " + base_name + ('2_%s_5.png' % t2)) - self._created_files.add(base_name + ('2_%s_5.png' % t2)) - results = compare_images(reffiles[0], base_name + ('1_%s_3.png' % t1), - 1) + t1 = pd.to_datetime(time.values[1]).strftime("%m") + t2 = pd.to_datetime(time.values[2]).strftime("%m") + self.assertTrue( + osp.exists(base_name + ("1_%s_3.png" % t1)), + msg="Missing " + base_name + ("1_%s_3.png" % t1), + ) + self._created_files.add(base_name + ("1_%s_3.png" % t1)) + self.assertTrue( + osp.exists(base_name + ("2_%s_5.png" % t2)), + msg="Missing " + base_name + ("2_%s_5.png" % t2), + ) + self._created_files.add(base_name + ("2_%s_5.png" % t2)) + results = compare_images( + reffiles[0], base_name + ("1_%s_3.png" % t1), 1 + ) self.assertIsNone(results, msg=results) - results = compare_images(reffiles[1], base_name + ('2_%s_5.png' % t2), - 1) + results = compare_images( + reffiles[1], base_name + ("2_%s_5.png" % t2), 1 + ) self.assertIsNone(results, msg=results) # check pdf replacement psy.close(mp) - sp = psy.plot.test_plotter(ds, name='v1', time=1, attrs={'test': 3}, - ax=plt.subplots()[1]) - sp.export(base_name + '%m_%%(test)s.pdf', use_time=True) - self.assertTrue(osp.exists(base_name + ('%s_3.pdf' % t1)), - msg="Missing " + base_name + ('%s_3.pdf' % t1)) - self._created_files.add(base_name + ('%s_3.pdf' % t1)) + sp = psy.plot.test_plotter( + ds, name="v1", time=1, attrs={"test": 3}, ax=plt.subplots()[1] + ) + sp.export(base_name + "%m_%%(test)s.pdf", use_time=True) + self.assertTrue( + 
osp.exists(base_name + ("%s_3.pdf" % t1)), + msg="Missing " + base_name + ("%s_3.pdf" % t1), + ) + self._created_files.add(base_name + ("%s_3.pdf" % t1)) def test_export_02_list(self): """Test whether the exporting to a list works well""" import tempfile + self._register_export_plotter() - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), - name='t2m', time=[1, 2, 3], z=0) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name="t2m", time=[1, 2, 3], z=0 + ) self.assertEqual(len(sp), 3, msg=sp) fnames = list( - tempfile.NamedTemporaryFile(suffix='.png', prefix='psyplot_').name - for _ in range(3)) + tempfile.NamedTemporaryFile(suffix=".png", prefix="psyplot_").name + for _ in range(3) + ) self._created_files.update(fnames) sp.export(fnames) @@ -1193,17 +1523,24 @@ def test_export_02_list(self): def test_export_03_append(self): """Append to a pdf file""" import tempfile + self._register_export_plotter() fig1, ax1 = plt.subplots(1, 2) fig2, ax2 = plt.subplots() axes = list(ax1) + [ax2] - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), - name='t2m', time=[1, 2, 3], z=0, y=0, - ax=axes) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), + name="t2m", + time=[1, 2, 3], + z=0, + y=0, + ax=axes, + ) self.assertEqual(len(sp), 3, msg=sp) fname = tempfile.NamedTemporaryFile( - suffix='.pdf', prefix='psyplot_').name + suffix=".pdf", prefix="psyplot_" + ).name self._created_files.add(fname) pdf = sp.export(fname, close_pdf=False) @@ -1220,13 +1557,16 @@ def test_update(self): """Test the update of an :class:`psyplot.data.ArrayList`""" variables, coords = self._from_dataset_test_variables ds = xr.Dataset(variables, coords) - psy.register_plotter('test_plotter', module='something', - plotter_name='unimportant', - plotter_cls=tp.TestPlotter) + psy.register_plotter( + "test_plotter", + module="something", + plotter_name="unimportant", + plotter_cls=tp.TestPlotter, + ) # add 2 arrays - psy.plot.test_plotter(ds, name=['v0', 'v1'], t=0) + 
psy.plot.test_plotter(ds, name=["v0", "v1"], t=0) # add a list - psy.plot.test_plotter(ds, name=['v0', 'v1'], t=0, prefer_list=True) + psy.plot.test_plotter(ds, name=["v0", "v1"], t=0, prefer_list=True) mp = psy.gcp(True) @@ -1234,12 +1574,14 @@ def test_update(self): self.assertEqual(len(mp.plotters), 3, msg=mp) # update the list - mp.update(t=1, fmt2='updated') + mp.update(t=1, fmt2="updated") for i, plotter in enumerate(mp.plotters): - self.assertEqual(plotter['fmt2'], 'updated', - msg='Plotter of array %i not updated! %s' % ( - i, mp[i])) + self.assertEqual( + plotter["fmt2"], + "updated", + msg="Plotter of array %i not updated! %s" % (i, mp[i]), + ) self.assertEqual(mp[0].time, ds.time[1]) self.assertEqual(mp[1].time, ds.time[1]) @@ -1248,28 +1590,30 @@ def test_update(self): class TestPlotterInterface(unittest.TestCase): - list_class = psy.Project def setUp(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") tp.results.clear() def test_plotter_registration(self): """Test the registration of a plotter""" - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter') - self.assertTrue(hasattr(psy.plot, 'test_plotter')) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + self.assertTrue(hasattr(psy.plot, "test_plotter")) self.assertIs(psy.plot.test_plotter.plotter_cls, tp.TestPlotter) psy.plot.test_plotter.print_func = str self.assertEqual(psy.plot.test_plotter.fmt1(), tp.SimpleFmt.__doc__) @@ -1277,83 +1621,111 @@ def test_plotter_registration(self): # test the warning if not six.PY2: with self.assertWarnsRegex(UserWarning, "not_existent_module"): 
- psy.register_plotter('something', "not_existent_module", - 'not_important', import_plotter=True) - psy.unregister_plotter('test_plotter') - self.assertFalse(hasattr(psy.Project, 'test_plotter')) - self.assertFalse(hasattr(psy.plot, 'test_plotter')) + psy.register_plotter( + "something", + "not_existent_module", + "not_important", + import_plotter=True, + ) + psy.unregister_plotter("test_plotter") + self.assertFalse(hasattr(psy.Project, "test_plotter")) + self.assertFalse(hasattr(psy.plot, "test_plotter")) def test_plot_creation_01_array(self): """Test the plot creation with a plotter that takes one array""" - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name='t2m') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name="t2m") self.assertEqual(len(sp), 1) - self.assertEqual(sp[0].name, 't2m') + self.assertEqual(sp[0].name, "t2m") self.assertEqual(sp[0].shape, ds.t2m.shape) self.assertEqual(sp[0].values.tolist(), ds.t2m.values.tolist()) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_02_array_default_dims(self): # add a default value for the y dimension - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', - default_dims={'y': 0}) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name='t2m') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + default_dims={"y": 0}, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name="t2m") self.assertEqual(len(sp), 1) - 
self.assertEqual(sp[0].name, 't2m') + self.assertEqual(sp[0].name, "t2m") self.assertEqual(sp[0].shape, ds.t2m.isel(lat=0).shape) - self.assertEqual(sp[0].values.tolist(), - ds.t2m.isel(lat=0).values.tolist()) + self.assertEqual( + sp[0].values.tolist(), ds.t2m.isel(lat=0).values.tolist() + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_03_2arrays(self): # try multiple names and dimension - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', - default_dims={'y': 0}) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=slice(3, 5)) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + default_dims={"y": 0}, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name=["t2m", "u"], x=slice(3, 5)) self.assertEqual(len(sp), 2) - self.assertEqual(sp[0].name, 't2m') - self.assertEqual(sp[1].name, 'u') - self.assertEqual(sp[0].shape, - ds.t2m.isel(lat=0, lon=slice(3, 5)).shape) - self.assertEqual(sp[1].shape, - ds.u.isel(lat=0, lon=slice(3, 5)).shape) - self.assertEqual(sp[0].values.tolist(), - ds.t2m.isel(lat=0, lon=slice(3, 5)).values.tolist()) - self.assertEqual(sp[1].values.tolist(), - ds.u.isel(lat=0, lon=slice(3, 5)).values.tolist()) + self.assertEqual(sp[0].name, "t2m") + self.assertEqual(sp[1].name, "u") + self.assertEqual( + sp[0].shape, ds.t2m.isel(lat=0, lon=slice(3, 5)).shape + ) + self.assertEqual(sp[1].shape, ds.u.isel(lat=0, lon=slice(3, 5)).shape) + self.assertEqual( + sp[0].values.tolist(), + ds.t2m.isel(lat=0, lon=slice(3, 5)).values.tolist(), + ) + self.assertEqual( + sp[1].values.tolist(), + ds.u.isel(lat=0, lon=slice(3, 5)).values.tolist(), + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def 
test_plot_creation_04_2variables(self): # test with array out of 2 variables - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', - default_dims={'y': 0}) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name=[['u', 'v']], x=slice(3, 5), - load=True) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + default_dims={"y": 0}, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter( + ds, name=[["u", "v"]], x=slice(3, 5), load=True + ) self.assertEqual(len(sp), 1) - self.assertIn('variable', sp[0].dims) - self.assertEqual(sp[0].coords['variable'].values.tolist(), ['u', 'v']) - self.assertEqual(list(sp[0].shape), - [2] + list(ds.t2m.isel(lat=0, lon=slice(3, 5)).shape)) - self.assertEqual(sp[0].values.tolist(), - ds[['u', 'v']].to_array().isel( - lat=0, lon=slice(3, 5)).values.tolist()) + self.assertIn("variable", sp[0].dims) + self.assertEqual(sp[0].coords["variable"].values.tolist(), ["u", "v"]) + self.assertEqual( + list(sp[0].shape), + [2] + list(ds.t2m.isel(lat=0, lon=slice(3, 5)).shape), + ) + self.assertEqual( + sp[0].values.tolist(), + ds[["u", "v"]] + .to_array() + .isel(lat=0, lon=slice(3, 5)) + .values.tolist(), + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_05_array_and_2variables(self): # test a combination of them @@ -1366,55 +1738,68 @@ def test_plot_creation_05_array_and_2variables(self): # (variable, time, lev)=(2, 5, 4), lat=88.5721685, lon=1.875, # arr3: 3-dim DataArray of u, v, with # (variable, time, lev)=(2, 5, 4), lat=88.5721685, lon=3.75]) - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', - default_dims={'y': 0}) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, 
name=['t2m', ['u', 'v']], x=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + default_dims={"y": 0}, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name=["t2m", ["u", "v"]], x=[1, 2]) self.assertEqual(len(sp), 4, msg=str(sp)) self.assertEqual(sp[0].shape, ds.t2m.isel(lat=0, lon=1).shape) self.assertEqual(sp[1].shape, ds.t2m.isel(lat=0, lon=2).shape) - self.assertEqual(list(sp[2].shape), - [2] + list(ds.u.isel(lat=0, lon=1).shape)) - self.assertEqual(list(sp[2].shape), - [2] + list(ds.u.isel(lat=0, lon=2).shape)) - self.assertEqual(sp[0].values.tolist(), - ds.t2m.isel(lat=0, lon=1).values.tolist()) - self.assertEqual(sp[1].values.tolist(), - ds.t2m.isel(lat=0, lon=2).values.tolist()) - self.assertEqual(sp[2].values.tolist(), - ds[['u', 'v']].isel( - lat=0, lon=1).to_array().values.tolist()) - self.assertEqual(sp[3].values.tolist(), - ds[['u', 'v']].isel( - lat=0, lon=2).to_array().values.tolist()) + self.assertEqual( + list(sp[2].shape), [2] + list(ds.u.isel(lat=0, lon=1).shape) + ) + self.assertEqual( + list(sp[2].shape), [2] + list(ds.u.isel(lat=0, lon=2).shape) + ) + self.assertEqual( + sp[0].values.tolist(), ds.t2m.isel(lat=0, lon=1).values.tolist() + ) + self.assertEqual( + sp[1].values.tolist(), ds.t2m.isel(lat=0, lon=2).values.tolist() + ) + self.assertEqual( + sp[2].values.tolist(), + ds[["u", "v"]].isel(lat=0, lon=1).to_array().values.tolist(), + ) + self.assertEqual( + sp[3].values.tolist(), + ds[["u", "v"]].isel(lat=0, lon=2).to_array().values.tolist(), + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_06_list(self): """Test the plot creation with a plotter that takes a list of arrays""" - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', prefer_list=True) - ds = 
psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + prefer_list=True, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) # test the creation of one list # psyplot.project.Project([arr4: psyplot.data.InteractiveList([ # arr0: 4-dim DataArray of t2m, with # (time, lev, lat, lon)=(5, 4, 96, 192), , # arr1: 4-dim DataArray of u, with # (time, lev, lat, lon)=(5, 4, 96, 192), ])]) - sp = psy.plot.test_plotter(ds, name=['t2m', 'u']) + sp = psy.plot.test_plotter(ds, name=["t2m", "u"]) self.assertEqual(len(sp), 1) self.assertEqual(len(sp[0]), 2) - self.assertEqual(sp[0][0].name, 't2m') - self.assertEqual(sp[0][1].name, 'u') + self.assertEqual(sp[0][0].name, "t2m") + self.assertEqual(sp[0][1].name, "u") self.assertEqual(sp[0][0].shape, ds.t2m.shape) self.assertEqual(sp[0][1].shape, ds.u.shape) self.assertEqual(sp[0][0].values.tolist(), ds.t2m.values.tolist()) self.assertEqual(sp[0][1].values.tolist(), ds.u.values.tolist()) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_07_list_and_dims(self): # use dimensions which should result in one list with 4 arrays, @@ -1428,31 +1813,39 @@ def test_plot_creation_07_list_and_dims(self): # (time, lev, lat)=(5, 4, 96), lon=1.875, # arr3: 3-dim DataArray of u, with # (time, lev, lat)=(5, 4, 96), lon=3.75])]) - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', prefer_list=True) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name=['t2m', 'u'], x=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + prefer_list=True, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name=["t2m", "u"], x=[1, 2]) self.assertEqual(len(sp), 1) 
self.assertEqual(len(sp[0]), 4) - self.assertEqual(sp[0][0].name, 't2m') - self.assertEqual(sp[0][1].name, 't2m') - self.assertEqual(sp[0][2].name, 'u') - self.assertEqual(sp[0][3].name, 'u') + self.assertEqual(sp[0][0].name, "t2m") + self.assertEqual(sp[0][1].name, "t2m") + self.assertEqual(sp[0][2].name, "u") + self.assertEqual(sp[0][3].name, "u") self.assertEqual(sp[0][0].shape, ds.t2m.isel(lon=1).shape) self.assertEqual(sp[0][1].shape, ds.t2m.isel(lon=2).shape) self.assertEqual(sp[0][2].shape, ds.u.isel(lon=1).shape) self.assertEqual(sp[0][3].shape, ds.u.isel(lon=2).shape) - self.assertEqual(sp[0][0].values.tolist(), - ds.t2m.isel(lon=1).values.tolist()) - self.assertEqual(sp[0][1].values.tolist(), - ds.t2m.isel(lon=2).values.tolist()) - self.assertEqual(sp[0][2].values.tolist(), - ds.u.isel(lon=1).values.tolist()) - self.assertEqual(sp[0][3].values.tolist(), - ds.u.isel(lon=2).values.tolist()) + self.assertEqual( + sp[0][0].values.tolist(), ds.t2m.isel(lon=1).values.tolist() + ) + self.assertEqual( + sp[0][1].values.tolist(), ds.t2m.isel(lon=2).values.tolist() + ) + self.assertEqual( + sp[0][2].values.tolist(), ds.u.isel(lon=1).values.tolist() + ) + self.assertEqual( + sp[0][3].values.tolist(), ds.u.isel(lon=2).values.tolist() + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_08_list_and_2variables(self): # test with arrays out of 2 variables. 
Should result in a list of @@ -1462,27 +1855,35 @@ def test_plot_creation_08_list_and_2variables(self): # (variable, time, lev, lat)=(2, 5, 4, 96), lon=1.875, # arr1: 4-dim DataArray of t2m, u, with # (variable, time, lev, lat)=(2, 5, 4, 96), lon=3.75])]) - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', prefer_list=True) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name=[[['t2m', 'u']]], x=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + prefer_list=True, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name=[[["t2m", "u"]]], x=[1, 2]) self.assertEqual(len(sp), 1) self.assertEqual(len(sp[0]), 2) - self.assertIn('variable', sp[0][0].dims) - self.assertIn('variable', sp[0][1].dims) - self.assertEqual(list(sp[0][0].shape), - [2] + list(ds.t2m.isel(lon=1).shape)) - self.assertEqual(list(sp[0][1].shape), - [2] + list(ds.u.isel(lon=1).shape)) + self.assertIn("variable", sp[0][0].dims) + self.assertIn("variable", sp[0][1].dims) + self.assertEqual( + list(sp[0][0].shape), [2] + list(ds.t2m.isel(lon=1).shape) + ) + self.assertEqual( + list(sp[0][1].shape), [2] + list(ds.u.isel(lon=1).shape) + ) self.assertEqual( sp[0][0].values.tolist(), - ds[['t2m', 'u']].to_array().isel(lon=1).values.tolist()) + ds[["t2m", "u"]].to_array().isel(lon=1).values.tolist(), + ) self.assertEqual( sp[0][1].values.tolist(), - ds[['t2m', 'u']].to_array().isel(lon=2).values.tolist()) + ds[["t2m", "u"]].to_array().isel(lon=2).values.tolist(), + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_09_list_of_list_of_arrays(self): # test list of list of arrays @@ -1497,34 +1898,43 @@ def test_plot_creation_09_list_of_list_of_arrays(self): # (time, lev, lat)=(5, 4, 96), lon=3.75, # arr1: 3-dim DataArray of 
u, with # (time, lev, lat)=(5, 4, 96), lon=3.75])]) - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', prefer_list=True) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(bt.get_file('test-t2m-u-v.nc'), - name=[['t2m', 'u']], x=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + prefer_list=True, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter( + bt.get_file("test-t2m-u-v.nc"), name=[["t2m", "u"]], x=[1, 2] + ) self.assertEqual(len(sp), 2) self.assertEqual(len(sp[0]), 2) self.assertEqual(len(sp[1]), 2) - self.assertEqual(sp[0][0].name, 't2m') - self.assertEqual(sp[0][1].name, 'u') - self.assertEqual(sp[1][0].name, 't2m') - self.assertEqual(sp[1][1].name, 'u') + self.assertEqual(sp[0][0].name, "t2m") + self.assertEqual(sp[0][1].name, "u") + self.assertEqual(sp[1][0].name, "t2m") + self.assertEqual(sp[1][1].name, "u") self.assertEqual(sp[0][0].shape, ds.t2m.isel(lon=1).shape) self.assertEqual(sp[0][1].shape, ds.u.isel(lon=1).shape) self.assertEqual(sp[1][0].shape, ds.t2m.isel(lon=2).shape) self.assertEqual(sp[1][1].shape, ds.u.isel(lon=2).shape) - self.assertEqual(sp[0][0].values.tolist(), - ds.t2m.isel(lon=1).values.tolist()) - self.assertEqual(sp[0][1].values.tolist(), - ds.u.isel(lon=1).values.tolist()) - self.assertEqual(sp[1][0].values.tolist(), - ds.t2m.isel(lon=2).values.tolist()) - self.assertEqual(sp[1][1].values.tolist(), - ds.u.isel(lon=2).values.tolist()) + self.assertEqual( + sp[0][0].values.tolist(), ds.t2m.isel(lon=1).values.tolist() + ) + self.assertEqual( + sp[0][1].values.tolist(), ds.u.isel(lon=1).values.tolist() + ) + self.assertEqual( + sp[1][0].values.tolist(), ds.t2m.isel(lon=2).values.tolist() + ) + self.assertEqual( + sp[1][1].values.tolist(), ds.u.isel(lon=2).values.tolist() + ) psy.close() ds.close() - 
psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_10_list_array_and_2variables(self): # test list of list with array and an array out of 2 variables @@ -1539,171 +1949,200 @@ def test_plot_creation_10_list_array_and_2variables(self): # (time, lev, lat)=(5, 4, 96), lon=1.875, # arr1: 4-dim DataArray of u, v, with # (variable, time, lev, lat)=(2, 5, 4, 96), lon=1.875])]) - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter', prefer_list=True) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = psy.plot.test_plotter(ds, name=[['t2m', ['u', 'v']]], x=[1, 2]) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + prefer_list=True, + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = psy.plot.test_plotter(ds, name=[["t2m", ["u", "v"]]], x=[1, 2]) self.assertEqual(len(sp), 2) self.assertEqual(len(sp[0]), 2) self.assertEqual(len(sp[1]), 2) - self.assertEqual(sp[0][0].name, 't2m') - self.assertIn('variable', sp[0][1].dims) - self.assertEqual(sp[0][1].coords['variable'].values.tolist(), - ['u', 'v']) - self.assertEqual(sp[1][0].name, 't2m') - self.assertIn('variable', sp[1][1].dims) - self.assertEqual(sp[1][1].coords['variable'].values.tolist(), - ['u', 'v']) + self.assertEqual(sp[0][0].name, "t2m") + self.assertIn("variable", sp[0][1].dims) + self.assertEqual( + sp[0][1].coords["variable"].values.tolist(), ["u", "v"] + ) + self.assertEqual(sp[1][0].name, "t2m") + self.assertIn("variable", sp[1][1].dims) + self.assertEqual( + sp[1][1].coords["variable"].values.tolist(), ["u", "v"] + ) self.assertEqual(sp[0][0].shape, ds.t2m.isel(lon=1).shape) - self.assertEqual(list(sp[0][1].shape), - [2] + list(ds.u.isel(lon=1).shape)) + self.assertEqual( + list(sp[0][1].shape), [2] + list(ds.u.isel(lon=1).shape) + ) self.assertEqual(sp[1][0].shape, ds.t2m.isel(lon=2).shape) - 
self.assertEqual(list(sp[1][1].shape), - [2] + list(ds.u.isel(lon=2).shape)) - self.assertEqual(sp[0][0].values.tolist(), - ds.t2m.isel(lon=1).values.tolist()) - self.assertEqual(sp[0][1].values.tolist(), - ds[['u', 'v']].isel(lon=1).to_array().values.tolist()) - self.assertEqual(sp[1][0].values.tolist(), - ds.t2m.isel(lon=2).values.tolist()) - self.assertEqual(sp[1][1].values.tolist(), - ds[['u', 'v']].isel(lon=2).to_array().values.tolist()) + self.assertEqual( + list(sp[1][1].shape), [2] + list(ds.u.isel(lon=2).shape) + ) + self.assertEqual( + sp[0][0].values.tolist(), ds.t2m.isel(lon=1).values.tolist() + ) + self.assertEqual( + sp[0][1].values.tolist(), + ds[["u", "v"]].isel(lon=1).to_array().values.tolist(), + ) + self.assertEqual( + sp[1][0].values.tolist(), ds.t2m.isel(lon=2).values.tolist() + ) + self.assertEqual( + sp[1][1].values.tolist(), + ds[["u", "v"]].isel(lon=2).to_array().values.tolist(), + ) psy.close() - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") def test_plot_creation_11_post_fmt(self): """Test the :attr:`psyplot.plotter.Plotter.post` formatoption""" - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter') - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) # test whether it is plotted automatically - sp = psy.plot.test_plotter(ds, name='t2m', - post='self.ax.set_title("test")') - self.assertEqual(sp.plotters[0].ax.get_title(), 'test') + sp = psy.plot.test_plotter( + ds, name="t2m", post='self.ax.set_title("test")' + ) + self.assertEqual(sp.plotters[0].ax.get_title(), "test") # test whether the disabling works - sp = psy.plot.test_plotter(ds, name='t2m', enable_post=False, - post='self.ax.set_title("test")') - self.assertEqual(sp.plotters[0].ax.get_title(), '') + sp = 
psy.plot.test_plotter( + ds, name="t2m", enable_post=False, post='self.ax.set_title("test")' + ) + self.assertEqual(sp.plotters[0].ax.get_title(), "") def test_check_data(self): - """Test the :meth:`psyplot.project._PlotterInterface.check_data` method - """ + """Test the :meth:`psyplot.project._PlotterInterface.check_data` method""" class TestPlotter(psyp.Plotter): - @classmethod def check_data(cls, name, dims, is_unstructured): checks, messages = super(TestPlotter, cls).check_data( - name, dims, is_unstructured) - self.assertEqual(name, ['t2m']) + name, dims, is_unstructured + ) + self.assertEqual(name, ["t2m"]) for n, d in zip(name, dims): if test_x: # changed x-coordinate - removed = {'lev', 'time'} + removed = {"lev", "time"} else: - removed = {'lev', 'lon'} - self.assertEqual(len(d), - len(set(ds.t2m.dims) - removed)) + removed = {"lev", "lon"} + self.assertEqual(len(d), len(set(ds.t2m.dims) - removed)) self.assertEqual(set(d), set(ds.t2m.dims) - removed) test_x = False - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) - psy.register_plotter('test_plotter', module='nothing', - plotter_name='dont_care', plotter_cls=TestPlotter, - default_dims={'x': 1}, default_slice=slice(1, 3)) + psy.register_plotter( + "test_plotter", + module="nothing", + plotter_name="dont_care", + plotter_cls=TestPlotter, + default_dims={"x": 1}, + default_slice=slice(1, 3), + ) - psy.plot.test_plotter.check_data(ds, 't2m', {'lev': 3}) + psy.plot.test_plotter.check_data(ds, "t2m", {"lev": 3}) test_x = True - psy.plot.test_plotter.check_data(ds, 't2m', {'lev': 3}, - {'x': {'time'}}) + psy.plot.test_plotter.check_data( + ds, "t2m", {"lev": 3}, {"x": {"time"}} + ) - psy.unregister_plotter('test_plotter') + psy.unregister_plotter("test_plotter") class TestDatasetPlotter(unittest.TestCase): - """Test the Dataset accessor and :class:`psyplot.project.DatasetPlotter` - """ + """Test the Dataset accessor and 
:class:`psyplot.project.DatasetPlotter`""" def setUp(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") tp.results.clear() def test_plotting(self): - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter') - - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - sp = ds.psy.plot.test_plotter(name='t2m') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) + + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + sp = ds.psy.plot.test_plotter(name="t2m") self.assertEqual(len(sp), 1) - self.assertEqual(sp[0].name, 't2m') + self.assertEqual(sp[0].name, "t2m") self.assertEqual(sp[0].shape, ds.t2m.shape) self.assertEqual(sp[0].values.tolist(), ds.t2m.values.tolist()) self.assertIs(sp, psy.gcp()) class TestDataArrayPlotter(unittest.TestCase): - """Test DataArray accessor and :class:`psyplot.project.DataArrayPlotter` - """ + """Test DataArray accessor and :class:`psyplot.project.DataArrayPlotter`""" def setUp(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - plt.close('all') + plt.close("all") def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - plt.close('all') + plt.close("all") tp.results.clear() def test_plotting(self): - psy.register_plotter('test_plotter', - import_plotter=True, module='test_plotter', - plotter_name='TestPlotter') + psy.register_plotter( + "test_plotter", + import_plotter=True, + module="test_plotter", + plotter_name="TestPlotter", + ) - ds = psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) - plotter = 
ds.t2m.psy.plot.test_plotter(fmt1='fmt1 set') - self.assertTrue(plotter.fmt1.value, 'fmt1 set') + ds = psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) + plotter = ds.t2m.psy.plot.test_plotter(fmt1="fmt1 set") + self.assertTrue(plotter.fmt1.value, "fmt1 set") @unittest.skipIf(not with_cdo, "Cdo not installed") class TestCdo(unittest.TestCase): - def setUp(self): - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") def tearDown(self): for identifier in list(psy.registered_plotters): psy.unregister_plotter(identifier) - psy.close('all') - plt.close('all') + psy.close("all") + plt.close("all") tp.results.clear() def test_cdo(self): cdo = psy.Cdo() - sp = cdo.timmean(input=bt.get_file('test-t2m-u-v.nc'), - name='t2m', dims=dict(z=[1, 2])) - with psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) as ds: + sp = cdo.timmean( + input=bt.get_file("test-t2m-u-v.nc"), + name="t2m", + dims=dict(z=[1, 2]), + ) + with psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) as ds: lev = ds.lev.values self.assertEqual(len(sp), 2, msg=str(sp)) - self.assertEqual(sp[0].name, 't2m') - self.assertEqual(sp[1].name, 't2m') + self.assertEqual(sp[0].name, "t2m") + self.assertEqual(sp[1].name, "t2m") self.assertEqual(sp[0].lev.values, lev[1]) self.assertEqual(sp[1].lev.values, lev[2]) self.assertIsNone(sp[0].psy.plotter) @@ -1711,16 +2150,20 @@ def test_cdo(self): def test_cdo_plotter(self): cdo = psy.Cdo() - psy.register_plotter('test_plotter', module='test_plotter', - plotter_name='TestPlotter') - sp = cdo.timmean(input=bt.get_file('test-t2m-u-v.nc'), - name='t2m', dims=dict(z=[1, 2]), - plot_method='test_plotter') - with psy.open_dataset(bt.get_file('test-t2m-u-v.nc')) as ds: + psy.register_plotter( + "test_plotter", module="test_plotter", plotter_name="TestPlotter" + ) + sp = cdo.timmean( + input=bt.get_file("test-t2m-u-v.nc"), + name="t2m", + dims=dict(z=[1, 2]), + plot_method="test_plotter", + ) + with psy.open_dataset(bt.get_file("test-t2m-u-v.nc")) as ds: lev = 
ds.lev.values self.assertEqual(len(sp), 2, msg=str(sp)) - self.assertEqual(sp[0].name, 't2m') - self.assertEqual(sp[1].name, 't2m') + self.assertEqual(sp[0].name, "t2m") + self.assertEqual(sp[1].name, "t2m") self.assertEqual(sp[0].lev.values, lev[1]) self.assertEqual(sp[1].lev.values, lev[2]) self.assertIsInstance(sp[0].psy.plotter, tp.TestPlotter) @@ -1728,18 +2171,17 @@ def test_cdo_plotter(self): class TestMultipleSubplots(unittest.TestCase): - def test_one_subplot(self): - plt.close('all') + plt.close("all") axes = psy.multiple_subplots() self.assertEqual(len(axes), 1) self.assertEqual(plt.get_fignums(), [1]) self.assertEqual(len(plt.gcf().axes), 1) self.assertIs(axes[0], plt.gcf().axes[0]) - plt.close('all') + plt.close("all") def test_multiple_subplots(self): - plt.close('all') + plt.close("all") axes = psy.multiple_subplots(2, 2, 3, 5) self.assertEqual(len(axes), 5) self.assertEqual(plt.get_fignums(), [1, 2]) @@ -1748,8 +2190,8 @@ def test_multiple_subplots(self): it_ax = iter(axes) for ax2 in chain(plt.figure(1).axes, plt.figure(2).axes): self.assertIs(next(it_ax), ax2) - plt.close('all') + plt.close("all") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_rcsetup.py b/tests/test_rcsetup.py index 49d175a..018e570 100644 --- a/tests/test_rcsetup.py +++ b/tests/test_rcsetup.py @@ -1,93 +1,88 @@ """Test module of the :mod:`psyplot.config.rcsetup` module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . +# SPDX-License-Identifier: LGPL-3.0-only -import _base_testing as bt -import os.path as osp import unittest + import six + import psyplot -from psyplot.config.rcsetup import SubDict, RcParams, rcParams +from psyplot.config.rcsetup import RcParams, SubDict, rcParams class SubDictTest(unittest.TestCase): - def test_basic(self): - """Test the basic functionality - """ - d = {'test.1': 'test1', 'test.2': 'test2', - 'test1.1': 'test11', 'test1.2': 'test12'} - sub = SubDict(d, 'test.', pattern_base=r'test\.') - self.assertIn('1', sub) - self.assertIn('2', sub) - self.assertEqual(sub['1'], 'test1') - self.assertEqual(sub['2'], 'test2') - self.assertNotIn('test11', sub.values(), - msg='Item test1.1 catched in %s' % (sub, )) - self.assertNotIn('test12', sub.values(), - msg='Item test1.2 catched in %s' % (sub, )) + """Test the basic functionality""" + d = { + "test.1": "test1", + "test.2": "test2", + "test1.1": "test11", + "test1.2": "test12", + } + sub = SubDict(d, "test.", pattern_base=r"test\.") + self.assertIn("1", sub) + self.assertIn("2", sub) + self.assertEqual(sub["1"], "test1") + self.assertEqual(sub["2"], "test2") + self.assertNotIn( + "test11", sub.values(), msg="Item test1.1 catched in %s" % (sub,) + ) + self.assertNotIn( + "test12", sub.values(), msg="Item test1.2 catched in %s" % (sub,) + ) def test_replace(self): - """Test the replace property - """ - d = {'test.1': 'test1', 'test.2': 'test2', - 'test1.1': 'test11', 
'test1.2': 'test12'} - sub = SubDict(d, 'test.', pattern_base=r'test\.') - sub['test'] = 5 # test something that is not traced back to d - self.assertNotIn('test.1', sub) - self.assertIn('1', sub) + """Test the replace property""" + d = { + "test.1": "test1", + "test.2": "test2", + "test1.1": "test11", + "test1.2": "test12", + } + sub = SubDict(d, "test.", pattern_base=r"test\.") + sub["test"] = 5 # test something that is not traced back to d + self.assertNotIn("test.1", sub) + self.assertIn("1", sub) sub.replace = False sub.trace = True - sub['test.2'] = 4 - self.assertIn('test.1', sub) - self.assertNotIn('1', sub) - self.assertEqual(sub['test.2'], 4) - self.assertEqual(d['test.2'], 4) + sub["test.2"] = 4 + self.assertIn("test.1", sub) + self.assertNotIn("1", sub) + self.assertEqual(sub["test.2"], 4) + self.assertEqual(d["test.2"], 4) sub.replace = True - self.assertNotIn('test.1', sub) - self.assertIn('1', sub) + self.assertNotIn("test.1", sub) + self.assertIn("1", sub) def test_trace(self): """Test the backtracing to the origin dictionary""" - d = {'test.1': 'test1', 'test.2': 'test2', - 'test1.1': 'test11', 'test1.2': 'test12'} - sub = SubDict(d, 'test.', pattern_base=r'test\.', trace=True) - self.assertIn('1', sub) - sub['1'] = 'change in d' - sub['test.3'] = 'test3' # new item - self.assertEqual(d['test.1'], 'change in d') - self.assertEqual(sub['1'], 'change in d') - self.assertIn('3', sub) - self.assertIn('test.3', d) + d = { + "test.1": "test1", + "test.2": "test2", + "test1.1": "test11", + "test1.2": "test12", + } + sub = SubDict(d, "test.", pattern_base=r"test\.", trace=True) + self.assertIn("1", sub) + sub["1"] = "change in d" + sub["test.3"] = "test3" # new item + self.assertEqual(d["test.1"], "change in d") + self.assertEqual(sub["1"], "change in d") + self.assertIn("3", sub) + self.assertIn("test.3", d) sub.trace = False - sub['1'] = 'do not change in d' - sub['4'] = 'test4' - self.assertEqual(d['test.1'], 'change in d') - self.assertEqual(sub['1'], 
'do not change in d') - self.assertIn('4', sub) - self.assertNotIn('4', d) + sub["1"] = "do not change in d" + sub["4"] = "test4" + self.assertEqual(d["test.1"], "change in d") + self.assertEqual(sub["1"], "do not change in d") + self.assertIn("4", sub) + self.assertNotIn("4", d) class RcParamsTest(unittest.TestCase): @@ -96,62 +91,90 @@ class RcParamsTest(unittest.TestCase): @unittest.skipIf(six.PY2, "Missing necessary unittest methods") def test_dump(self): """Test the dumping of the rcParams""" - rc = RcParams(defaultParams={ - 'some.test': [1, lambda i: int(i), 'The documentation'], - 'some.other_test': [2, lambda i: int(i), 'Another documentation']}) + rc = RcParams( + defaultParams={ + "some.test": [1, lambda i: int(i), "The documentation"], + "some.other_test": [ + 2, + lambda i: int(i), + "Another documentation", + ], + } + ) rc.update_from_defaultParams() - rc.HEADER = 'the header' + rc.HEADER = "the header" s = rc.dump(default_flow_style=False) - self.assertIn('the header', s) - self.assertRegex(s, r'# The documentation\n\s*some.test') - self.assertRegex(s, r'# Another documentation\n\s*some.other_test') + self.assertIn("the header", s) + self.assertRegex(s, r"# The documentation\n\s*some.test") + self.assertRegex(s, r"# Another documentation\n\s*some.other_test") def test_catch(self): - rc = RcParams(defaultParams={ - 'some.test': [1, lambda i: int(i), 'The documentation'], - 'some.other_test': [2, lambda i: int(i), 'Another documentation']}) + rc = RcParams( + defaultParams={ + "some.test": [1, lambda i: int(i), "The documentation"], + "some.other_test": [ + 2, + lambda i: int(i), + "Another documentation", + ], + } + ) rc.update_from_defaultParams() with rc.catch(): - rc['some.test'] = 2 - self.assertEqual(rc['some.test'], 2) - self.assertEqual(rc['some.test'], 1) + rc["some.test"] = 2 + self.assertEqual(rc["some.test"], 2) + self.assertEqual(rc["some.test"], 1) - @unittest.skipIf(six.PY2, 'Method not available on Python2') + @unittest.skipIf(six.PY2, 
"Method not available on Python2") def test_error(self): """Test whether the correct Error is raised""" + def validate(i): try: return int(i) - except: + except Exception: raise ValueError("Expected failure") - rc = RcParams(defaultParams={ - 'some.test': [1, validate, 'The documentation'], - 'some.other_test': [2, validate, 'Another documentation']}) + + rc = RcParams( + defaultParams={ + "some.test": [1, validate, "The documentation"], + "some.other_test": [2, validate, "Another documentation"], + } + ) rc.update_from_defaultParams() with self.assertRaisesRegex(ValueError, "Expected failure"): - rc['some.test'] = 'test' + rc["some.test"] = "test" with self.assertRaises(KeyError): - rc['wrong_key'] = 1 - rc._deprecated_map['something'] = ['some.test', lambda x: x] - with self.assertWarnsRegex(UserWarning, rc.msg_depr % ('something', - 'some.test')): - rc['something'] = 3 + rc["wrong_key"] = 1 + rc._deprecated_map["something"] = ["some.test", lambda x: x] + with self.assertWarnsRegex( + UserWarning, rc.msg_depr % ("something", "some.test") + ): + rc["something"] = 3 # check whether the value has been changed correctly - self.assertEqual(rc['some.test'], 3) - rc._deprecated_ignore_map['ignored'] = 'some.test' - with self.assertWarnsRegex(UserWarning, rc.msg_depr_ignore % ( - 'ignored', 'some.test')): - rc['ignored'] = None + self.assertEqual(rc["some.test"], 3) + rc._deprecated_ignore_map["ignored"] = "some.test" + with self.assertWarnsRegex( + UserWarning, rc.msg_depr_ignore % ("ignored", "some.test") + ): + rc["ignored"] = None # check whether the value has not been changed - self.assertEqual(rc['some.test'], 3) + self.assertEqual(rc["some.test"], 3) def test_findall(self): - rc = RcParams(defaultParams={ - 'some.test': [1, lambda i: int(i), 'The documentation'], - 'some.other_test': [2, lambda i: int(i), 'Another documentation']}) + rc = RcParams( + defaultParams={ + "some.test": [1, lambda i: int(i), "The documentation"], + "some.other_test": [ + 2, + lambda i: 
int(i), + "Another documentation", + ], + } + ) rc.update_from_defaultParams() - self.assertEqual(rc.find_all('other'), {'some.other_test': 2}) + self.assertEqual(rc.find_all("other"), {"some.other_test": 2}) @unittest.skipIf(six.PY2, "Missing necessary unittest methods") def test_plugin(self): @@ -164,20 +187,22 @@ def test_plugin(self): return rc = psyplot.rcParams.copy() rc.load_plugins() - self.assertIn('test', rc) - self.assertEqual(rc['test'], 1) + self.assertIn("test", rc) + self.assertEqual(rc["test"], 1) with self.assertRaisesRegex( - ImportError, "plotters have already been defined"): + ImportError, "plotters have already been defined" + ): rc.load_plugins(True) - plotters = test_rc.pop('project.plotters') + plotters = test_rc.pop("project.plotters") try: with self.assertRaisesRegex( - ImportError, "default keys have already been defined"): + ImportError, "default keys have already been defined" + ): rc.load_plugins(True) - except: + except Exception: raise finally: - test_rc['project.plotters'] = plotters + test_rc["project.plotters"] = plotters def test_connect(self): """Test the connection and disconnection to rcParams""" @@ -190,24 +215,24 @@ def update_x(val): def update_y(val): y.update(val) - rcParams.connect('decoder.x', update_x) - rcParams.connect('decoder.y', update_y) + rcParams.connect("decoder.x", update_x) + rcParams.connect("decoder.y", update_y) - rcParams['decoder.x'] = {'test'} - self.assertEqual(x, {'test'}) + rcParams["decoder.x"] = {"test"} + self.assertEqual(x, {"test"}) self.assertEqual(y, set()) - rcParams['decoder.y'] = {'test2'} - self.assertEqual(y, {'test2'}) + rcParams["decoder.y"] = {"test2"} + self.assertEqual(y, {"test2"}) - rcParams.disconnect('decoder.x', update_x) - rcParams['decoder.x'] = {'test3'} - self.assertEqual(x, {'test'}) + rcParams.disconnect("decoder.x", update_x) + rcParams["decoder.x"] = {"test3"} + self.assertEqual(x, {"test"}) rcParams.disconnect() - rcParams['decoder.y'] = {'test4'} - self.assertEqual(y, 
{'test2'}) + rcParams["decoder.y"] = {"test4"} + self.assertEqual(y, {"test2"}) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_utils.py b/tests/test_utils.py index 966f411..936005a 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,30 +1,14 @@ """Module for testing the :mod:`psyplot.utils` module.""" -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. +# SPDX-FileCopyrightText: 2016-2024 University of Lausanne +# SPDX-FileCopyrightText: 2020-2021 Helmholtz-Zentrum Geesthacht + +# SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH # -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . 
+# SPDX-License-Identifier: LGPL-3.0-only -import _base_testing as bt import unittest + import psyplot.utils as utils @@ -35,8 +19,7 @@ def test_descriptor(self): """Test the descriptor functionality""" class Test(object): - - test = utils._temp_bool_prop('test') + test = utils._temp_bool_prop("test") t = Test() @@ -57,10 +40,14 @@ class TestUniqueEverSeen(unittest.TestCase): """Test the :func:`psyplot.utils.unique_everseen` function""" def test_simple(self): - self.assertEqual(list(utils.unique_everseen([1, 1, 2, 3, 4, 3])), - [1, 2, 3, 4]) + self.assertEqual( + list(utils.unique_everseen([1, 1, 2, 3, 4, 3])), [1, 2, 3, 4] + ) def test_key(self): - self.assertEqual(list(utils.unique_everseen([1, 1, 2, 3, 4, 3], - key=lambda i: i % 3)), - [1, 2, 3]) + self.assertEqual( + list( + utils.unique_everseen([1, 1, 2, 3, 4, 3], key=lambda i: i % 3) + ), + [1, 2, 3], + ) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..3be5bb3 --- /dev/null +++ b/tox.ini @@ -0,0 +1,23 @@ +; SPDX-FileCopyrightText: 2021-2024 Helmholtz-Zentrum hereon GmbH +; +; SPDX-License-Identifier: CC0-1.0 + +[tox] + +[testenv] +extras = + testsite + +commands = + # mypy psyplot # disabled for now + isort --check psyplot + black --line-length 79 --check psyplot + flake8 psyplot + pytest -v --cov=psyplot -x + reuse lint + cffconvert --validate + +[pytest] +DJANGO_SETTINGS_MODULE = testproject.settings +python_files = tests.py test_*.py *_tests.py +norecursedirs = .* build dist *.egg venv docs diff --git a/versioneer.py b/versioneer.py deleted file mode 100644 index d5a9070..0000000 --- a/versioneer.py +++ /dev/null @@ -1,1848 +0,0 @@ - -# Version: 0.18 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! 
-* https://github.com/warner/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. 
"myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. 
- -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. 
- -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. `setup.py` and `.git/` are *not* siblings). 
The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other langauges) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. - -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. 
- -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - -### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. - - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. 
-* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - - -## License - -To make Versioneer easier to embed, all its code is dedicated to the public -domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . - -""" - -# Disclaimer -# ---------- -# -# Copyright (C) 2021 Helmholtz-Zentrum Hereon -# Copyright (C) 2020-2021 Helmholtz-Zentrum Geesthacht -# Copyright (C) 2016-2021 University of Lausanne -# -# This file is part of psyplot and is released under the GNU LGPL-3.O license. -# See COPYING and COPYING.LESSER in the root of the repository for full -# licensing details. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3.0 as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU LGPL-3.0 license for more details. 
-# -# You should have received a copy of the GNU LGPL-3.0 license -# along with this program. If not, see . -# -# This file is originally released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -from __future__ import print_function -try: - import configparser -except ImportError: - import ConfigParser as configparser -import errno -import json -import os -import re -import subprocess -import sys - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_root(): - """Get the project root directory. - - We require that all commands are run from the project root, i.e. the - directory that contains setup.py, setup.cfg, and versioneer.py . - """ - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. 
- me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . - setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): - cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, 
args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -LONG_VERSION_PY['git'] = ''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). 
- git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = 
stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. 
The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. 
However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse 
describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: - pass - if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. 
- -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except EnvironmentError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. 
- """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. - - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - 
print("unable to compute version") - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} - - -def get_version(): - """Get the short version string for this project.""" - return get_versions()["version"] - - -def get_cmdclass(cmdclass=None): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. 
- # Also see https://github.com/warner/python-versioneer/issues/52 - - cmds = {} if cmdclass is None else cmdclass.copy() - - # we add "version" to both distutils and setuptools - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? - # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? 
- - # we override different "build_py" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py - else: - from distutils.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_py"] = cmd_build_py - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. - # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? 
- try: - from py2exe.distutils_buildexe import py2exe as _py2exe # py3 - except ImportError: - from py2exe.build_exe import py2exe as _py2exe # py2 - - class cmd_py2exe(_py2exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _py2exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["py2exe"] = cmd_py2exe - - # we override different "sdist" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist - else: - from distutils.command.sdist import sdist as _sdist - - class cmd_sdist(_sdist): - def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old - # version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) - - def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) - cmds["sdist"] = cmd_sdist - - return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. 
You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. - -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -INIT_PY_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - - -def do_setup(): - """Main VCS-independent setup function for installing Versioneer.""" - root = get_root() - try: - cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - 
except EnvironmentError: - old = "" - if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-subst keyword - # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - """Validate the contents of setup.py against Versioneer's expectations.""" - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1)