-rw-r--r--  .circleci/config.yml                                        95
-rw-r--r--  .github/for_maintainers/new_gudhi_version_creation.md       5
-rw-r--r--  .github/for_maintainers/next_release_template.md            10
-rw-r--r--  .github/how_to_use_github_to_contribute_to_gudhi.md         15
-rw-r--r--  .github/next_release.md                                     21
-rw-r--r--  .github/workflows/pip-build-linux.yml                       18
-rw-r--r--  .github/workflows/pip-build-osx.yml                         14
-rw-r--r--  .github/workflows/pip-build-windows.yml                     2
-rw-r--r--  .github/workflows/pip-packaging-linux.yml                   14
-rw-r--r--  .github/workflows/pip-packaging-osx.yml                     19
-rw-r--r--  .github/workflows/pip-packaging-windows.yml                 2
-rw-r--r--  CMakeGUDHIVersion.txt                                       2
-rw-r--r--  biblio/bibliography.bib                                     2
-rw-r--r--  biblio/how_to_cite_gudhi.bib.in                             352
-rw-r--r--  biblio/test/test_biblio.tex                                 7
-rw-r--r--  biblio/test/test_gudhi_citation.tex                         7
-rwxr-xr-x  scripts/build_osx_universal_gmpfr.sh                        47
-rw-r--r--  src/Cech_complex/doc/Intro_cech_complex.h                   2
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex.h               3
-rw-r--r--  src/Cech_complex/utilities/cechcomplex.md                   4
-rw-r--r--  src/Doxyfile.in                                             1
-rw-r--r--  src/GudhUI/view/Viewer.cpp                                  4
-rw-r--r--  src/Nerve_GIC/example/CMakeLists.txt                        34
-rw-r--r--  src/Nerve_GIC/include/gudhi/GIC.h                           18
-rw-r--r--  src/Nerve_GIC/test/CMakeLists.txt                           17
-rw-r--r--  src/Nerve_GIC/utilities/CMakeLists.txt                      36
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h               55
-rw-r--r--  src/Simplex_tree/test/simplex_tree_unit_test.cpp            14
-rw-r--r--  src/Tangential_complex/include/gudhi/Tangential_complex.h   11
-rw-r--r--  src/cmake/modules/GUDHI_submodules.cmake                    2
-rw-r--r--  src/cmake/modules/GUDHI_user_version_target.cmake           7
-rw-r--r--  src/common/doc/main_page.md                                 2
-rw-r--r--  src/python/CMakeLists.txt                                   73
-rw-r--r--  src/python/doc/installation.rst                             6
-rw-r--r--  src/python/doc/representations.rst                          119
-rw-r--r--  src/python/doc/representations_sum.inc                      24
-rw-r--r--  src/python/doc/rips_complex_user.rst                        127
-rw-r--r--  src/python/gudhi/representations/vector_methods.py          143
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                           2
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                           105
-rw-r--r--  src/python/gudhi/tensorflow/perslay.py                      284
-rw-r--r--  src/python/include/Alpha_complex_factory.h                  4
-rw-r--r--  src/python/include/Simplex_tree_interface.h                 26
-rw-r--r--  src/python/setup.py.in                                      2
-rw-r--r--  src/python/test/test_persistence_graphical_tools.py         5
-rw-r--r--  src/python/test/test_perslay.py                             147
-rwxr-xr-x  src/python/test/test_representations.py                    22
-rwxr-xr-x  src/python/test/test_simplex_tree.py                       365
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py                9
49 files changed, 1686 insertions, 619 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index e2df5c87..ef22fbea 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -10,6 +10,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples
command: |
mkdir build
@@ -24,6 +29,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests
command: |
mkdir build
@@ -38,6 +48,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities
command: |
mkdir build
@@ -52,10 +67,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module. Generates and tests the python documentation
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
@@ -64,6 +82,7 @@ jobs:
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 -DWITH_GUDHI_REMOTE_TEST=ON .
cd python
python3 setup.py build_ext --inplace
+ ctest --output-on-failure
make sphinx
cp -R sphinx /tmp/sphinx
python3 setup.py install
@@ -83,10 +102,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Generates the C++ documentation with doxygen
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
@@ -104,6 +126,26 @@ jobs:
path: /tmp/doxygen
destination: doxygen
+ bibliography:
+ docker:
+ - image: gudhi/doxygen_for_gudhi:latest
+ steps:
+ - checkout
+ - run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
+ name: Test the LaTeX bibliography files
+ command: |
+ mkdir build
+ cd build
+ cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
+ cd biblio/test
+ latexmk -pdf -interaction=nonstopmode test_biblio.tex
+ latexmk -pdf -interaction=nonstopmode test_gudhi_citation.tex
+
### With all third parties, except CGAL and Eigen
@@ -114,6 +156,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples without cgal and eigen
command: |
mkdir build
@@ -128,6 +175,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests without cgal and eigen
command: |
mkdir build
@@ -142,6 +194,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities without cgal and eigen
command: |
mkdir build
@@ -156,10 +213,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module without cgal and eigen
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
@@ -176,6 +236,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples without cgal
command: |
mkdir build
@@ -190,6 +255,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests without cgal
command: |
mkdir build
@@ -204,6 +274,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities without cgal
command: |
mkdir build
@@ -218,10 +293,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module without cgal
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
@@ -246,3 +324,4 @@ workflows:
- utils
- python
- doxygen
+ - bibliography
diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md
index 19ef168e..de8d0aa5 100644
--- a/.github/for_maintainers/new_gudhi_version_creation.md
+++ b/.github/for_maintainers/new_gudhi_version_creation.md
@@ -18,11 +18,10 @@ Check in the modifications, build and test the version:
```bash
git submodule update --init
rm -rf build; mkdir build; cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 ..
+cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_REMOTE_TEST=ON -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 ..
make user_version
date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt
tar -czvf gudhi.@GUDHI_VERSION@.tar.gz gudhi.@GUDHI_VERSION@
-md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt
sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt
sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt
@@ -87,7 +86,7 @@ Copy gudhi WebDAV python/@GUDHI_VERSION@ as python/latest (no symbolic link with
* Name the tag: tags/gudhi-release-@GUDHI_VERSION@
* Name the release GUDHI @GUDHI_VERSION@ release
* Write the release note
-* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *md5sum.txt*, *sha256sum.txt*, *sha512sum.txt* files
+* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *sha256sum.txt*, *sha512sum.txt* files
* Tick the *This is a pre-release* check button if this is a release candidate (untick if this is an official version)
* Click the *Publish the release* button
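With md5sum.txt dropped from the procedure above, integrity checking relies on the SHA-2 files alone. A minimal verification sketch, assuming the tarball and both checksum files sit in the current directory:

    sha256sum -c sha256sum.txt    # should print: gudhi.@GUDHI_VERSION@.tar.gz: OK
    sha512sum -c sha512sum.txt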
diff --git a/.github/for_maintainers/next_release_template.md b/.github/for_maintainers/next_release_template.md
index a2805a55..c57ae4eb 100644
--- a/.github/for_maintainers/next_release_template.md
+++ b/.github/for_maintainers/next_release_template.md
@@ -1,16 +1,16 @@
We are pleased to announce the release 3.X.X of the GUDHI library.
-As a major new feature, the GUDHI library now offers ...
+As a major new feature, the GUDHI library now offers **...**
We are now using GitHub to develop the GUDHI library; do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz).
Below is a list of changes made since GUDHI 3.X-1.X-1:
- [Module](link)
- - ...
+ - **...**
- [Module](link)
- - ...
+ - **...**
- Miscellaneous
- The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub.
@@ -26,3 +26,7 @@ Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any
For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/).
+## Contributors
+
+- **...**
+- **...** \ No newline at end of file
diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md
index 738c1ce9..f72bb9d6 100644
--- a/.github/how_to_use_github_to_contribute_to_gudhi.md
+++ b/.github/how_to_use_github_to_contribute_to_gudhi.md
@@ -17,7 +17,7 @@ You can see your fork at https://github.com/LOGIN/gudhi-devel
## Create a local clone on your computer
```bash
-git clone https://github.com/LOGIN/gudhi-devel.git
+git clone --recurse-submodules https://github.com/LOGIN/gudhi-devel.git
```
This creates a directory gudhi-devel, which you are free to move around or rename. For the following, change to that directory:
@@ -25,16 +25,14 @@ This creates a directory gudhi-devel, which you are free to move around or renam
cd gudhi-devel
```
-When you clone the repository, you also need to download the *submodules*.
-
## Submodules
-Hera, used for Wasserstein distance, is available on an external git repository. To download it:
+When you clone the repository, you also need to download the *submodules*. This is done automatically thanks to `--recurse-submodules`.
+If you forgot this option, you can still download them with
```bash
git submodule update --init
```
-[gudhi-deploy](https://github.com/GUDHI/gudhi-deploy) is used for Continuous Integration python
-requirements and will also be downloaded by the above command.
+The submodules appear in the `ext/` subdirectory. There are currently two: [Hera](https://github.com/anigmetov/hera) for distances between persistence diagrams, and [gudhi-deploy](https://github.com/GUDHI/gudhi-deploy) for Continuous Integration.
## Configuring a remote for a fork
```bash
@@ -68,6 +66,11 @@ It is safe, it will not mess with your files.
git submodule sync
git submodule update --init
```
+You can configure `git` to do this automatically with
+```bash
+git config submodule.recurse true
+```
+(add `--global` if you want it to apply to other projects as well)
## Create a branch, based on the current master
```bash
diff --git a/.github/next_release.md b/.github/next_release.md
index d5fcef1c..a74fafc6 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,22 +1,19 @@
-We are pleased to announce the release 3.7.0 of the GUDHI library.
+We are pleased to announce the release 3.8.0 of the GUDHI library.
-As a major new feature, the GUDHI library now offers ...
+As a major new feature, the GUDHI library now offers **...**
We are now using GitHub to develop the GUDHI library; do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.6.0:
+Below is a list of changes made since GUDHI 3.7.1:
- [Module](link)
- - ...
+ - **...**
-- [Rips complex](https://gudhi.inria.fr/python/latest/rips_complex_user.html)
- - Construction now rejects positional arguments, you need to specify `points=X`.
-
-- Installation
- - c++17 is the new minimal standard to compile the library. This implies Visual Studio minimal version is now 2017.
+- [Module](link)
+ - **...**
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.6.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.7.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.7.1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.8.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
@@ -29,3 +26,7 @@ Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any
For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/).
+## Contributors
+
+- **...**
+- **...** \ No newline at end of file
diff --git a/.github/workflows/pip-build-linux.yml b/.github/workflows/pip-build-linux.yml
index 11b6271d..bc4f999e 100644
--- a/.github/workflows/pip-build-linux.yml
+++ b/.github/workflows/pip-build-linux.yml
@@ -12,16 +12,16 @@ jobs:
- uses: actions/checkout@v3
with:
submodules: true
- - name: Build wheel for Python 3.10
+ - name: Build wheel for Python 3.11
run: |
- mkdir build_310
- cd build_310
- cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON310/bin/python ..
+ mkdir build_311
+ cd build_311
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON311/bin/python ..
cd src/python
- $PYTHON310/bin/python setup.py bdist_wheel
+ $PYTHON311/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
- - name: Install and test wheel for Python 3.10
+ - name: Install and test wheel for Python 3.11
run: |
- $PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
- $PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
- $PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
+ $PYTHON311/bin/python -m pip install --user pytest build_311/src/python/dist/*.whl
+ $PYTHON311/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON311/bin/python -m pytest src/python/test/test_alpha_complex.py
diff --git a/.github/workflows/pip-build-osx.yml b/.github/workflows/pip-build-osx.yml
index 59e94ca5..a438124a 100644
--- a/.github/workflows/pip-build-osx.yml
+++ b/.github/workflows/pip-build-osx.yml
@@ -2,13 +2,18 @@ name: pip build osx
on: [push, pull_request]
+env:
+ MACOSX_DEPLOYMENT_TARGET: 10.14
+ _PYTHON_HOST_PLATFORM: macosx-10.14-universal2
+ ARCHFLAGS: "-arch arm64 -arch x86_64"
+
jobs:
build:
runs-on: macos-latest
strategy:
max-parallel: 4
matrix:
- python-version: ['3.10']
+ python-version: ['3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
@@ -24,14 +29,21 @@ jobs:
brew install boost eigen gmp mpfr cgal || true
python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
+ ./scripts/build_osx_universal_gmpfr.sh
+ # Now the universal libraries are in $PWD/deps-uni/lib
- name: Build python wheel
run: |
+ export GMP_LIB_DIR=$PWD/deps-uni/lib
+ export GMPXX_LIB_DIR=$PWD/deps-uni/lib
+ export MPFR_LIB_DIR=$PWD/deps-uni/lib
python --version
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DPython_ADDITIONAL_VERSIONS=3 ..
cd src/python
python setup.py bdist_wheel
+ export PATH="$PATH:`python -m site --user-base`/bin"
+ delocate-wheel --require-archs universal2 -v dist/*.whl
- name: Install and test python wheel
run: |
python -m pip install --user pytest build/src/python/dist/*.whl
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index b3d75706..50bdfe2c 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.10']
+ python-version: ['3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
diff --git a/.github/workflows/pip-packaging-linux.yml b/.github/workflows/pip-packaging-linux.yml
index 285cfa00..14b1cf7a 100644
--- a/.github/workflows/pip-packaging-linux.yml
+++ b/.github/workflows/pip-packaging-linux.yml
@@ -79,6 +79,19 @@ jobs:
$PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
$PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
$PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.11
+ run: |
+ mkdir build_311
+ cd build_311
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON311/bin/python ..
+ cd src/python
+ $PYTHON311/bin/python setup.py bdist_wheel
+ auditwheel repair dist/*.whl
+ - name: Install and test wheel for Python 3.11
+ run: |
+ $PYTHON311/bin/python -m pip install --user pytest build_311/src/python/dist/*.whl
+ $PYTHON311/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON311/bin/python -m pytest src/python/test/test_alpha_complex.py
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
@@ -89,3 +102,4 @@ jobs:
$PYTHON36/bin/python -m twine upload build_38/src/python/wheelhouse/*
$PYTHON36/bin/python -m twine upload build_39/src/python/wheelhouse/*
$PYTHON36/bin/python -m twine upload build_310/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_311/src/python/wheelhouse/*
diff --git a/.github/workflows/pip-packaging-osx.yml b/.github/workflows/pip-packaging-osx.yml
index 3ae840c6..9ddbcfce 100644
--- a/.github/workflows/pip-packaging-osx.yml
+++ b/.github/workflows/pip-packaging-osx.yml
@@ -4,13 +4,18 @@ on:
release:
types: [published]
+env:
+ MACOSX_DEPLOYMENT_TARGET: 10.15
+ _PYTHON_HOST_PLATFORM: macosx-10.15-universal2
+ ARCHFLAGS: "-arch arm64 -arch x86_64"
+
jobs:
build:
runs-on: macos-latest
strategy:
max-parallel: 4
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
@@ -26,8 +31,13 @@ jobs:
brew install boost eigen gmp mpfr cgal || true
python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
+ ./scripts/build_osx_universal_gmpfr.sh
+ # Now the universal libs are in $PWD/deps-uni/lib
- name: Build python wheel
run: |
+ export GMP_LIB_DIR=$PWD/deps-uni/lib
+ export GMPXX_LIB_DIR=$PWD/deps-uni/lib
+ export MPFR_LIB_DIR=$PWD/deps-uni/lib
python --version
mkdir build
cd build
@@ -45,6 +55,7 @@ jobs:
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
run: |
mkdir wheelhouse
- /Users/runner/.local/bin/delocate-listdeps build/src/python/dist/*
- /Users/runner/.local/bin/delocate-wheel --require-archs x86_64 -w wheelhouse build/src/python/dist/*
- python -m twine upload wheelhouse/* \ No newline at end of file
+ export PATH="$PATH:`python -m site --user-base`/bin"
+ delocate-listdeps build/src/python/dist/*
+ delocate-wheel --require-archs universal2 -w wheelhouse build/src/python/dist/*
+ python -m twine upload wheelhouse/*
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 6f544499..df0db9a5 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index 1dab47ab..69c8e9fc 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
# Must conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 7)
+set (GUDHI_MINOR_VERSION 8)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
set (GUDHI_PATCH_VERSION 0a0)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index 0a3ef43d..d8472ad0 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -1090,7 +1090,7 @@ language={English}
@ARTICLE{Reininghaus_Huber_ALL_PSSK,
author = {J. Reininghaus and S. Huber and U. Bauer and R. Kwitt},
title = {A Stable Multi-Scale Kernel for Topological Machine Learning.},
- journal = {Proc. 2015 IEEE Conf. Comp. Vision & Pat. Rec. (CVPR '15)},
+ journal = {Proc. 2015 IEEE Conf. Comp. Vision \& Pat. Rec. (CVPR '15)},
year = {2015}
}
diff --git a/biblio/how_to_cite_gudhi.bib.in b/biblio/how_to_cite_gudhi.bib.in
index 579dbf41..02c09dea 100644
--- a/biblio/how_to_cite_gudhi.bib.in
+++ b/biblio/how_to_cite_gudhi.bib.in
@@ -1,168 +1,262 @@
@book{gudhi:urm
-, title = "{GUDHI} User and Reference Manual"
-, author = "{The GUDHI Project}"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, year = @GUDHI_VERSION_YEAR@
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/"
+, title = {GUDHI User and Reference Manual}
+, author = {The GUDHI Project}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, year = {@GUDHI_VERSION_YEAR@}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/}
}
-@incollection{gudhi:FilteredComplexes
-, author = "Cl\'ement Maria"
-, title = "Filtered Complexes"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__simplex__tree.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CubicalComplex
+, author = {Pawel Dlotko}
+, title = {Cubical complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cubical__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:PersistentCohomology
-, author = "Cl\'ement Maria"
-, title = "Persistent Cohomology"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__persistent__cohomology.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:FilteredComplexes
+, author = {Cl{\'{e}}ment Maria}
+, title = {Filtered Complexes}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__simplex__tree.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:Contraction
-, author = "David Salinas"
-, title = "Contraction"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__contr.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:ToplexMap
+, author = {Fran{\c{c}}ois Godi}
+, title = {Toplex map}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__toplex__map.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:SkeletonBlocker
-, author = "David Salinas"
-, title = "Skeleton-Blocker"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__skbl.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {David Salinas}
+, title = {Skeleton-Blocker}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__skbl.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Contraction
+, author = {David Salinas}
+, title = {Contraction}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__contr.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:AlphaComplex
-, author = "Vincent Rouvreau"
-, title = "Alpha complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__alpha__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Vincent Rouvreau}
+, title = {Alpha complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__alpha__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:CubicalComplex
-, author = "Pawel Dlotko"
-, title = "Cubical complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cubical__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CechComplex
+, author = {Vincent Rouvreau and Hind Montassif}
+, title = {{\v{C}}ech complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cech__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:RipsComplex
+, author = {Cl{\'{e}}ment Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse}
+, title = {Rips complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__rips__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Collapse
+, author = {Siddharth Pritam and Marc Glisse}
+, title = {Edge collapse}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__edge__collapse.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:WitnessComplex
-, author = "Siargey Kachanovich"
-, title = "Witness complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__witness__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Siargey Kachanovich}
+, title = {Witness complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__witness__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:SubSampling
-, author = "Cl\'ement Jamin and Siargey Kachanovich"
-, title = "Subsampling"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__subsampling.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CoverComplex
+, author = {Mathieu Carri{\`{e}}re}
+, title = {Cover complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:SpatialSearching
-, author = "Cl\'ement Jamin"
-, title = "Spatial searching"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__spatial__searching.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CoxeterTriangulation
+, author = {Siargey Kachanovich}
+, title = {Coxeter triangulation}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:TangentialComplex
-, author = "Cl\'ement Jamin"
-, title = "Tangential complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__tangential__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Cl{\'{e}}ment Jamin}
+, title = {Tangential complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__tangential__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:RipsComplex
-, author = "Cl\'ement Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse"
-, title = "Rips complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__rips__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:PersistentCohomology
+, author = {Cl{\'{e}}ment Maria}
+, title = {Persistent Cohomology}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__persistent__cohomology.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:BottleneckDistance
-, author = "Fran{{\c{c}}ois Godi"
-, title = "Bottleneck distance"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__bottleneck__distance.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Fran{\c{c}}ois Godi}
+, title = {Bottleneck distance}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__bottleneck__distance.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:cython
-, author = "Vincent Rouvreau"
-, title = "Cython interface"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/python/@GUDHI_VERSION@/"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:PersistenceRepresentations
+, author = {Pawel Dlotko}
+, title = {Persistence representations}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group___persistence__representations.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:CoverComplex
-, author = "Mathieu Carri\`ere"
-, title = "Cover complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:SubSampling
+, author = {Cl{\'{e}}ment Jamin and Siargey Kachanovich}
+, title = {Subsampling}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__subsampling.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:PersistenceRepresentations
-, author = "Pawel Dlotko"
-, title = "Persistence representations"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group___persistence__representations.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:SpatialSearching
+, author = {Cl{\'{e}}ment Jamin}
+, title = {Spatial searching}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__spatial__searching.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:Collapse
-, author = "Siddharth Pritam and Marc Glisse"
-, title = "Edge collapse"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__edge__collapse.html"
-, year = @GUDHI_VERSION_YEAR@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Python specific gudhi modules
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+@incollection{gudhi:WeightedRipsComplex
+, author = {Rapha{\"{e}}l Tinarrage and Yuichi Ike and Masatoshi Takenouchi}
+, title = {Weighted Rips Complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/rips_complex_user.html#weighted-rips-complex}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:DTMRipsComplex
+, author = {Yuichi Ike}
+, title = {DTM Rips Complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/rips_complex_user.html#dtm-rips-complex}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:WassersteinDistance
+, author = {Th{\'{e}}o Lacombe and Marc Glisse}
+, title = {Wasserstein distance}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/wasserstein_distance_user.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:PersistenceRepresentationsScikitlearnInterface
+, author = {Mathieu Carri{\`{e}}re and Gard Spreemann and Wojciech Reise}
+, title = {Persistence representations scikit-learn like interface}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/representations.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Atol
+, author = {Martin Royer}
+, title = {Measure Vectorization for Automatic Topologically-Oriented Learning}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/representations.html#gudhi.representations.vector_methods.Atol}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:DistanceToMeasure
+, author = {Marc Glisse}
+, title = {Distance to measure}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/point_cloud.html#module-gudhi.point_cloud.knn}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:PersistenceBasedClustering
+, author = {Marc Glisse}
+, title = {persistence-based clustering}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/clustering.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
diff --git a/biblio/test/test_biblio.tex b/biblio/test/test_biblio.tex
new file mode 100644
index 00000000..97dee9ed
--- /dev/null
+++ b/biblio/test/test_biblio.tex
@@ -0,0 +1,7 @@
+\documentclass{article}
+\usepackage{hyperref}
+\bibliographystyle{plainurl}
+\begin{document}
+\nocite{*}
+\bibliography{../bibliography}
+\end{document} \ No newline at end of file
diff --git a/biblio/test/test_gudhi_citation.tex b/biblio/test/test_gudhi_citation.tex
new file mode 100644
index 00000000..5fb2d33d
--- /dev/null
+++ b/biblio/test/test_gudhi_citation.tex
@@ -0,0 +1,7 @@
+\documentclass{article}
+\usepackage{hyperref}
+\bibliographystyle{plainurl}
+\begin{document}
+\nocite{*}
+\bibliography{../how_to_cite_gudhi}
+\end{document} \ No newline at end of file
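These two test documents are compiled by the new bibliography CircleCI job above. A local reproduction sketch, assuming a build directory configured with -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version as in that job:

    cd build/biblio/test
    latexmk -pdf -interaction=nonstopmode test_biblio.tex
    latexmk -pdf -interaction=nonstopmode test_gudhi_citation.tex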
diff --git a/scripts/build_osx_universal_gmpfr.sh b/scripts/build_osx_universal_gmpfr.sh
new file mode 100755
index 00000000..3dafa3ce
--- /dev/null
+++ b/scripts/build_osx_universal_gmpfr.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -e
+
+# In the working directory, creates deps-uni/lib/*
+# Assumes that the user has enough rights to run brew fetch
+
+# Downloading
+mkdir deps-amd64
+cd deps-amd64
+tar xf "`brew fetch --bottle-tag=big_sur gmp | sed -ne 's/^Downloaded to: //p'`"
+tar xf "`brew fetch --bottle-tag=big_sur mpfr | sed -ne 's/^Downloaded to: //p'`"
+cd ..
+mkdir deps-arm64
+cd deps-arm64
+tar xf "`brew fetch --bottle-tag=arm64_big_sur gmp | sed -ne 's/^Downloaded to: //p'`"
+tar xf "`brew fetch --bottle-tag=arm64_big_sur mpfr | sed -ne 's/^Downloaded to: //p'`"
+cd ..
+
+# Merging
+mkdir -p deps-uni/lib
+GMP1=deps-amd64/gmp/*/lib/libgmp.*.dylib
+GMP=`basename $GMP1`
+GMPXX1=deps-amd64/gmp/*/lib/libgmpxx.*.dylib
+GMPXX=`basename $GMPXX1`
+MPFR1=deps-amd64/mpfr/*/lib/libmpfr.*.dylib
+MPFR=`basename $MPFR1`
+lipo -create $GMP1 deps-arm64/gmp/*/lib/$GMP -output deps-uni/lib/$GMP
+lipo -create $GMPXX1 deps-arm64/gmp/*/lib/$GMPXX -output deps-uni/lib/$GMPXX
+lipo -create $MPFR1 deps-arm64/mpfr/*/lib/$MPFR -output deps-uni/lib/$MPFR
+
+# Necessary even for libs created by lipo
+install_name_tool -id $PWD/deps-uni/lib/$GMP deps-uni/lib/$GMP
+install_name_tool -id $PWD/deps-uni/lib/$GMPXX deps-uni/lib/$GMPXX
+install_name_tool -id $PWD/deps-uni/lib/$MPFR deps-uni/lib/$MPFR
+# Also fix dependencies
+BADGMP=`otool -L deps-uni/lib/$MPFR|sed -ne 's/[[:space:]]*\(.*libgmp\..*dylib\).*/\1/p'`
+install_name_tool -change $BADGMP $PWD/deps-uni/lib/$GMP deps-uni/lib/$MPFR
+BADGMP=`otool -L deps-uni/lib/$GMPXX|sed -ne 's/[[:space:]]*\(.*libgmp\..*dylib\).*/\1/p'`
+install_name_tool -change $BADGMP $PWD/deps-uni/lib/$GMP deps-uni/lib/$GMPXX
+
+ln -s $GMP deps-uni/lib/libgmp.dylib
+ln -s $GMPXX deps-uni/lib/libgmpxx.dylib
+ln -s $MPFR deps-uni/lib/libmpfr.dylib
+
+# Debug
+ls -l deps-uni/lib
+otool -L deps-uni/lib/*.*.dylib
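The script already lists the merged libraries for debugging; a quick sanity check that the lipo outputs are truly universal, sketched with the same standard macOS tools the script uses:

    lipo -info deps-uni/lib/*.dylib           # each should report both: x86_64 arm64
    otool -L deps-uni/lib/libgmpxx.*.dylib    # libgmp dependency should resolve into deps-uni/lib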
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 595fb64b..73093c07 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -17,7 +17,7 @@ namespace cech_complex {
/** \defgroup cech_complex Čech complex
*
- * \author Vincent Rouvreau
+ * \author Vincent Rouvreau, Hind Montassif
*
* @{
*
diff --git a/src/Cech_complex/include/gudhi/Cech_complex.h b/src/Cech_complex/include/gudhi/Cech_complex.h
index 625f7c9c..dbdf5e93 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex.h
@@ -1,11 +1,12 @@
/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
* See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Vincent Rouvreau
+ * Author(s): Vincent Rouvreau, Hind Montassif
*
* Copyright (C) 2018 Inria
*
* Modification(s):
* - YYYY/MM Author: Description of the modification
+ * - 2022/02 Hind Montassif : Replace MiniBall with Sphere_circumradius
*/
#ifndef CECH_COMPLEX_H_
diff --git a/src/Cech_complex/utilities/cechcomplex.md b/src/Cech_complex/utilities/cechcomplex.md
index 0e82674d..54c4e88d 100644
--- a/src/Cech_complex/utilities/cechcomplex.md
+++ b/src/Cech_complex/utilities/cechcomplex.md
@@ -36,14 +36,14 @@ where
* `-h [ --help ]` Produce help message
* `-o [ --output-file ]` Name of file in which the persistence diagram is written. Default print in standard output.
-* `-r [ --max-edge-length ]` (default = inf) Maximal length of an edge for the Čech complex construction.
+* `-r [ --max-radius ]` (default = inf) Maximal radius for the Čech complex construction.
* `-d [ --cpx-dimension ]` (default = 1) Maximal dimension of the Čech complex we want to compute.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
* `-e [ --exact ]` for the exact computation version.
* `-f [ --fast ]` for the fast computation version.
-Beware: this program may use a lot of RAM and take a lot of time if `max-edge-length` is set to a large value.
+Beware: this program may use a lot of RAM and take a lot of time if `max-radius` is set to a large value.
**Example 1 with Z/2Z coefficients**
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index 1ec190d9..d5664a49 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -700,7 +700,6 @@ LAYOUT_FILE =
# search path. See also \cite for info how to create references.
CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \
- @CMAKE_SOURCE_DIR@/biblio/how_to_cite_cgal.bib \
@CMAKE_SOURCE_DIR@/biblio/how_to_cite_gudhi.bib
#---------------------------------------------------------------------------
diff --git a/src/GudhUI/view/Viewer.cpp b/src/GudhUI/view/Viewer.cpp
index 6b17c833..2c00f86f 100644
--- a/src/GudhUI/view/Viewer.cpp
+++ b/src/GudhUI/view/Viewer.cpp
@@ -31,7 +31,11 @@ void Viewer::set_bounding_box(const Point_3 & lower_left, const Point_3 & upper_
}
void Viewer::update_GL() {
+#if QGLVIEWER_VERSION >= 0x020700
+ this->update();
+#else
this->updateGL();
+#endif
}
void Viewer::init_scene() {
diff --git a/src/Nerve_GIC/example/CMakeLists.txt b/src/Nerve_GIC/example/CMakeLists.txt
index 4b0f4677..9faf1f3b 100644
--- a/src/Nerve_GIC/example/CMakeLists.txt
+++ b/src/Nerve_GIC/example/CMakeLists.txt
@@ -1,25 +1,21 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( CoordGIC CoordGIC.cpp )
+add_executable ( FuncGIC FuncGIC.cpp )
- add_executable ( CoordGIC CoordGIC.cpp )
- add_executable ( FuncGIC FuncGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(CoordGIC ${TBB_LIBRARIES})
+ target_link_libraries(FuncGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(CoordGIC ${TBB_LIBRARIES})
- target_link_libraries(FuncGIC ${TBB_LIBRARIES})
- endif()
+# Copy files so as not to pollute sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
- add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
-
- add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index 1b1f9323..047fba61 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -17,6 +17,14 @@
#include <mutex>
#endif
+#if __has_include(<CGAL/version.h>)
+# define GUDHI_GIC_USE_CGAL 1
+# include <gudhi/Bottleneck.h>
+#elif __has_include(<hera/bottleneck.h>)
+# define GUDHI_GIC_USE_HERA 1
+# include <hera/bottleneck.h>
+#endif
+
#include <gudhi/Debug_utils.h>
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/reader_utils.h>
@@ -25,7 +33,6 @@
#include <gudhi/Points_off_io.h>
#include <gudhi/distance_functions.h>
#include <gudhi/Persistent_cohomology.h>
-#include <gudhi/Bottleneck.h>
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
@@ -35,8 +42,6 @@
#include <boost/graph/subgraph.hpp>
#include <boost/graph/graph_utility.hpp>
-#include <CGAL/version.h> // for CGAL_VERSION_NR
-
#include <iostream>
#include <vector>
#include <map>
@@ -1228,7 +1233,14 @@ class Cover_complex {
Cboot.set_cover_from_function();
Cboot.find_simplices();
Cboot.compute_PD();
+#ifdef GUDHI_GIC_USE_CGAL
double db = Gudhi::persistence_diagram::bottleneck_distance(this->PD, Cboot.PD);
+#elif defined GUDHI_GIC_USE_HERA
+ double db = hera::bottleneckDistExact(this->PD, Cboot.PD);
+#else
+ double db;
+ throw std::logic_error("This function requires CGAL or Hera for the bottleneck distance.");
+#endif
if (verbose) std::clog << db << std::endl;
distribution.push_back(db);
}
diff --git a/src/Nerve_GIC/test/CMakeLists.txt b/src/Nerve_GIC/test/CMakeLists.txt
index 567bf43f..e012a178 100644
--- a/src/Nerve_GIC/test/CMakeLists.txt
+++ b/src/Nerve_GIC/test/CMakeLists.txt
@@ -1,15 +1,12 @@
project(Graph_induced_complex_tests)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_boost_test)
+include(GUDHI_boost_test)
- add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
- if (TBB_FOUND)
- target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
- endif()
+add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
+endif()
- file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- gudhi_add_boost_test(Nerve_GIC_test_unit)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+gudhi_add_boost_test(Nerve_GIC_test_unit)
diff --git a/src/Nerve_GIC/utilities/CMakeLists.txt b/src/Nerve_GIC/utilities/CMakeLists.txt
index 65a08d9a..4521a992 100644
--- a/src/Nerve_GIC/utilities/CMakeLists.txt
+++ b/src/Nerve_GIC/utilities/CMakeLists.txt
@@ -1,27 +1,23 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( Nerve Nerve.cpp )
+add_executable ( VoronoiGIC VoronoiGIC.cpp )
- add_executable ( Nerve Nerve.cpp )
- add_executable ( VoronoiGIC VoronoiGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve ${TBB_LIBRARIES})
+ target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(Nerve ${TBB_LIBRARIES})
- target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
- endif()
+file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+# Copy files for not to pollute sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
+ "human.off" "2" "10" "0.3")
- add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
- "human.off" "2" "10" "0.3")
+add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
+ "human.off" "100")
- add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
- "human.off" "100")
-
- install(TARGETS Nerve DESTINATION bin)
- install(TARGETS VoronoiGIC DESTINATION bin)
- install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+install(TARGETS Nerve DESTINATION bin)
+install(TARGETS VoronoiGIC DESTINATION bin)
+install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 9059219c..4177a0b8 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -24,6 +24,8 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/range/adaptor/reversed.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+#include <boost/range/size.hpp>
#include <boost/container/static_vector.hpp>
#ifdef GUDHI_USE_TBB
@@ -702,10 +704,10 @@ class Simplex_tree {
return true;
}
- private:
- /** \brief Inserts a simplex represented by a vector of vertex.
- * @param[in] simplex vector of Vertex_handles, representing the vertices of the new simplex. The vector must be
- * sorted by increasing vertex handle order.
+ protected:
+  /** \brief Inserts a simplex represented by a range of vertices.
+ * @param[in] simplex range of Vertex_handles, representing the vertices of the new simplex. The range must be
+ * sorted by increasing vertex handle order, and not empty.
* @param[in] filtration the filtration value assigned to the new simplex.
* @return If the new simplex is inserted successfully (i.e. it was not in the
* simplicial complex yet) the bool is set to true and the Simplex_handle is the handle assigned
@@ -717,12 +719,13 @@ class Simplex_tree {
* null_simplex.
*
*/
- std::pair<Simplex_handle, bool> insert_vertex_vector(const std::vector<Vertex_handle>& simplex,
+ template <class RandomVertexHandleRange = std::initializer_list<Vertex_handle>>
+ std::pair<Simplex_handle, bool> insert_simplex_raw(const RandomVertexHandleRange& simplex,
Filtration_value filtration) {
Siblings * curr_sib = &root_;
std::pair<Simplex_handle, bool> res_insert;
auto vi = simplex.begin();
- for (; vi != simplex.end() - 1; ++vi) {
+ for (; vi != std::prev(simplex.end()); ++vi) {
GUDHI_CHECK(*vi != null_vertex(), "cannot use the dummy null_vertex() as a real vertex");
res_insert = curr_sib->members_.emplace(*vi, Node(curr_sib, filtration));
if (!(has_children(res_insert.first))) {
@@ -743,9 +746,10 @@ class Simplex_tree {
return std::pair<Simplex_handle, bool>(null_simplex(), false);
}
// otherwise the insertion has succeeded - size is a size_type
- if (static_cast<int>(simplex.size()) - 1 > dimension_) {
+ int dim = static_cast<int>(boost::size(simplex)) - 1;
+ if (dim > dimension_) {
// Update dimension if needed
- dimension_ = static_cast<int>(simplex.size()) - 1;
+ dimension_ = dim;
}
return res_insert;
}
@@ -786,7 +790,7 @@ class Simplex_tree {
// Copy before sorting
std::vector<Vertex_handle> copy(first, last);
std::sort(std::begin(copy), std::end(copy));
- return insert_vertex_vector(copy, filtration);
+ return insert_simplex_raw(copy, filtration);
}
/** \brief Insert a N-simplex and all his subfaces, from a N-simplex represented by a range of
@@ -1119,16 +1123,12 @@ class Simplex_tree {
dimension_ = 1;
}
- root_.members_.reserve(num_vertices(skel_graph));
+ root_.members_.reserve(num_vertices(skel_graph)); // probably useless in most cases
+ auto verts = vertices(skel_graph) | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, get(vertex_filtration_t(), skel_graph, v))); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+    // This automatically sorts the vertices; the graph concept doesn't guarantee the order in which we iterate.
- typename boost::graph_traits<OneSkeletonGraph>::vertex_iterator v_it,
- v_it_end;
- for (std::tie(v_it, v_it_end) = vertices(skel_graph); v_it != v_it_end;
- ++v_it) {
- root_.members_.emplace_hint(
- root_.members_.end(), *v_it,
- Node(&root_, get(vertex_filtration_t(), skel_graph, *v_it)));
- }
std::pair<typename boost::graph_traits<OneSkeletonGraph>::edge_iterator,
typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = edges(skel_graph);
// boost_edges.first is the equivalent to boost_edges.begin()
@@ -1137,7 +1137,7 @@ class Simplex_tree {
auto edge = *(boost_edges.first);
auto u = source(edge, skel_graph);
auto v = target(edge, skel_graph);
- if (u == v) throw "Self-loops are not simplicial";
+ if (u == v) throw std::invalid_argument("Self-loops are not simplicial");
// We cannot skip edges with the wrong orientation and expect them to
// come a second time with the right orientation, that does not always
// happen in practice. emplace() should be a NOP when an element with the
@@ -1156,6 +1156,21 @@ class Simplex_tree {
}
}
+ /** \brief Inserts several vertices.
+ * @param[in] vertices A range of Vertex_handle
+ * @param[in] filt filtration value of the new vertices (the same for all)
+ *
+ * This may be faster than inserting the vertices one by one, especially in a random order.
+ * The complex does not need to be empty before calling this function. However, if a vertex is
+ * already present, its filtration value is not modified, unlike with other insertion functions. */
+ template <class VertexRange>
+ void insert_batch_vertices(VertexRange const& vertices, Filtration_value filt = 0) {
+ auto verts = vertices | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, filt)); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+ if (dimension_ < 0 && !root_.members_.empty()) dimension_ = 0;
+ }
+
/** \brief Expands the Simplex_tree containing only its one skeleton
* until dimension max_dim.
*
@@ -1598,7 +1613,7 @@ class Simplex_tree {
Simplex_tree st_copy = *this;
// Add point for coning the simplicial complex
- this->insert_simplex({maxvert}, -3);
+ this->insert_simplex_raw({maxvert}, -3);
// For each simplex
std::vector<Vertex_handle> vr;
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 79bb5a93..ebcc406c 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -1038,3 +1038,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_boundaries_and_opposite_vertex_iterat
BOOST_CHECK(opposite_vertices.size() == 0);
}
}
+
+BOOST_AUTO_TEST_CASE(batch_vertices) {
+ typedef Simplex_tree<> typeST;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST BATCH VERTEX INSERTION" << std::endl;
+ typeST st;
+ st.insert_simplex_and_subfaces({3}, 1.5);
+ std::vector verts { 2, 3, 5, 6 };
+ st.insert_batch_vertices(verts);
+ BOOST_CHECK(st.num_vertices() == 4);
+ BOOST_CHECK(st.num_simplices() == 4);
+ BOOST_CHECK(st.filtration(st.find({2})) == 0.);
+ BOOST_CHECK(st.filtration(st.find({3})) == 1.5);
+}
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index 56a24af0..ab203ca5 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -56,6 +56,7 @@
#include <string>
#include <cstddef> // for std::size_t
#include <optional>
+#include <numeric> // for std::iota
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_for.h>
@@ -345,10 +346,11 @@ class Tangential_complex {
m_stars.resize(m_points.size());
m_squared_star_spheres_radii_incl_margin.resize(m_points.size(), FT(-1));
#ifdef GUDHI_TC_PERTURB_POSITION
- if (m_points.empty())
+ if (m_points.empty()) {
m_translations.clear();
- else
+ } else {
m_translations.resize(m_points.size(), m_k.construct_vector_d_object()(m_ambient_dim));
+ }
#if defined(GUDHI_USE_TBB)
delete[] m_p_perturb_mutexes;
m_p_perturb_mutexes = new Mutex_for_perturb[m_points.size()];
@@ -623,6 +625,11 @@ class Tangential_complex {
int max_dim = -1;
+ // Insert the ordered vertices first with insert_batch_vertices, so that create_complex avoids quadratic complexity.
+ std::vector<typename Simplex_tree_::Vertex_handle> vertices(m_points.size());
+ std::iota(vertices.begin(), vertices.end(), 0);
+ tree.insert_batch_vertices(vertices);
+
// For each triangulation
for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
// For each cell of the star
diff --git a/src/cmake/modules/GUDHI_submodules.cmake b/src/cmake/modules/GUDHI_submodules.cmake
index c844386d..9ede852d 100644
--- a/src/cmake/modules/GUDHI_submodules.cmake
+++ b/src/cmake/modules/GUDHI_submodules.cmake
@@ -1,3 +1,5 @@
# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
set(HERA_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/include)
set(HERA_INCLUDE_DIR ${HERA_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find hera/{wasserstein.h,bottleneck.h}")
+# Since everything is cleanly under include/hera/, there is no harm in always including it.
+include_directories(${HERA_INCLUDE_DIR})
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 2144ff6f..b9bf1414 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -18,14 +18,17 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
+file(COPY "${CMAKE_SOURCE_DIR}/biblio/test" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio")
# append cgal citation inside bibliography - sphinx cannot deal with more than one bib file
file(READ "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" CGAL_CITATION_CONTENT)
file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib" "${CGAL_CITATION_CONTENT}")
-# Copy biblio directory for user version
+# Copy biblio files for user version
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib ${GUDHI_USER_VERSION_DIR}/biblio/bibliography.bib)
+add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib ${GUDHI_USER_VERSION_DIR}/biblio/how_to_cite_gudhi.bib)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/README.md ${GUDHI_USER_VERSION_DIR}/README.md)
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index ce903405..9b7c2853 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -178,7 +178,7 @@
The set of all simplices is filtered by the radius of their minimal enclosing ball.
</td>
<td width="15%">
- <b>Author:</b> Vincent Rouvreau<br>
+ <b>Author:</b> Vincent Rouvreau, Hind Montassif<br>
<b>Introduced in:</b> GUDHI 2.2.0<br>
<b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
<b>Requires:</b> \ref cgal
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 32ec13bd..198d0c65 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -44,7 +44,7 @@ function( add_gudhi_debug_info DEBUG_INFO )
endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
- if(PYBIND11_FOUND AND CYTHON_FOUND)
+ if(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
# PyBind11 modules
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
@@ -163,10 +163,10 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'clustering/_tomato', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/_points', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
- set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
@@ -291,8 +291,8 @@ if(PYTHONINTERP_FOUND)
# Other .py files
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
- file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
- file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
+ file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
@@ -432,38 +432,38 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/bottleneck_basic_example.py")
add_gudhi_py_test(test_bottleneck_distance)
+ endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- # Cover complex
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- add_test(NAME cover_complex_nerve_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
- -f human.off -c 2 -r 10 -g 0.3)
+ # Cover complex
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ add_test(NAME cover_complex_nerve_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
+ -f human.off -c 2 -r 10 -g 0.3)
- add_test(NAME cover_complex_coordinate_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
- -f human.off -c 0 -v)
+ add_test(NAME cover_complex_coordinate_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
+ -f human.off -c 0 -v)
- add_test(NAME cover_complex_functional_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
- -o lucky_cat.off
- -f lucky_cat_PCA1 -v)
+ add_test(NAME cover_complex_functional_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
+ -o lucky_cat.off
+ -f lucky_cat_PCA1 -v)
- add_test(NAME cover_complex_voronoi_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
- -f human.off -n 700 -v)
+ add_test(NAME cover_complex_voronoi_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
+ -f human.off -n 700 -v)
- add_gudhi_py_test(test_cover_complex)
- endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_gudhi_py_test(test_cover_complex)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Alpha
@@ -572,6 +572,11 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_diff)
endif()
+ # Perslay
+ if(TENSORFLOW_FOUND AND SKLEARN_FOUND)
+ add_gudhi_py_test(test_perslay)
+ endif()
+
# Betti curves
if(SKLEARN_FOUND AND SCIPY_FOUND)
add_gudhi_py_test(test_betti_curve_representations)
@@ -623,10 +628,10 @@ if(PYTHONINTERP_FOUND)
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
- else(PYBIND11_FOUND AND CYTHON_FOUND)
- message("++ Python module will not be compiled because cython and/or pybind11 was/were not found")
+ else(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
+ message("++ Python module will not be compiled because numpy and/or cython and/or pybind11 was/were not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(PYBIND11_FOUND AND CYTHON_FOUND)
+ endif(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 5491542f..7200b2f0 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -396,11 +396,13 @@ mathematics, science, and engineering.
TensorFlow
----------
+:class:`~gudhi.tensorflow.perslay.Perslay` from the :doc:`persistence representations </representations>` module
+requires `TensorFlow <https://www.tensorflow.org/>`_.
The :doc:`cubical complex </cubical_complex_tflow_itf_ref>`, :doc:`simplex tree </ls_simplex_tree_tflow_itf_ref>`
-and :doc:`Rips complex </rips_complex_tflow_itf_ref>` modules require `TensorFlow <https://www.tensorflow.org>`_
+and :doc:`Rips complex </rips_complex_tflow_itf_ref>` modules require `TensorFlow`_
for incorporating them in neural nets.
-`TensorFlow <https://www.tensorflow.org>`_ is also used in some automatic differentiation tests.
+`TensorFlow`_ is also used in some automatic differentiation tests.
Bug reports and contributions
*****************************
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index b0477197..5686974a 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -8,10 +8,16 @@ Representations manual
.. include:: representations_sum.inc
-This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
+This module aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains `PersLay <http://proceedings.mlr.press/v108/carriere20a.html>`_, which is a general neural network layer for performing deep learning with persistence diagrams, implemented in TensorFlow.
A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
+This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-representations.ipynb>`__ explains how to
+efficiently combine machine learning and topological data analysis with the
+:doc:`representations module<representations>` in a scikit-learn fashion. This `notebook <https://github.com/MathieuCarriere/tda-tutorials/blob/perslay/Tuto-GUDHI-perslay-expe.ipynb>`__
+and `this one <https://github.com/MathieuCarriere/tda-tutorials/blob/perslay/Tuto-GUDHI-perslay-visu.ipynb>`__ explain how to use PersLay.
+
+
Examples
--------
@@ -30,8 +36,6 @@ This example computes the first two Landscapes associated to a persistence diagr
l=Landscape(num_landscapes=2,resolution=10).fit_transform(diags)
print(l)
-The output is:
-
.. testoutput::
[[1.02851895 2.05703791 2.57129739 1.54277843 0.89995409 1.92847304
@@ -45,13 +49,71 @@ Various kernels
This small example is also provided
:download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>`
-Machine Learning and Topological Data Analysis
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+PersLay
+^^^^^^^
-This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-representations.ipynb>`_ explains how to
-efficiently combine machine learning and topological data analysis with the
-:doc:`representations module<representations>`.
+.. testsetup:: perslay
+
+ import numpy
+ numpy.set_printoptions(precision=5)
+
+.. testcode:: perslay
+
+ import numpy as np
+ import tensorflow as tf
+ from sklearn.preprocessing import MinMaxScaler
+ import gudhi.representations as gdr
+ import gudhi.tensorflow.perslay as prsl
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = prsl.GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1)
+ weight = prsl.PowerPerslayWeight(1.,0.)
+ perm_op = tf.math.reduce_sum
+
+ perslay = prsl.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+ print(vectors)
+
+.. testcleanup:: perslay
+
+ numpy.set_printoptions(precision=8)
+
+.. testoutput:: perslay
+
+ tf.Tensor(
+ [[[[1.72661e-16]
+ [4.17060e-09]
+ [1.13369e-08]
+ [8.57388e-12]
+ [2.12439e-14]]
+
+ [[4.17151e-09]
+ [1.00741e-01]
+ [2.73843e-01]
+ [3.07242e-02]
+ [7.61575e-05]]
+
+ [[8.03829e-06]
+ [1.58027e+00]
+ [8.29970e-01]
+ [1.23954e+01]
+ [3.07241e-02]]
+
+ [[8.02694e-06]
+ [1.30657e+00]
+ [9.09230e+00]
+ [6.16648e-02]
+ [1.39492e-06]]
+
+ [[9.03313e-13]
+ [1.49548e-07]
+ [1.51460e-04]
+ [1.02051e-06]
+ [7.80935e-16]]]], shape=(1, 5, 5, 1), dtype=float32)
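Perslay is modular: `phi`, `weight` and `perm_op` can be swapped independently. A minimal sketch of a variant, assuming the same `diagrams` ragged tensor as above and that `tf.math.reduce_max` is an acceptable permutation-invariant `perm_op` (output values depend on the chosen samples, so none are shown):

    import numpy as np
    import tensorflow as tf
    import gudhi.tensorflow.perslay as prsl

    # Evaluate each diagram point as a tent function on a 1D grid of 100 samples,
    # then max-pool over the (ragged) point dimension of each diagram.
    phi = prsl.TentPerslayPhi(np.linspace(0., 1., 100).astype(np.float32))
    weight = prsl.PowerPerslayWeight(1., 0.)  # constant weight equal to 1
    perslay = prsl.Perslay(phi=phi, weight=weight, perm_op=tf.math.reduce_max, rho=tf.identity)
    vectors = perslay(diagrams)  # one vector of length 100 per diagram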
Preprocessing
-------------
@@ -80,3 +142,44 @@ Metrics
:members:
:special-members:
:show-inheritance:
+
+PersLay
+-------
+.. autoclass:: gudhi.tensorflow.perslay.Perslay
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Weight functions
+^^^^^^^^^^^^^^^^
+.. autoclass:: gudhi.tensorflow.perslay.GaussianMixturePerslayWeight
+ :members:
+ :special-members:
+ :show-inheritance:
+
+.. autoclass:: gudhi.tensorflow.perslay.GridPerslayWeight
+ :members:
+ :special-members:
+ :show-inheritance:
+
+.. autoclass:: gudhi.tensorflow.perslay.PowerPerslayWeight
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Phi functions
+^^^^^^^^^^^^^
+.. autoclass:: gudhi.tensorflow.perslay.FlatPerslayPhi
+ :members:
+ :special-members:
+ :show-inheritance:
+
+.. autoclass:: gudhi.tensorflow.perslay.GaussianPerslayPhi
+ :members:
+ :special-members:
+ :show-inheritance:
+
+.. autoclass:: gudhi.tensorflow.perslay.TentPerslayPhi
+ :members:
+ :special-members:
+ :show-inheritance:
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
index 4298aea9..f8c31ecf 100644
--- a/src/python/doc/representations_sum.inc
+++ b/src/python/doc/representations_sum.inc
@@ -1,14 +1,16 @@
.. table::
:widths: 30 40 30
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer |
- | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
- | | | :Since: GUDHI 3.1.0 |
- | | | |
- | | | :License: MIT |
- | | | |
- | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | * :doc:`representations` |
- +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+
+ +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer, Gard Spreemann, Wojciech Reise |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn and tensorflow. | |
+ | | | :Since: GUDHI 3.1.0 |
+ | | | |
+ | | | :License: MIT |
+ | | | |
+ | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_, `TensorFlow <installation.html#tensorflow>`_ |
+ | | | |
+ +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index c41a7803..a4e83462 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -34,9 +34,6 @@ A vertex name corresponds to the index of the point in the given range (aka. the
In this example, as edges (4,5), (4,6) and (5,6) are in the complex, simplex (4,5,6) is added with the filtration value
set to :math:`max(filtration(4,5), filtration(4,6), filtration(5,6))`. And so on for simplex (0,1,2,3).
-If the :doc:`RipsComplex <rips_complex_ref>` interfaces are not detailed enough for your need, please refer to
-rips_persistence_step_by_step.cpp C++ example, where the graph construction over the Simplex_tree is more detailed.
-
A Rips complex can easily become huge, even if we limit the length of the edges
and the dimension of the simplices. One easy trick, before building a Rips
complex on a point cloud, is to call :func:`~gudhi.sparsify_point_set` which removes points
@@ -55,6 +52,13 @@ construction of a :class:`~gudhi.RipsComplex` object asks it to build a sparse R
parameter :math:`\varepsilon=0.3`, while the default `sparse=None` builds the
regular Rips complex.
+Another option, which is especially useful if you want to compute persistent homology in "high" dimension (2 or more,
+sometimes even 1), is to build the Rips complex only up to dimension 1 (a graph), then use
+:func:`~gudhi.SimplexTree.collapse_edges` to reduce the size of this graph, and finally call
+:func:`~gudhi.SimplexTree.expansion` to get a simplicial complex of a suitable dimension to compute its homology. This
+trick gives the same persistence diagram as one would get with a plain use of `RipsComplex`, with a complex that is
+often significantly smaller and thus faster to process.
+
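A minimal sketch of this pipeline (point cloud and parameters are only illustrative):

    import gudhi
    points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
    stree = gudhi.RipsComplex(points=points, max_edge_length=12.0).create_simplex_tree(max_dimension=1)
    stree.collapse_edges()   # shrink the graph while preserving persistence
    stree.expansion(2)       # expand to the dimension needed, here for H_1
    diag = stree.persistence()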
Point cloud
-----------
@@ -117,54 +121,44 @@ Notice that if we use
asking for a very sparse version (theory only gives some guarantee on the meaning of the output if `sparse<1`),
2 to 5 edges disappear, depending on the random vertex used to start the sparsification.
-Example from OFF file
-^^^^^^^^^^^^^^^^^^^^^
+Example step by step
+^^^^^^^^^^^^^^^^^^^^
-This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
-points in an OFF file, and max_edge_length value.
-Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
+While :doc:`RipsComplex <rips_complex_ref>` is convenient, for instance to build a simplicial complex in one line
+
+.. testcode::
-Finally, it is asked to display information about the Rips complex.
+ import gudhi
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ cplx = gudhi.RipsComplex(points=points, max_edge_length=12.0).create_simplex_tree(max_dimension=2)
+you can achieve the same result without this class for more flexibility
.. testcode::
- import gudhi
- off_file = gudhi.__root_source_dir__ + '/data/points/alphacomplexdoc.off'
- point_cloud = gudhi.read_points_from_off_file(off_file = off_file)
- rips_complex = gudhi.RipsComplex(points=point_cloud, max_edge_length=12.0)
- simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
- result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
- fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
- print(fmt % tuple(filtered_value))
+ import gudhi
+ from scipy.spatial.distance import cdist
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ distance_matrix = cdist(points, points)
+ cplx = gudhi.SimplexTree.create_from_array(distance_matrix, max_filtration=12.0)
+ cplx.expansion(2)
-the program output is:
+or
-.. testoutput::
+.. testcode::
+
+ import gudhi
+ from scipy.spatial import cKDTree
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ tree = cKDTree(points)
+ edges = tree.sparse_distance_matrix(tree, max_distance=12.0, output_type="coo_matrix")
+ cplx = gudhi.SimplexTree()
+ cplx.insert_edges_from_coo_matrix(edges)
+ cplx.expansion(2)
- Rips complex is of dimension 1 - 18 simplices - 7 vertices.
- [0] -> 0.00
- [1] -> 0.00
- [2] -> 0.00
- [3] -> 0.00
- [4] -> 0.00
- [5] -> 0.00
- [6] -> 0.00
- [2, 3] -> 5.00
- [4, 5] -> 5.39
- [0, 2] -> 5.83
- [0, 1] -> 6.08
- [1, 3] -> 6.32
- [1, 2] -> 6.71
- [5, 6] -> 7.28
- [2, 4] -> 8.94
- [0, 3] -> 9.43
- [4, 6] -> 9.49
- [3, 6] -> 11.00
+
+This way, you can easily add a call to :func:`~gudhi.SimplexTree.collapse_edges` before the expansion,
+use a different metric to compute the matrix, or apply other variations.
Distance matrix
---------------
@@ -223,54 +217,7 @@ until dimension 1 - one skeleton graph in other words), the output is:
[4, 6] -> 9.49
[3, 6] -> 11.00
-Example from csv file
-^^^^^^^^^^^^^^^^^^^^^
-
-This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
-distance matrix in a csv file, and max_edge_length value.
-Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
-
-Finally, it is asked to display information about the Rips complex.
-
-
-.. testcode::
-
- import gudhi
- distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=gudhi.__root_source_dir__ + \
- '/data/distance_matrix/full_square_distance_matrix.csv')
- rips_complex = gudhi.RipsComplex(distance_matrix=distance_matrix, max_edge_length=12.0)
- simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
- result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
- fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
- print(fmt % tuple(filtered_value))
-
-the program output is:
-
-.. testoutput::
-
- Rips complex is of dimension 1 - 18 simplices - 7 vertices.
- [0] -> 0.00
- [1] -> 0.00
- [2] -> 0.00
- [3] -> 0.00
- [4] -> 0.00
- [5] -> 0.00
- [6] -> 0.00
- [2, 3] -> 5.00
- [4, 5] -> 5.39
- [0, 2] -> 5.83
- [0, 1] -> 6.08
- [1, 3] -> 6.32
- [1, 2] -> 6.71
- [5, 6] -> 7.28
- [2, 4] -> 8.94
- [0, 3] -> 9.43
- [4, 6] -> 9.49
- [3, 6] -> 11.00
+If the distance matrix is stored in a CSV file as a lower triangular matrix, like `data/distance_matrix/full_square_distance_matrix.csv` in the Gudhi distribution, you can read it with :func:`~gudhi.read_lower_triangular_matrix_from_csv_file`.
Correlation matrix
------------------
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index a169aee8..ce74aee5 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -13,8 +13,13 @@ import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
-from sklearn.neighbors import DistanceMetric
from sklearn.metrics import pairwise
+try:
+ # New location since 1.0
+ from sklearn.metrics import DistanceMetric
+except ImportError:
+ # Will be removed in 1.3
+ from sklearn.neighbors import DistanceMetric
from .preprocessing import DiagramScaler, BirthPersistenceTransform
@@ -101,7 +106,7 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
"""
return self.fit_transform([diag])[0,:]
-def _automatic_sample_range(sample_range, X, y):
+def _automatic_sample_range(sample_range, X):
"""
Compute and returns sample range from the persistence diagrams if one of the sample_range values is numpy.nan.
@@ -114,7 +119,7 @@ def _automatic_sample_range(sample_range, X, y):
nan_in_range = np.isnan(sample_range)
if nan_in_range.any():
try:
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X)
[mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
[Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
return np.where(nan_in_range, np.array([mx, My]), sample_range)
@@ -124,7 +129,7 @@ def _automatic_sample_range(sample_range, X, y):
return sample_range
-def _trim_on_edges(x, are_endpoints_nan):
+def _trim_endpoints(x, are_endpoints_nan):
if are_endpoints_nan[0]:
x = x[1:]
if are_endpoints_nan[1]:
@@ -132,11 +137,26 @@ def _trim_on_edges(x, are_endpoints_nan):
return x
+def _grid_from_sample_range(self, X):
+ sample_range = np.array(self.sample_range)
+ self.nan_in_range = np.isnan(sample_range)
+ self.new_resolution = self.resolution
+ if not self.keep_endpoints:
+ self.new_resolution += self.nan_in_range.sum()
+ self.sample_range_fixed = _automatic_sample_range(sample_range, X)
+ self.grid_ = np.linspace(self.sample_range_fixed[0], self.sample_range_fixed[1], self.new_resolution)
+ if not self.keep_endpoints:
+ self.grid_ = _trim_endpoints(self.grid_, self.nan_in_range)
+
+
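To illustrate what this shared helper computes for the classes below, a sketch of the `grid_` attribute and the `keep_endpoints` option, following the logic above (a hypothetical two-point diagram):

    import numpy as np
    from gudhi.representations import Landscape

    diag = np.array([[0., 4.], [1., 2.]])
    # Default: NaN endpoints are estimated from the data, then trimmed off,
    # so grid_ holds `resolution` points strictly inside [0, 4].
    l = Landscape(num_landscapes=1, resolution=5).fit([diag])
    # With keep_endpoints=True the exact extremities are kept:
    # grid_ == np.linspace(0., 4., 5).
    l2 = Landscape(num_landscapes=1, resolution=5, keep_endpoints=True).fit([diag])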
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the landscapes are computed.
"""
- def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Landscape class.
@@ -144,10 +164,10 @@ class Landscape(BaseEstimator, TransformerMixin):
num_landscapes (int): number of piecewise-linear functions to output (default 5).
resolution (int): number of samples for all piecewise-linear functions (default 100).
sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -157,9 +177,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.im_range = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- self.im_range = _trim_on_edges(self.im_range, self.nan_in_range)
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -174,7 +192,7 @@ class Landscape(BaseEstimator, TransformerMixin):
"""
Xfit = []
- x_values = self.im_range
+ x_values = self.grid_
for diag in X:
midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
@@ -208,8 +226,11 @@ class Landscape(BaseEstimator, TransformerMixin):
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the silhouette is computed.
"""
- def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Silhouette class.
@@ -217,10 +238,10 @@ class Silhouette(BaseEstimator, TransformerMixin):
weight (function): weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie on lists or numpy arrays of the form [p_x,p_y].
resolution (int): number of samples for the weighted average (default 100).
sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -230,9 +251,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.im_range = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- self.im_range = _trim_on_edges(self.im_range, self.nan_in_range)
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -246,7 +265,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
"""
Xfit = []
- x_values = self.im_range
+ x_values = self.grid_
for diag in X:
midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
@@ -275,36 +294,39 @@ class Silhouette(BaseEstimator, TransformerMixin):
class BettiCurve(BaseEstimator, TransformerMixin):
"""
Compute Betti curves from persistence diagrams. There are several modes of operation: with a given resolution (with or without a sample_range), with a predefined grid, and with neither. With a predefined grid, the class computes the Betti numbers at those grid points. Without a predefined grid, if the resolution is set to None, it can be fit to a list of persistence diagrams and produce a grid that consists of (at least) the filtration values at which at least one of those persistence diagrams changes Betti numbers, and then compute the Betti numbers at those grid points. In the latter mode, the exact Betti curve is computed for the entire real line. Otherwise, if the resolution is given, the Betti curve is obtained by sampling evenly using either the given sample_range or a range based on the persistence diagrams.
- """
- def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None):
- """
- Constructor for the BettiCurve class.
+ Examples
+ --------
+ If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
- Parameters:
- resolution (int): number of sample for the piecewise-constant function (default 100).
- sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
- predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
+ >>> result = bc(pd) # doctest: +SKIP
- Attributes:
- grid_ (1d array): The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not, the grid is fitted to capture all filtration values at which the Betti numbers change.
+ and
- Examples
- --------
- If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
+ >>> from scipy.interpolate import interp1d # doctest: +SKIP
+ >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
+ >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
+ >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
+ >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
- >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
- >>> result = bc(pd) # doctest: +SKIP
+ are the same.
- and
+ Attributes
+ ----------
+ grid_ : 1d array
+ The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not and resolution is None, the grid is fitted to capture all filtration values at which the Betti numbers change.
+ """
- >>> from scipy.interpolate import interp1d # doctest: +SKIP
- >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
- >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
- >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
- >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None, *, keep_endpoints=False):
+ """
+ Constructor for the BettiCurve class.
- are the same.
+ Parameters:
+ resolution (int): number of samples for the piecewise-constant function (default 100), or None for the exact curve.
+ sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ keep_endpoints (bool): when computing `sample_range` (fixed `resolution`, no `predefined_grid`), use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
if (predefined_grid is not None) and (not isinstance(predefined_grid, np.ndarray)):
@@ -313,6 +335,7 @@ class BettiCurve(BaseEstimator, TransformerMixin):
self.predefined_grid = predefined_grid
self.resolution = resolution
self.sample_range = sample_range
+ self.keep_endpoints = keep_endpoints
def is_fitted(self):
return hasattr(self, "grid_")
@@ -331,8 +354,7 @@ class BettiCurve(BaseEstimator, TransformerMixin):
events = np.unique(np.concatenate([pd.flatten() for pd in X] + [[-np.inf]], axis=0))
self.grid_ = np.array(events)
else:
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.grid_ = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ _grid_from_sample_range(self, X)
else:
self.grid_ = self.predefined_grid # Get the predefined grid from user
@@ -431,8 +453,11 @@ class BettiCurve(BaseEstimator, TransformerMixin):
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired by Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+
+ Attributes:
+ grid_ (1d array): In vector mode, the grid on which the entropy summary function is computed.
"""
- def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Entropy class.
@@ -441,8 +466,10 @@ class Entropy(BaseEstimator, TransformerMixin):
normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -452,7 +479,9 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
+ if self.mode == "vector":
+ _grid_from_sample_range(self, X)
+ self.step_ = self.grid_[1] - self.grid_[0]
return self
def transform(self, X):
@@ -466,8 +495,6 @@ class Entropy(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
"""
num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
@@ -482,8 +509,8 @@ class Entropy(BaseEstimator, TransformerMixin):
ent = np.zeros(self.resolution)
for j in range(num_pts_in_diag):
[px,py] = orig_diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ min_idx = np.clip(np.ceil((px - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
ent[min_idx:max_idx]-=p[j]*np.log(p[j])
if self.normalized:
ent = ent / np.linalg.norm(ent, ord=1)
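A short usage sketch of the vector mode after this change, with a hypothetical diagram (output values omitted):

    import numpy as np
    from gudhi.representations import Entropy

    diag = np.array([[0., 4.], [1., 2.], [3., 8.]])
    E = Entropy(mode="vector", resolution=10, normalized=True)
    esf = E.fit_transform([diag])   # one row of length `resolution` per diagram
    grid, step = E.grid_, E.step_   # sample points and step computed in fit()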
@@ -683,17 +710,17 @@ class Atol(BaseEstimator, TransformerMixin):
>>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
>>> c = np.array([[3, 2, -1], [1, 2, -1]])
>>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
- >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP
- >>> # array([[ 2. , 0.66666667, 3.33333333],
- >>> # [ 2.6 , 2.8 , -0.4 ]])
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers
+ array([[ 2.6 , 2.8 , -0.4 ],
+ [ 2. , 0.66666667, 3.33333333]])
>>> atol_vectoriser(a)
- >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP
+ array([0.42375966, 1.18168665])
>>> atol_vectoriser(c)
- >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP
- >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP
- >>> # array([[1.18168665, 0.42375966],
- >>> # [0.29861028, 1.06330156],
- >>> # [0.02062512, 1.25157463]])
+ array([1.25157463, 0.02062512])
+ >>> atol_vectoriser.transform(X=[a, b, c])
+ array([[0.42375966, 1.18168665],
+ [1.06330156, 0.29861028],
+ [1.25157463, 0.02062512]])
"""
# Note the example above must be up to date with the one in tests called test_atol_doc
def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
@@ -744,6 +771,8 @@ class Atol(BaseEstimator, TransformerMixin):
measures_concat = np.concatenate(X)
self.quantiser.fit(X=measures_concat, sample_weight=sample_weight)
self.centers = self.quantiser.cluster_centers_
+ # Hack: sort the centers so that their order does not depend on the sklearn version
+ self.centers = self.centers[np.lexsort(self.centers.T)]
if self.quantiser.n_clusters == 1:
dist_centers = pairwise.pairwise_distances(measures_concat)
np.fill_diagonal(dist_centers, 0)
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 5642f82d..5309c6fa 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -56,6 +56,8 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
int upper_bound_dimension() nogil
bool find_simplex(vector[int] simplex) nogil
bool insert(vector[int] simplex, double filtration) nogil
+ void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) nogil except +
+ void insert_batch_vertices(vector[int] v, double f) nogil except +
vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil
vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil
void expansion(int max_dim) nogil except +
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 05bfe22e..4cf176f5 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -8,14 +8,24 @@
# - YYYY/MM Author: Description of the modification
from cython.operator import dereference, preincrement
-from libc.stdint cimport intptr_t
+from libc.stdint cimport intptr_t, int32_t, int64_t
import numpy as np
cimport gudhi.simplex_tree
+cimport cython
+from numpy.math cimport INFINITY
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
+ctypedef fused some_int:
+ int32_t
+ int64_t
+
+ctypedef fused some_float:
+ float
+ double
+
cdef bool callback(vector[int] simplex, void *blocker_func):
return (<object>blocker_func)(simplex)
@@ -228,6 +238,91 @@ cdef class SimplexTree:
"""
return self.get_ptr().insert(simplex, <double>filtration)
+ @staticmethod
+ @cython.boundscheck(False)
+ def create_from_array(filtrations, double max_filtration=INFINITY):
+ """Creates a new, empty complex and inserts vertices and edges. The vertices are numbered from 0 to n-1, and
+ the filtration values are encoded in the array, with the diagonal representing the vertices. It is the
+ caller's responsibility to ensure that this defines a filtration, which can be achieved with either::
+
+ filtrations[np.diag_indices_from(filtrations)] = filtrations.min(axis=1)
+
+ or::
+
+ diag = filtrations.diagonal()
+ filtrations = np.fmax(np.fmax(filtrations, diag[:, None]), diag[None, :])
+
+ :param filtrations: the filtration values of the vertices and edges to insert. The matrix is assumed to be symmetric.
+ :type filtrations: numpy.ndarray of shape (n,n)
+ :param max_filtration: only insert vertices and edges with filtration values no larger than max_filtration
+ :type max_filtration: float
+ :returns: the new complex
+ :rtype: SimplexTree
+ """
+ # TODO: document which half of the matrix is actually read?
+ filtrations = np.asanyarray(filtrations, dtype=float)
+ cdef double[:,:] F = filtrations
+ ret = SimplexTree()
+ cdef int n = F.shape[0]
+ assert n == F.shape[1], 'create_from_array() expects a square array'
+ with nogil:
+ ret.get_ptr().insert_matrix(&F[0,0], n, F.strides[0], F.strides[1], max_filtration)
+ return ret
+
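A usage sketch, assuming scipy is available; a distance matrix has a zero diagonal and therefore already defines a filtration:

    import numpy as np
    import gudhi
    from scipy.spatial.distance import cdist

    points = np.random.rand(5, 2)
    distance_matrix = cdist(points, points)   # symmetric, zero diagonal
    st = gudhi.SimplexTree.create_from_array(distance_matrix, max_filtration=0.5)
    st.expansion(2)                           # optional: fill in triangles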
+ def insert_edges_from_coo_matrix(self, edges):
+ """Inserts edges given by a sparse matrix in `COOrdinate format
+ <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html>`_.
+ If an edge is repeated, the smallest filtration value is used. Missing entries are not inserted.
+ Diagonal entries are currently interpreted as vertices, although we do not guarantee this behavior
+ in the future. Since vertices are implicitly inserted together with the edges, this is only useful
+ if you want to insert vertices with a smaller filtration value than the smallest edge containing them.
+
+ :param edges: the edges to insert and their filtration values.
+ :type edges: scipy.sparse.coo_matrix of shape (n,n)
+
+ .. seealso:: :func:`insert_batch`
+ """
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ self.get_ptr().insert_batch_vertices(np.unique(np.stack((edges.row, edges.col))), INFINITY)
+ # TODO: optimize this?
+ for edge in zip(edges.row, edges.col, edges.data):
+ self.get_ptr().insert((edge[0], edge[1]), edge[2])
+
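A usage sketch with a hypothetical three-edge graph:

    import numpy as np
    from scipy.sparse import coo_matrix
    import gudhi

    # Edges (0,1), (1,2) and (0,2), with their filtration values as entries.
    edges = coo_matrix((np.array([0.5, 0.7, 0.9]),
                        (np.array([0, 1, 0]), np.array([1, 2, 2]))), shape=(3, 3))
    st = gudhi.SimplexTree()
    st.insert_edges_from_coo_matrix(edges)
    st.expansion(2)   # the triangle (0,1,2) appears at filtration 0.9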
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def insert_batch(self, some_int[:,:] vertex_array, some_float[:] filtrations):
+ """Inserts k-simplices given by a sparse array in a format similar
+ to `torch.sparse <https://pytorch.org/docs/stable/sparse.html>`_.
+ The n-th simplex has vertices `vertex_array[0,n]`, ...,
+ `vertex_array[k,n]` and filtration value `filtrations[n]`.
+ If a simplex is repeated, the smallest filtration value is used.
+ Simplices with a repeated vertex are currently interpreted as lower
+ dimensional simplices, but we do not guarantee this behavior in the
+ future. Any time a simplex is inserted, its faces are inserted as well
+ if needed to preserve a simplicial complex.
+
+ :param vertex_array: the k-simplices to insert.
+ :type vertex_array: numpy.array of shape (k+1,n)
+ :param filtrations: the filtration values.
+ :type filtrations: numpy.array of shape (n,)
+ """
+ cdef vector[int] vertices = np.unique(vertex_array)
+ cdef Py_ssize_t k = vertex_array.shape[0]
+ cdef Py_ssize_t n = vertex_array.shape[1]
+ assert filtrations.shape[0] == n, 'inconsistent sizes for vertex_array and filtrations'
+ cdef Py_ssize_t i
+ cdef Py_ssize_t j
+ cdef vector[int] v
+ with nogil:
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ # NaN filtration values are currently not handled correctly
+ self.get_ptr().insert_batch_vertices(vertices, INFINITY)
+ for i in range(n):
+ for j in range(k):
+ v.push_back(vertex_array[j, i])
+ self.get_ptr().insert(v, filtrations[i])
+ v.clear()
+
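A usage sketch; note that a (1, n) array inserts n vertices in one batch, which gives batch vertex insertion from Python (the same role the new C++ `insert_batch_vertices` plays internally):

    import numpy as np
    import gudhi

    st = gudhi.SimplexTree()
    # Two triangles, one column per simplex: (0,1,2) at 1.0 and (1,2,3) at 2.0.
    st.insert_batch(np.array([[0, 1], [1, 2], [2, 3]]), np.array([1.0, 2.0]))
    # A (1, n) array inserts n vertices; simplices already present keep smaller values.
    st.insert_batch(np.array([[4, 5]]), np.array([0.0, 0.0]))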
def get_simplices(self):
"""This function returns a generator with simplices and their given
filtration values.
@@ -376,7 +471,7 @@ cdef class SimplexTree:
"""
return self.get_ptr().prune_above_filtration(filtration)
- def expansion(self, max_dim):
+ def expansion(self, max_dimension):
"""Expands the simplex tree containing only its one skeleton
until dimension max_dimension.
@@ -390,10 +485,10 @@ cdef class SimplexTree:
The simplex tree must contain no simplex of dimension bigger than
1 when calling the method.
- :param max_dim: The maximal dimension.
- :type max_dim: int
+ :param max_dimension: The maximal dimension.
+ :type max_dimension: int
"""
- cdef int maxdim = max_dim
+ cdef int maxdim = max_dimension
with nogil:
self.get_ptr().expansion(maxdim)
diff --git a/src/python/gudhi/tensorflow/perslay.py b/src/python/gudhi/tensorflow/perslay.py
new file mode 100644
index 00000000..9976c5f3
--- /dev/null
+++ b/src/python/gudhi/tensorflow/perslay.py
@@ -0,0 +1,284 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import tensorflow as tf
+import math
+
+class GridPerslayWeight(tf.keras.layers.Layer):
+ """
+ This is a class for computing a differentiable weight function for persistence diagram points. This function is defined from an array that contains its values on a 2D grid.
+ """
+ def __init__(self, grid, grid_bnds, **kwargs):
+ """
+ Constructor for the GridPerslayWeight class.
+
+ Parameters:
+ grid (n x n numpy array): grid of values.
+ grid_bnds (2 x 2 numpy array): boundaries of the grid, of the form [[min_x, max_x], [min_y, max_y]].
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.grid = tf.Variable(initial_value=grid, trainable=True)
+ self.grid_bnds = grid_bnds
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply GridPerslayWeight on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ """
+ grid_shape = self.grid.shape
+ indices = []
+ for dim in range(2):
+ [m,M] = self.grid_bnds[dim]
+ coords = tf.expand_dims(diagrams[:,:,dim],-1)
+ ids = grid_shape[dim]*(coords-m)/(M-m)
+ indices.append(tf.cast(ids, tf.int32))
+ weight = tf.gather_nd(params=self.grid, indices=tf.concat(indices, axis=2))
+ return weight
+
+class GaussianMixturePerslayWeight(tf.keras.layers.Layer):
+ """
+ This is a class for computing a differentiable weight function for persistence diagram points. This function is defined from a mixture of Gaussian functions.
+ """
+ def __init__(self, gaussians, **kwargs):
+ """
+ Constructor for the GaussianMixturePerslayWeight class.
+
+ Parameters:
+ gaussians (4 x n numpy array): parameters of the n Gaussian functions, of the form transpose([[mu_x^1, mu_y^1, sigma_x^1, sigma_y^1], ..., [mu_x^n, mu_y^n, sigma_x^n, sigma_y^n]]).
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.W = tf.Variable(initial_value=gaussians, trainable=True)
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply GaussianMixturePerslayWeight on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ """
+ means = tf.expand_dims(tf.expand_dims(self.W[:2,:],0),0)
+ variances = tf.expand_dims(tf.expand_dims(self.W[2:,:],0),0)
+ diagrams = tf.expand_dims(diagrams, -1)
+ dists = tf.math.multiply(tf.math.square(diagrams-means), 1/tf.math.square(variances))
+ weight = tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-dists, axis=2)), axis=2)
+ return weight
+
+class PowerPerslayWeight(tf.keras.layers.Layer):
+ """
+ This is a class for computing a differentiable weight function for persistence diagram points. This function is defined as a constant multiplied by the distance from the persistence diagram point to the diagonal, raised to some power.
+ """
+ def __init__(self, constant, power, **kwargs):
+ """
+ Constructor for the PowerPerslayWeight class.
+
+ Parameters:
+ constant (float): constant value.
+ power (float): power applied to the distance to the diagonal.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.constant = tf.Variable(initial_value=constant, trainable=True)
+ self.power = power
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply PowerPerslayWeight on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ """
+ weight = self.constant * tf.math.pow(tf.math.abs(diagrams[:,:,1]-diagrams[:,:,0]), self.power)
+ return weight
+
+
+class GaussianPerslayPhi(tf.keras.layers.Layer):
+ """
+ This is a class for computing a transformation function for persistence diagram points. This function turns persistence diagram points into 2D Gaussian functions centered on the points, which are then evaluated on a regular 2D grid.
+ """
+ def __init__(self, image_size, image_bnds, variance, **kwargs):
+ """
+ Constructor for the GaussianPerslayPhi class.
+
+ Parameters:
+ image_size (int numpy array): number of grid elements on each grid axis, of the form [n_x, n_y].
+ image_bnds (2 x 2 numpy array): boundaries of the grid, of the form [[min_x, max_x], [min_y, max_y]].
+ variance (float): variance of the Gaussian functions.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.image_size = image_size
+ self.image_bnds = image_bnds
+ self.variance = tf.Variable(initial_value=variance, trainable=True)
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply GaussianPerslayPhi on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ output (n x None x image_size x image_size x 1): ragged tensor containing the evaluations on the 2D grid of the 2D Gaussian functions corresponding to the persistence diagram points, in the form of a 2D image with 1 channel that can be processed with, e.g., convolutional layers. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ output_shape (int numpy array): shape of the output tensor.
+ """
+ diagrams_d = tf.concat([diagrams[:,:,0:1], diagrams[:,:,1:2]-diagrams[:,:,0:1]], axis=2)
+ step = [(self.image_bnds[i][1]-self.image_bnds[i][0])/self.image_size[i] for i in range(2)]
+ coords = [tf.range(self.image_bnds[i][0], self.image_bnds[i][1], step[i]) for i in range(2)]
+ M = tf.meshgrid(*coords)
+ mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0)
+ for _ in range(2):
+ diagrams_d = tf.expand_dims(diagrams_d,-1)
+ dists = tf.math.square(diagrams_d-mu) / (2*tf.math.square(self.variance))
+ gauss = tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*math.pi*tf.math.square(self.variance))
+ output = tf.expand_dims(gauss,-1)
+ output_shape = M[0].shape + tuple([1])
+ return output, output_shape
+
+class TentPerslayPhi(tf.keras.layers.Layer):
+ """
+ This is a class for computing a transformation function for persistence diagram points. This function turns persistence diagram points into 1D tent functions centered on the points (increasing linearly from zero to half of the bar length over the first half of the bar corresponding to the point, decreasing linearly back to zero over the second half, and zero elsewhere), which are then evaluated on a regular 1D grid.
+ """
+ def __init__(self, samples, **kwargs):
+ """
+ Constructor for the TentPerslayPhi class.
+
+ Parameters:
+ samples (float numpy array): grid elements on which to evaluate the tent functions, of the form [x_1, ..., x_n].
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.samples = tf.Variable(initial_value=samples, trainable=True)
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply TentPerslayPhi on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ output (n x None x num_samples): ragged tensor containing the evaluations on the 1D grid of the 1D tent functions corresponding to the persistence diagram points. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ output_shape (int numpy array): shape of the output tensor.
+ """
+ samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0)
+ xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2]
+ output = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), tf.constant([0.]))
+ output_shape = self.samples.shape
+ return output, output_shape
+
+class FlatPerslayPhi(tf.keras.layers.Layer):
+ """
+ This is a class for computing a transformation function for persistence diagram points. This function turns persistence diagram points into 1D constant functions (that evaluate to one on the bar corresponding to the point and zero elsewhere), which are then evaluated on a regular 1D grid.
+ """
+ def __init__(self, samples, theta, **kwargs):
+ """
+ Constructor for the FlatPerslayPhi class.
+
+ Parameters:
+ samples (float numpy array): grid elements on which to evaluate the constant functions, of the form [x_1, ..., x_n].
+ theta (float): sigmoid parameter used to approximate the constant function with a differentiable sigmoid function. The larger theta is, the closer the output is to the constant function.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.samples = tf.Variable(initial_value=samples, trainable=True)
+ self.theta = tf.Variable(initial_value=theta, trainable=True)
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply FlatPerslayPhi on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ output (n x None x num_samples): ragged tensor containing the evaluations on the 1D grid of the 1D constant functions corresponding to the persistence diagram points. The second dimension is ragged since persistence diagrams can have different numbers of points.
+ output_shape (int numpy array): shape of the output tensor.
+ """
+ samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0)
+ xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2]
+ output = 1./(1.+tf.math.exp(-self.theta*(.5*(ys-xs)-tf.math.abs(samples_d-.5*(ys+xs)))))
+ output_shape = self.samples.shape
+ return output, output_shape
+
+class Perslay(tf.keras.layers.Layer):
+ """
+ This is a TensorFlow layer for vectorizing persistence diagrams in a differentiable way within a neural network. It implements the PersLay equation, see `the corresponding article <http://proceedings.mlr.press/v108/carriere20a.html>`_.
+ """
+ def __init__(self, weight, phi, perm_op, rho, **kwargs):
+ """
+ Constructor for the Perslay class.
+
+ Parameters:
+ weight (function): weight function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.perslay.GridPerslayWeight`, :class:`~gudhi.tensorflow.perslay.GaussianMixturePerslayWeight`, :class:`~gudhi.tensorflow.perslay.PowerPerslayWeight`, or a custom TensorFlow function that takes persistence diagrams as argument (represented as an (n x None x 2) ragged tensor, where n is the number of diagrams).
+ phi (function): transformation function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.perslay.GaussianPerslayPhi`, :class:`~gudhi.tensorflow.perslay.TentPerslayPhi`, :class:`~gudhi.tensorflow.perslay.FlatPerslayPhi`, or a custom TensorFlow class (that can have trainable parameters) with a method `call` that takes persistence diagrams as argument (represented as an (n x None x 2) ragged tensor, where n is the number of diagrams).
+ perm_op (function): permutation invariant function, such as `tf.math.reduce_sum`, `tf.math.reduce_mean`, `tf.math.reduce_max`, `tf.math.reduce_min`, or a custom TensorFlow function that takes two arguments: a tensor and an axis on which to apply the permutation invariant operation. If perm_op is a string of the form "topk" (where k is a number, e.g., "top3"), this function is computed as `tf.math.top_k` with parameter `int(k)`.
+ rho (function): postprocessing function that is applied after the permutation invariant operation. Can be any TensorFlow layer.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.weight = weight
+ self.phi = phi
+ self.perm_op = perm_op
+ self.rho = rho
+
+ def build(self, input_shape):
+ return self
+
+ def call(self, diagrams):
+ """
+ Apply Perslay on a ragged tensor containing a list of persistence diagrams.
+
+ Parameters:
+ diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+ Returns:
+ vector (n x output_shape): tensor containing the vectorizations of the persistence diagrams.
+ """
+ vector, dim = self.phi(diagrams)
+ weight = self.weight(diagrams)
+ for _ in range(len(dim)):
+ weight = tf.expand_dims(weight, -1)
+ vector = tf.math.multiply(vector, weight)
+
+ permop = self.perm_op
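+ # A string "top<k>" keeps the k largest weighted values over the points of each diagram, per output coordinate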
+ if isinstance(permop, str) and permop[:3] == 'top':
+ k = int(permop[3:])
+ vector = vector.to_tensor(default_value=-1e10)
+ vector = tf.math.top_k(tf.transpose(vector, perm=[0, 2, 1]), k=k).values
+ vector = tf.reshape(vector, [-1,k*dim[0]])
+ else:
+ vector = permop(vector, axis=1)
+
+ vector = self.rho(vector)
+
+ return vector
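For reference, a minimal usage sketch of the new layer, modeled on the tests added below in test_perslay.py (the diagram values and grid samples here are arbitrary):

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow.perslay import Perslay, TentPerslayPhi, PowerPerslayWeight

    # One diagram with three (birth, death) points, as an (n x None x 2) ragged tensor.
    diagrams = tf.RaggedTensor.from_tensor(
        tf.constant([[[0.2, 0.4], [0.3, 0.9], [0.5, 0.6]]], dtype=tf.float32))

    perslay = Perslay(
        weight=PowerPerslayWeight(1., 0.),  # constant weight 1 on every point
        phi=TentPerslayPhi(np.arange(0., 1., .1, dtype=np.float32)),  # 10 grid samples
        perm_op=tf.math.reduce_sum,  # permutation-invariant aggregation over points
        rho=tf.identity)  # no post-processing
    vector = perslay(diagrams)  # tensor of shape (1, 10)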
diff --git a/src/python/include/Alpha_complex_factory.h b/src/python/include/Alpha_complex_factory.h
index 3d20aa8f..41eb72c1 100644
--- a/src/python/include/Alpha_complex_factory.h
+++ b/src/python/include/Alpha_complex_factory.h
@@ -106,7 +106,7 @@ class Exact_alpha_complex_dD final : public Abstract_alpha_complex {
return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, exact_version_, default_filtration_value);
}
- virtual std::size_t num_vertices() const {
+ virtual std::size_t num_vertices() const override {
return alpha_complex_.num_vertices();
}
@@ -141,7 +141,7 @@ class Inexact_alpha_complex_dD final : public Abstract_alpha_complex {
return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, false, default_filtration_value);
}
- virtual std::size_t num_vertices() const {
+ virtual std::size_t num_vertices() const override {
return alpha_complex_.num_vertices();
}
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 3848c5ad..0317ea39 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -40,6 +40,8 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
using Complex_simplex_iterator = typename Base::Complex_simplex_iterator;
using Extended_filtration_data = typename Base::Extended_filtration_data;
using Boundary_simplex_iterator = typename Base::Boundary_simplex_iterator;
+ using Siblings = typename Base::Siblings;
+ using Node = typename Base::Node;
typedef bool (*blocker_func_t)(Simplex simplex, void *user_data);
public:
@@ -62,6 +64,30 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
return (result.second);
}
+ void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) {
+ // We could delegate to insert_graph, but wrapping the matrix in a graph interface is too much work,
+ // and this is a bit more efficient.
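+ // Only the diagonal (vertex filtrations) and the upper triangle (edge filtrations) of the matrix are read.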
+ auto& rm = this->root()->members_;
+ for(int i=0; i<n; ++i) {
+ char* p = reinterpret_cast<char*>(filtrations) + i * stride0;
+ double fv = *reinterpret_cast<double*>(p + i * stride1);
+ if(fv > max_filtration) continue;
+ auto sh = rm.emplace_hint(rm.end(), i, Node(this->root(), fv));
+ Siblings* children = nullptr;
+ // Should we make a first pass to count the number of edges so we can reserve the right space?
+ for(int j=i+1; j<n; ++j) {
+ double fe = *reinterpret_cast<double*>(p + j * stride1);
+ if(fe > max_filtration) continue;
+ if(!children) {
+ children = new Siblings(this->root(), i);
+ sh->second.assign_children(children);
+ }
+ children->members().emplace_hint(children->members().end(), j, Node(children, fe));
+ }
+ }
+ }
+
// Do not interface this function, only used in alpha complex interface for complex creation
bool insert_simplex(const Simplex& simplex, Filtration_value filtration = 0) {
Insertion_result result = Base::insert_simplex(simplex, filtration);
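On the Python side, the new `insert_matrix` is exercised through `SimplexTree.create_from_array`; a minimal sketch, reusing the matrix from test_create_from_array below:

    import numpy as np
    from gudhi import SimplexTree

    # Square matrix: diagonal entries are vertex filtration values,
    # off-diagonal entries are edge filtration values.
    a = np.array([[ 1.,  4., 13.,  6.],
                  [ 4.,  3., 11.,  5.],
                  [13., 11., 10., 12.],
                  [ 6.,  5., 12.,  2.]])
    st = SimplexTree.create_from_array(a, max_filtration=5.0)
    # Only simplices with filtration <= 5 are kept:
    # vertices [0], [3], [1] and edges [0, 1], [1, 3]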
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index 1ecbe985..6eb0db42 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -48,8 +48,6 @@ ext_modules = cythonize(ext_modules, compiler_directives={'language_level': '3'}
for module in pybind11_modules:
my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
- if module.startswith('hera/'):
- my_include_dirs = ['@HERA_INCLUDE_DIR@'] + my_include_dirs
ext_modules.append(Extension(
'gudhi.' + module.replace('/', '.'),
sources = [source_dir + module + '.cc'],
diff --git a/src/python/test/test_persistence_graphical_tools.py b/src/python/test/test_persistence_graphical_tools.py
index c19836b7..0e2ac3f8 100644
--- a/src/python/test/test_persistence_graphical_tools.py
+++ b/src/python/test/test_persistence_graphical_tools.py
@@ -12,6 +12,7 @@ import gudhi as gd
import numpy as np
import matplotlib as plt
import pytest
+import warnings
def test_array_handler():
@@ -71,13 +72,13 @@ def test_limit_to_max_intervals():
(0, (0.0, 0.106382)),
]
# check no warnings if max_intervals equals to the diagrams number
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
diags, 10, key=lambda life_time: life_time[1][1] - life_time[1][0]
)
# check diagrams are not sorted
assert truncated_diags == diags
- assert len(record) == 0
# check warning if max_intervals lower than the diagrams number
with pytest.warns(UserWarning) as record:
diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py
new file mode 100644
index 00000000..06497712
--- /dev/null
+++ b/src/python/test/test_perslay.py
@@ -0,0 +1,147 @@
+import numpy as np
+import tensorflow as tf
+from sklearn.preprocessing import MinMaxScaler
+from gudhi.tensorflow.perslay import *
+import gudhi.representations as gdr
+
+def test_gaussian_perslay():
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1)
+ weight = PowerPerslayWeight(1.,0.)
+ perm_op = tf.math.reduce_sum
+
+ perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+
+ assert np.linalg.norm(vectors.numpy() - np.array(
+[[[[1.7266072e-16],
+ [4.1706043e-09],
+ [1.1336876e-08],
+ [8.5738821e-12],
+ [2.1243891e-14]],
+
+ [[4.1715076e-09],
+ [1.0074080e-01],
+ [2.7384272e-01],
+ [3.0724244e-02],
+ [7.6157507e-05]],
+
+ [[8.0382870e-06],
+ [1.5802664e+00],
+ [8.2997030e-01],
+ [1.2395413e+01],
+ [3.0724116e-02]],
+
+ [[8.0269419e-06],
+ [1.3065740e+00],
+ [9.0923014e+00],
+ [6.1664842e-02],
+ [1.3949171e-06]],
+
+ [[9.0331329e-13],
+ [1.4954816e-07],
+ [1.5145997e-04],
+ [1.0205092e-06],
+ [7.8093526e-16]]]]) <= 1e-7)
+
+def test_tent_perslay():
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = TentPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32))
+ weight = PowerPerslayWeight(1.,0.)
+ perm_op = 'top3'
+
+ perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+
+ assert np.linalg.norm(vectors-np.array([[0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0.09999999, 0., 0.,
+ 0.2, 0.05, 0., 0.19999999, 0., 0.,
+ 0.09999999, 0.02500001, 0., 0.125, 0., 0.,
+ 0.22500002, 0., 0., 0.3, 0., 0.,
+ 0.19999999, 0.05000001, 0., 0.10000002, 0.10000002, 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0. ]])) <= 1e-7
+
+def test_flat_perslay():
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+ weight = PowerPerslayWeight(1.,0.)
+ perm_op = tf.math.reduce_sum
+
+ perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+
+ assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.8048651e-35, 3.9754645e-31, 8.7565101e-27,
+ 1.9287571e-22, 4.2483860e-18, 9.3576392e-14, 2.0611652e-09, 4.5398087e-05,
+ 5.0000376e-01, 1.0758128e+00, 1.9933071e+00, 1.0072457e+00, 1.9240967e+00,
+ 1.4999963e+00, 1.0000458e+00, 1.0066929e+00, 1.9933071e+00, 1.9999092e+00,
+ 1.0000000e+00, 9.0795562e-05, 4.1222914e-09, 1.8715316e-13, 8.4967405e-18,
+ 3.8574998e-22, 1.7512956e-26, 7.9508388e-31, 3.6097302e-35, 0.0000000e+00]]) <= 1e-7)
+
+def test_gmix_weight():
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+ weight = GaussianMixturePerslayWeight(np.array([[.5],[.5],[5],[5]], dtype=np.float32))
+ perm_op = tf.math.reduce_sum
+
+ perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+
+ assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.7869064e-35, 3.9359080e-31, 8.6693818e-27,
+ 1.9095656e-22, 4.2061142e-18, 9.2645292e-14, 2.0406561e-09, 4.4946366e-05,
+ 4.9502861e-01, 1.0652492e+00, 1.9753191e+00, 9.9723548e-01, 1.9043801e+00,
+ 1.4844525e+00, 9.8947650e-01, 9.9604094e-01, 1.9703994e+00, 1.9769192e+00,
+ 9.8850453e-01, 8.9751818e-05, 4.0749040e-09, 1.8500175e-13, 8.3990662e-18,
+ 3.8131562e-22, 1.7311636e-26, 7.8594399e-31, 3.5682349e-35, 0.0000000e+00]]) <= 1e-7)
+
+def test_grid_weight():
+
+ diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
+ diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
+
+ rho = tf.identity
+ phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+ weight = GridPerslayWeight(np.array(np.random.uniform(size=[100,100]),dtype=np.float32),((-0.01, 1.01),(-0.01, 1.01)))
+ perm_op = tf.math.reduce_sum
+
+ perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+ vectors = perslay(diagrams)
+
+ assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.5124093e-37, 3.3314498e-33, 7.3379791e-29,
+ 1.6163036e-24, 3.5601592e-20, 7.8417273e-16, 1.7272621e-11, 3.8043717e-07,
+ 4.1902456e-03, 1.7198652e-02, 1.2386327e-01, 9.2694648e-03, 1.9515079e-01,
+ 2.0629172e-01, 2.0210314e-01, 2.0442720e-01, 5.4709727e-01, 5.4939687e-01,
+ 2.7471092e-01, 2.4942532e-05, 1.1324385e-09, 5.1413016e-14, 2.3341474e-18,
+ 1.0596973e-22, 4.8110000e-27, 2.1841823e-31, 9.9163230e-36, 0.0000000e+00]]) <= 1e-7)
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index 58caab21..f4ffbdc1 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -161,7 +161,7 @@ def test_entropy_miscalculation():
return -np.dot(l, np.log(l))
sce = Entropy(mode="scalar")
assert [[pe(diag_ex)]] == sce.fit_transform([diag_ex])
- sce = Entropy(mode="vector", resolution=4, normalized=False)
+ sce = Entropy(mode="vector", resolution=4, normalized=False, keep_endpoints=True)
pef = [-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
-1/2*np.log(1/2),
@@ -170,7 +170,7 @@ def test_entropy_miscalculation():
sce = Entropy(mode="vector", resolution=4, normalized=True)
pefN = (sce.fit_transform([diag_ex]))[0]
area = np.linalg.norm(pefN, ord=1)
- assert area==1
+ assert area==pytest.approx(1)
def test_kernel_empty_diagrams():
empty_diag = np.empty(shape = [0, 2])
@@ -249,5 +249,21 @@ def test_landscape_nan_range():
dgm = np.array([[2., 6.], [3., 5.]])
lds = Landscape(num_landscapes=2, resolution=9, sample_range=[np.nan, 6.])
lds_dgm = lds(dgm)
- assert (lds.sample_range[0] == 2) & (lds.sample_range[1] == 6)
+ assert (lds.sample_range_fixed[0] == 2) & (lds.sample_range_fixed[1] == 6)
assert lds.new_resolution == 10
+
+def test_endpoints():
+ diags = [ np.array([[2., 3.]]) ]
+ for vec in [ Landscape(), Silhouette(), BettiCurve(), Entropy(mode="vector") ]:
+ vec.fit(diags)
+ assert vec.grid_[0] > 2 and vec.grid_[-1] < 3
+ for vec in [ Landscape(keep_endpoints=True), Silhouette(keep_endpoints=True), BettiCurve(keep_endpoints=True), Entropy(mode="vector", keep_endpoints=True)]:
+ vec.fit(diags)
+ assert vec.grid_[0] == 2 and vec.grid_[-1] == 3
+ vec = BettiCurve(resolution=None)
+ vec.fit(diags)
+ assert np.equal(vec.grid_, [-np.inf, 2., 3.]).all()
+
+def test_get_params():
+ for vec in [ Landscape(), Silhouette(), BettiCurve(), Entropy(mode="vector") ]:
+ vec.get_params()
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 54bafed5..2ccbfbf5 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -249,6 +249,7 @@ def test_make_filtration_non_decreasing():
assert st.filtration([3, 4]) == 2.0
assert st.filtration([4, 5]) == 2.0
+
def test_extend_filtration():
# Inserted simplex:
@@ -257,86 +258,87 @@ def test_extend_filtration():
# / \ /
# o o
# /2\ /3
- # o o
- # 1 0
-
- st = SimplexTree()
- st.insert([0,2])
- st.insert([1,2])
- st.insert([0,3])
- st.insert([2,5])
- st.insert([3,4])
- st.insert([3,5])
- st.assign_filtration([0], 1.)
- st.assign_filtration([1], 2.)
- st.assign_filtration([2], 3.)
- st.assign_filtration([3], 4.)
- st.assign_filtration([4], 5.)
- st.assign_filtration([5], 6.)
-
- assert list(st.get_filtration()) == [
- ([0, 2], 0.0),
- ([1, 2], 0.0),
- ([0, 3], 0.0),
- ([3, 4], 0.0),
- ([2, 5], 0.0),
- ([3, 5], 0.0),
- ([0], 1.0),
- ([1], 2.0),
- ([2], 3.0),
- ([3], 4.0),
- ([4], 5.0),
- ([5], 6.0)
+ # o o
+ # 1 0
+
+ st = SimplexTree()
+ st.insert([0, 2])
+ st.insert([1, 2])
+ st.insert([0, 3])
+ st.insert([2, 5])
+ st.insert([3, 4])
+ st.insert([3, 5])
+ st.assign_filtration([0], 1.0)
+ st.assign_filtration([1], 2.0)
+ st.assign_filtration([2], 3.0)
+ st.assign_filtration([3], 4.0)
+ st.assign_filtration([4], 5.0)
+ st.assign_filtration([5], 6.0)
+
+ assert list(st.get_filtration()) == [
+ ([0, 2], 0.0),
+ ([1, 2], 0.0),
+ ([0, 3], 0.0),
+ ([3, 4], 0.0),
+ ([2, 5], 0.0),
+ ([3, 5], 0.0),
+ ([0], 1.0),
+ ([1], 2.0),
+ ([2], 3.0),
+ ([3], 4.0),
+ ([4], 5.0),
+ ([5], 6.0),
]
-
+
st.extend_filtration()
-
- assert list(st.get_filtration()) == [
- ([6], -3.0),
- ([0], -2.0),
- ([1], -1.8),
- ([2], -1.6),
- ([0, 2], -1.6),
- ([1, 2], -1.6),
- ([3], -1.4),
- ([0, 3], -1.4),
- ([4], -1.2),
- ([3, 4], -1.2),
- ([5], -1.0),
- ([2, 5], -1.0),
- ([3, 5], -1.0),
- ([5, 6], 1.0),
- ([4, 6], 1.2),
- ([3, 6], 1.4),
+
+ assert list(st.get_filtration()) == [
+ ([6], -3.0),
+ ([0], -2.0),
+ ([1], -1.8),
+ ([2], -1.6),
+ ([0, 2], -1.6),
+ ([1, 2], -1.6),
+ ([3], -1.4),
+ ([0, 3], -1.4),
+ ([4], -1.2),
+ ([3, 4], -1.2),
+ ([5], -1.0),
+ ([2, 5], -1.0),
+ ([3, 5], -1.0),
+ ([5, 6], 1.0),
+ ([4, 6], 1.2),
+ ([3, 6], 1.4),
([3, 4, 6], 1.4),
- ([3, 5, 6], 1.4),
- ([2, 6], 1.6),
- ([2, 5, 6], 1.6),
- ([1, 6], 1.8),
- ([1, 2, 6], 1.8),
- ([0, 6], 2.0),
- ([0, 2, 6], 2.0),
- ([0, 3, 6], 2.0)
+ ([3, 5, 6], 1.4),
+ ([2, 6], 1.6),
+ ([2, 5, 6], 1.6),
+ ([1, 6], 1.8),
+ ([1, 2, 6], 1.8),
+ ([0, 6], 2.0),
+ ([0, 2, 6], 2.0),
+ ([0, 3, 6], 2.0),
]
- dgms = st.extended_persistence(min_persistence=-1.)
+ dgms = st.extended_persistence(min_persistence=-1.0)
assert len(dgms) == 4
# Sort by (death-birth) descending - we are only interested in those with the longest life span
for idx in range(4):
- dgms[idx] = sorted(dgms[idx], key=lambda x:(-abs(x[1][0]-x[1][1])))
+ dgms[idx] = sorted(dgms[idx], key=lambda x: (-abs(x[1][0] - x[1][1])))
+
+ assert dgms[0][0][1][0] == pytest.approx(2.0)
+ assert dgms[0][0][1][1] == pytest.approx(3.0)
+ assert dgms[1][0][1][0] == pytest.approx(5.0)
+ assert dgms[1][0][1][1] == pytest.approx(4.0)
+ assert dgms[2][0][1][0] == pytest.approx(1.0)
+ assert dgms[2][0][1][1] == pytest.approx(6.0)
+ assert dgms[3][0][1][0] == pytest.approx(6.0)
+ assert dgms[3][0][1][1] == pytest.approx(1.0)
- assert dgms[0][0][1][0] == pytest.approx(2.)
- assert dgms[0][0][1][1] == pytest.approx(3.)
- assert dgms[1][0][1][0] == pytest.approx(5.)
- assert dgms[1][0][1][1] == pytest.approx(4.)
- assert dgms[2][0][1][0] == pytest.approx(1.)
- assert dgms[2][0][1][1] == pytest.approx(6.)
- assert dgms[3][0][1][0] == pytest.approx(6.)
- assert dgms[3][0][1][1] == pytest.approx(1.)
def test_simplices_iterator():
st = SimplexTree()
-
+
assert st.insert([0, 1, 2], filtration=4.0) == True
assert st.insert([2, 3, 4], filtration=2.0) == True
@@ -346,9 +348,10 @@ def test_simplices_iterator():
print("filtration is: ", simplex[1])
assert st.filtration(simplex[0]) == simplex[1]
+
def test_collapse_edges():
st = SimplexTree()
-
+
assert st.insert([0, 1], filtration=1.0) == True
assert st.insert([1, 2], filtration=1.0) == True
assert st.insert([2, 3], filtration=1.0) == True
@@ -360,31 +363,33 @@ def test_collapse_edges():
st.collapse_edges()
assert st.num_simplices() == 9
- assert st.find([0, 2]) == False # [1, 3] would be fine as well
+ assert st.find([0, 2]) == False # [1, 3] would be fine as well
for simplex in st.get_skeleton(0):
- assert simplex[1] == 1.
+ assert simplex[1] == 1.0
+
def test_reset_filtration():
st = SimplexTree()
-
- assert st.insert([0, 1, 2], 3.) == True
- assert st.insert([0, 3], 2.) == True
- assert st.insert([3, 4, 5], 3.) == True
- assert st.insert([0, 1, 6, 7], 4.) == True
+
+ assert st.insert([0, 1, 2], 3.0) == True
+ assert st.insert([0, 3], 2.0) == True
+ assert st.insert([3, 4, 5], 3.0) == True
+ assert st.insert([0, 1, 6, 7], 4.0) == True
# Guaranteed by construction
for simplex in st.get_simplices():
- assert st.filtration(simplex[0]) >= 2.
-
+ assert st.filtration(simplex[0]) >= 2.0
+
# dimension until 5 even if simplex tree is of dimension 3 to test the limits
for dimension in range(5, -1, -1):
- st.reset_filtration(0., dimension)
+ st.reset_filtration(0.0, dimension)
for simplex in st.get_skeleton(3):
print(simplex)
if len(simplex[0]) < (dimension) + 1:
- assert st.filtration(simplex[0]) >= 2.
+ assert st.filtration(simplex[0]) >= 2.0
else:
- assert st.filtration(simplex[0]) == 0.
+ assert st.filtration(simplex[0]) == 0.0
+
def test_boundaries_iterator():
st = SimplexTree()
@@ -400,16 +405,17 @@ def test_boundaries_iterator():
list(st.get_boundaries([]))
with pytest.raises(RuntimeError):
- list(st.get_boundaries([0, 4])) # (0, 4) does not exist
+ list(st.get_boundaries([0, 4])) # (0, 4) does not exist
with pytest.raises(RuntimeError):
- list(st.get_boundaries([6])) # (6) does not exist
+ list(st.get_boundaries([6])) # (6) does not exist
+
def test_persistence_intervals_in_dimension():
# Here is our triangulation of a 2-torus - taken from https://dioscuri-tda.org/Paris_TDA_Tutorial_2021.html
# 0-----3-----4-----0
# | \ | \ | \ | \ |
- # | \ | \ | \| \ |
+ # | \ | \ | \| \ |
# 1-----8-----7-----1
# | \ | \ | \ | \ |
# | \ | \ | \ | \ |
@@ -418,50 +424,52 @@ def test_persistence_intervals_in_dimension():
# | \ | \ | \ | \ |
# 0-----3-----4-----0
st = SimplexTree()
- st.insert([0,1,8])
- st.insert([0,3,8])
- st.insert([3,7,8])
- st.insert([3,4,7])
- st.insert([1,4,7])
- st.insert([0,1,4])
- st.insert([1,2,5])
- st.insert([1,5,8])
- st.insert([5,6,8])
- st.insert([6,7,8])
- st.insert([2,6,7])
- st.insert([1,2,7])
- st.insert([0,2,3])
- st.insert([2,3,5])
- st.insert([3,4,5])
- st.insert([4,5,6])
- st.insert([0,4,6])
- st.insert([0,2,6])
+ st.insert([0, 1, 8])
+ st.insert([0, 3, 8])
+ st.insert([3, 7, 8])
+ st.insert([3, 4, 7])
+ st.insert([1, 4, 7])
+ st.insert([0, 1, 4])
+ st.insert([1, 2, 5])
+ st.insert([1, 5, 8])
+ st.insert([5, 6, 8])
+ st.insert([6, 7, 8])
+ st.insert([2, 6, 7])
+ st.insert([1, 2, 7])
+ st.insert([0, 2, 3])
+ st.insert([2, 3, 5])
+ st.insert([3, 4, 5])
+ st.insert([4, 5, 6])
+ st.insert([0, 4, 6])
+ st.insert([0, 2, 6])
st.compute_persistence(persistence_dim_max=True)
-
+
H0 = st.persistence_intervals_in_dimension(0)
- assert np.array_equal(H0, np.array([[ 0., float("inf")]]))
+ assert np.array_equal(H0, np.array([[0.0, float("inf")]]))
H1 = st.persistence_intervals_in_dimension(1)
- assert np.array_equal(H1, np.array([[ 0., float("inf")], [ 0., float("inf")]]))
+ assert np.array_equal(H1, np.array([[0.0, float("inf")], [0.0, float("inf")]]))
H2 = st.persistence_intervals_in_dimension(2)
- assert np.array_equal(H2, np.array([[ 0., float("inf")]]))
+ assert np.array_equal(H2, np.array([[0.0, float("inf")]]))
# Test empty case
assert st.persistence_intervals_in_dimension(3).shape == (0, 2)
+
def test_equality_operator():
st1 = SimplexTree()
st2 = SimplexTree()
assert st1 == st2
- st1.insert([1,2,3], 4.)
+ st1.insert([1, 2, 3], 4.0)
assert st1 != st2
- st2.insert([1,2,3], 4.)
+ st2.insert([1, 2, 3], 4.0)
assert st1 == st2
+
def test_simplex_tree_deep_copy():
st = SimplexTree()
- st.insert([1, 2, 3], 0.)
+ st.insert([1, 2, 3], 0.0)
# compute persistence only on the original
st.compute_persistence()
@@ -480,14 +488,15 @@ def test_simplex_tree_deep_copy():
for a_splx in a_filt_list:
assert a_splx in st_filt_list
-
+
# test double free
del st
del st_copy
+
def test_simplex_tree_deep_copy_constructor():
st = SimplexTree()
- st.insert([1, 2, 3], 0.)
+ st.insert([1, 2, 3], 0.0)
# compute persistence only on the original
st.compute_persistence()
@@ -506,56 +515,132 @@ def test_simplex_tree_deep_copy_constructor():
for a_splx in a_filt_list:
assert a_splx in st_filt_list
-
+
# test double free
del st
del st_copy
+
def test_simplex_tree_constructor_exception():
with pytest.raises(TypeError):
- st = SimplexTree(other = "Construction from a string shall raise an exception")
+ st = SimplexTree(other="Construction from a string shall raise an exception")
+
+
+def test_create_from_array():
+ a = np.array([[1, 4, 13, 6], [4, 3, 11, 5], [13, 11, 10, 12], [6, 5, 12, 2]])
+ st = SimplexTree.create_from_array(a, max_filtration=5.0)
+ assert list(st.get_filtration()) == [([0], 1.0), ([3], 2.0), ([1], 3.0), ([0, 1], 4.0), ([1, 3], 5.0)]
+
+
+def test_insert_edges_from_coo_matrix():
+ try:
+ from scipy.sparse import coo_matrix
+ from scipy.spatial import cKDTree
+ except ImportError:
+ print("Skipping, no SciPy")
+ return
+
+ st = SimplexTree()
+ st.insert([1, 2, 7], 7)
+ row = np.array([2, 5, 3])
+ col = np.array([1, 4, 6])
+ dat = np.array([1, 2, 3])
+ edges = coo_matrix((dat, (row, col)))
+ st.insert_edges_from_coo_matrix(edges)
+ assert list(st.get_filtration()) == [
+ ([1], 1.0),
+ ([2], 1.0),
+ ([1, 2], 1.0),
+ ([4], 2.0),
+ ([5], 2.0),
+ ([4, 5], 2.0),
+ ([3], 3.0),
+ ([6], 3.0),
+ ([3, 6], 3.0),
+ ([7], 7.0),
+ ([1, 7], 7.0),
+ ([2, 7], 7.0),
+ ([1, 2, 7], 7.0),
+ ]
+
+ pts = np.random.rand(100, 2)
+ tree = cKDTree(pts)
+ edges = tree.sparse_distance_matrix(tree, max_distance=0.15, output_type="coo_matrix")
+ st = SimplexTree()
+ st.insert_edges_from_coo_matrix(edges)
+ assert 100 < st.num_simplices() < 1000
+
+
+def test_insert_batch():
+ st = SimplexTree()
+ # vertices
+ st.insert_batch(np.array([[6, 1, 5]]), np.array([-5.0, 2.0, -3.0]))
+ # triangles
+ st.insert_batch(np.array([[2, 10], [5, 0], [6, 11]]), np.array([4.0, 0.0]))
+ # edges
+ st.insert_batch(np.array([[1, 5], [2, 5]]), np.array([1.0, 3.0]))
+
+ assert list(st.get_filtration()) == [
+ ([6], -5.0),
+ ([5], -3.0),
+ ([0], 0.0),
+ ([10], 0.0),
+ ([0, 10], 0.0),
+ ([11], 0.0),
+ ([0, 11], 0.0),
+ ([10, 11], 0.0),
+ ([0, 10, 11], 0.0),
+ ([1], 1.0),
+ ([2], 1.0),
+ ([1, 2], 1.0),
+ ([2, 5], 4.0),
+ ([2, 6], 4.0),
+ ([5, 6], 4.0),
+ ([2, 5, 6], 4.0),
+ ]
+
def test_expansion_with_blocker():
- st=SimplexTree()
- st.insert([0,1],0)
- st.insert([0,2],1)
- st.insert([0,3],2)
- st.insert([1,2],3)
- st.insert([1,3],4)
- st.insert([2,3],5)
- st.insert([2,4],6)
- st.insert([3,6],7)
- st.insert([4,5],8)
- st.insert([4,6],9)
- st.insert([5,6],10)
- st.insert([6],10)
+ st = SimplexTree()
+ st.insert([0, 1], 0)
+ st.insert([0, 2], 1)
+ st.insert([0, 3], 2)
+ st.insert([1, 2], 3)
+ st.insert([1, 3], 4)
+ st.insert([2, 3], 5)
+ st.insert([2, 4], 6)
+ st.insert([3, 6], 7)
+ st.insert([4, 5], 8)
+ st.insert([4, 6], 9)
+ st.insert([5, 6], 10)
+ st.insert([6], 10)
def blocker(simplex):
try:
# Block all simplices that contain vertex 6
simplex.index(6)
- print(simplex, ' is blocked')
+ print(simplex, " is blocked")
return True
except ValueError:
- print(simplex, ' is accepted')
- st.assign_filtration(simplex, st.filtration(simplex) + 1.)
+ print(simplex, " is accepted")
+ st.assign_filtration(simplex, st.filtration(simplex) + 1.0)
return False
st.expansion_with_blocker(2, blocker)
assert st.num_simplices() == 22
assert st.dimension() == 2
- assert st.find([4,5,6]) == False
- assert st.filtration([0,1,2]) == 4.
- assert st.filtration([0,1,3]) == 5.
- assert st.filtration([0,2,3]) == 6.
- assert st.filtration([1,2,3]) == 6.
+ assert st.find([4, 5, 6]) == False
+ assert st.filtration([0, 1, 2]) == 4.0
+ assert st.filtration([0, 1, 3]) == 5.0
+ assert st.filtration([0, 2, 3]) == 6.0
+ assert st.filtration([1, 2, 3]) == 6.0
st.expansion_with_blocker(3, blocker)
assert st.num_simplices() == 23
assert st.dimension() == 3
- assert st.find([4,5,6]) == False
- assert st.filtration([0,1,2]) == 4.
- assert st.filtration([0,1,3]) == 5.
- assert st.filtration([0,2,3]) == 6.
- assert st.filtration([1,2,3]) == 6.
- assert st.filtration([0,1,2,3]) == 7.
+ assert st.find([4, 5, 6]) == False
+ assert st.filtration([0, 1, 2]) == 4.0
+ assert st.filtration([0, 1, 3]) == 5.0
+ assert st.filtration([0, 2, 3]) == 6.0
+ assert st.filtration([1, 2, 3]) == 6.0
+ assert st.filtration([0, 1, 2, 3]) == 7.0
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 72514099..42bf3299 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -90,10 +90,11 @@ def test_get_essential_parts():
def test_warn_infty():
- assert _warn_infty(matching=False)==np.inf
- c, m = _warn_infty(matching=True)
- assert (c == np.inf)
- assert (m is None)
+ with pytest.warns(UserWarning):
+ assert _warn_infty(matching=False)==np.inf
+ c, m = _warn_infty(matching=True)
+ assert (c == np.inf)
+ assert (m is None)
def _to_set(X):