-rw-r--r-- .circleci/config.yml | 5
-rw-r--r-- .github/for_maintainers/new_gudhi_version_creation.md | 11
-rw-r--r-- .github/next_release.md | 12
-rw-r--r-- .github/test-requirements.txt | 1
-rw-r--r-- CMakeGUDHIVersion.txt | 4
-rw-r--r-- Dockerfile_for_circleci_image | 1
-rw-r--r-- Dockerfile_gudhi_installation | 6
-rw-r--r-- azure-pipelines.yml | 43
-rw-r--r-- biblio/bibliography.bib | 63
-rw-r--r-- src/Alpha_complex/concept/SimplicialComplexForAlpha.h | 18
-rw-r--r-- src/Alpha_complex/include/gudhi/Alpha_complex.h | 122
-rw-r--r-- src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 3
-rw-r--r-- src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp | 3
-rw-r--r-- src/Alpha_complex/utilities/alpha_complex_persistence.cpp | 3
-rw-r--r-- src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h | 27
-rw-r--r-- src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp | 6
-rw-r--r-- src/Persistent_cohomology/doc/Intro_persistent_cohomology.h | 2
-rw-r--r-- src/Persistent_cohomology/example/custom_persistence_sort.cpp | 3
-rw-r--r-- src/Persistent_cohomology/example/persistence_from_file.cpp | 3
-rw-r--r-- src/Persistent_cohomology/example/plain_homology.cpp | 3
-rw-r--r-- src/Persistent_cohomology/example/rips_multifield_persistence.cpp | 3
-rw-r--r-- src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp | 3
-rw-r--r-- src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp | 13
-rw-r--r-- src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 3
-rw-r--r-- src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp | 3
-rw-r--r-- src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp | 3
-rw-r--r-- src/Rips_complex/utilities/rips_persistence.cpp | 3
-rw-r--r-- src/Rips_complex/utilities/sparse_rips_persistence.cpp | 3
-rw-r--r-- src/Simplex_tree/include/gudhi/Simplex_tree.h | 62
-rw-r--r-- src/cmake/modules/GUDHI_third_party_libraries.cmake | 23
-rw-r--r-- src/cmake/modules/GUDHI_user_version_target.cmake | 6
-rw-r--r-- src/common/doc/main_page.md | 2
-rw-r--r-- src/python/CMakeLists.txt | 189
-rw-r--r-- src/python/doc/alpha_complex_sum.inc | 28
-rw-r--r-- src/python/doc/alpha_complex_user.rst | 51
-rw-r--r-- src/python/doc/bottleneck_distance_sum.inc | 22
-rw-r--r-- src/python/doc/bottleneck_distance_user.rst | 6
-rw-r--r-- src/python/doc/cubical_complex_user.rst | 11
-rw-r--r-- src/python/doc/fileformats.rst | 2
-rw-r--r-- src/python/doc/index.rst | 7
-rw-r--r-- src/python/doc/installation.rst | 90
-rw-r--r-- src/python/doc/nerve_gic_complex_ref.rst | 7
-rw-r--r-- src/python/doc/nerve_gic_complex_sum.inc | 26
-rw-r--r-- src/python/doc/nerve_gic_complex_user.rst | 9
-rw-r--r-- src/python/doc/persistence_graphical_tools_sum.inc | 22
-rw-r--r-- src/python/doc/persistence_graphical_tools_user.rst | 9
-rw-r--r-- src/python/doc/persistent_cohomology_sum.inc | 2
-rw-r--r-- src/python/doc/persistent_cohomology_user.rst | 9
-rw-r--r-- src/python/doc/point_cloud.rst | 21
-rw-r--r-- src/python/doc/point_cloud_sum.inc | 21
-rw-r--r-- src/python/doc/representations.rst | 2
-rw-r--r-- src/python/doc/representations_sum.inc | 22
-rw-r--r-- src/python/doc/rips_complex_ref.rst | 13
-rw-r--r-- src/python/doc/rips_complex_sum.inc | 3
-rw-r--r-- src/python/doc/rips_complex_user.rst | 54
-rw-r--r-- src/python/doc/simplex_tree_ref.rst | 1
-rw-r--r-- src/python/doc/simplex_tree_user.rst | 7
-rw-r--r-- src/python/doc/tangential_complex_sum.inc | 22
-rw-r--r-- src/python/doc/tangential_complex_user.rst | 8
-rw-r--r-- src/python/doc/wasserstein_distance_sum.inc | 20
-rw-r--r-- src/python/doc/wasserstein_distance_user.rst | 22
-rw-r--r-- src/python/doc/witness_complex_sum.inc | 28
-rw-r--r-- src/python/doc/witness_complex_user.rst | 7
-rw-r--r-- src/python/doc/zbibliography.rst | 10
-rwxr-xr-x src/python/example/alpha_complex_from_points_example.py | 3
-rwxr-xr-x src/python/example/alpha_rips_persistence_bottleneck_distance.py | 12
-rwxr-xr-x src/python/example/diagram_vectorizations_distances_kernels.py | 98
-rwxr-xr-x src/python/example/simplex_tree_example.py | 1
-rw-r--r-- src/python/gudhi/alpha_complex.pyx | 17
-rw-r--r-- src/python/gudhi/bottleneck.cc | 50
-rw-r--r-- src/python/gudhi/bottleneck.pyx | 48
-rw-r--r-- src/python/gudhi/cubical_complex.pyx | 152
-rw-r--r-- src/python/gudhi/hera.cc | 27
-rw-r--r-- src/python/gudhi/periodic_cubical_complex.pyx | 156
-rw-r--r-- src/python/gudhi/persistence_graphical_tools.py | 30
-rw-r--r-- src/python/gudhi/point_cloud/dtm.py | 179
-rw-r--r-- src/python/gudhi/point_cloud/knn.py | 332
-rw-r--r-- src/python/gudhi/point_cloud/timedelay.py | 5
-rw-r--r-- src/python/gudhi/representations/kernel_methods.py | 183
-rw-r--r-- src/python/gudhi/representations/metrics.py | 385
-rw-r--r-- src/python/gudhi/representations/preprocessing.py | 60
-rw-r--r-- src/python/gudhi/representations/vector_methods.py | 84
-rw-r--r-- src/python/gudhi/rips_complex.pyx | 17
-rw-r--r-- src/python/gudhi/simplex_tree.pxd | 87
-rw-r--r-- src/python/gudhi/simplex_tree.pyx | 229
-rw-r--r-- src/python/gudhi/wasserstein/barycenter.py | 49
-rw-r--r-- src/python/gudhi/wasserstein/wasserstein.py | 92
-rw-r--r-- src/python/gudhi/weighted_rips_complex.py | 59
-rw-r--r-- src/python/include/Alpha_complex_interface.h | 1
-rw-r--r-- src/python/include/Euclidean_strong_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Euclidean_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Nerve_gic_interface.h | 1
-rw-r--r-- src/python/include/Persistent_cohomology_interface.h | 194
-rw-r--r-- src/python/include/Rips_complex_interface.h | 1
-rw-r--r-- src/python/include/Simplex_tree_interface.h | 21
-rw-r--r-- src/python/include/Strong_witness_complex_interface.h | 2
-rw-r--r-- src/python/include/Tangential_complex_interface.h | 1
-rw-r--r-- src/python/include/Witness_complex_interface.h | 2
-rw-r--r-- src/python/include/pybind11_diagram_utils.h | 39
-rw-r--r-- src/python/setup.py.in | 29
-rwxr-xr-x src/python/test/test_cubical_complex.py | 27
-rwxr-xr-x src/python/test/test_dtm.py | 89
-rwxr-xr-x src/python/test/test_knn.py | 130
-rwxr-xr-x src/python/test/test_simplex_generators.py | 64
-rwxr-xr-x src/python/test/test_simplex_tree.py | 3
-rwxr-xr-x src/python/test/test_wasserstein_distance.py | 45
-rw-r--r-- src/python/test/test_weighted_rips.py | 63
107 files changed, 3069 insertions, 1219 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 4f86cb12..40ddc08e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -45,7 +45,6 @@ jobs:
python:
docker:
- image: gudhi/ci_for_gudhi:latest
- parallelism: 4
steps:
- checkout
- run:
@@ -62,12 +61,12 @@ jobs:
cd build;
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..;
cd python;
- python3 setup.py build_ext -j 4 --inplace;
+ python3 setup.py build_ext -j 2 --inplace;
make sphinx;
cp -R sphinx /tmp/sphinx;
python3 setup.py install;
python3 setup.py clean --all;
- ctest -j 4 --output-on-failure;
+ ctest -j 2 --output-on-failure;
- store_artifacts:
path: /tmp/sphinx
diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md
index f176d392..86c393a0 100644
--- a/.github/for_maintainers/new_gudhi_version_creation.md
+++ b/.github/for_maintainers/new_gudhi_version_creation.md
@@ -16,8 +16,8 @@ rm -rf data/points/COIL_database/lucky_cat.off_dist data/points/COIL_database/lu
Check in the modifications, then build and test the version:
```bash
-mkdir build
-cd build
+git submodule update --init
+rm -rf build; mkdir build; cd build
cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DUSER_VERSION_DIR=gudhi.@GUDHI_VERSION@ -DPython_ADDITIONAL_VERSIONS=3 ..
make user_version
date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt
@@ -26,7 +26,7 @@ md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt
sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt
sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt
-make -j all test
+make -j 4 all && ctest -j 4 --output-on-failure
```
***[Check there are no errors]***
@@ -42,7 +42,8 @@ make doxygen 2>&1 | tee dox.log && grep warning dox.log
```bash
cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp
cd gudhi.@GUDHI_VERSION@
-rm -rf build; mkdir build; cd build; cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+rm -rf build; mkdir build; cd build
+cmake -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 ..
export LC_ALL=en_US.UTF-8 # cf. bug
make sphinx
```
@@ -55,7 +56,7 @@ cd ../..
tar -czvf gudhi.doc.@GUDHI_VERSION@.tar.gz gudhi.doc.@GUDHI_VERSION@
cd gudhi.@GUDHI_VERSION@/build
-make all test
+make -j 4 all && ctest -j 4 --output-on-failure
```
***[Check there are no errors]***
diff --git a/.github/next_release.md b/.github/next_release.md
index 83b98a1c..a2805a55 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,21 +1,19 @@
We are pleased to announce the release 3.X.X of the GUDHI library.
-As a major new feature, the GUDHI library now offers a Python interface to [Hera](https://bitbucket.org/grey_narn/hera/src/master/) to compute the Wasserstein distance.
-[PyBind11](https://github.com/pybind/pybind11) is now required to build the Python module.
+As a major new feature, the GUDHI library now offers ...
We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.1.1:
+Below is a list of changes made since GUDHI 3.X-1.X-1:
-- [Wassertein distance](https://gudhi.inria.fr/python/latest/wasserstein_distance_user.html)
- - An another implementation comes from Hera (BSD-3-Clause) which is based on [Geometry Helps to Compare Persistence Diagrams](http://doi.acm.org/10.1145/3064175) by Michael Kerber, Dmitriy Morozov, and Arnur Nigmetov.
- - `gudhi.wasserstein.wasserstein_distance` has now an option to return the optimal matching that achieves the distance between the two diagrams.
+- [Module](link)
+ - ...
- [Module](link)
- ...
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.1.1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.2.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.X-1.X-1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.1.1+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt
index 4f9dcefb..fb1df134 100644
--- a/.github/test-requirements.txt
+++ b/.github/test-requirements.txt
@@ -10,3 +10,4 @@ tensorflow
torch
pykeops
hnswlib
+eagerpy
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index 0f827b9e..b9895bb1 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 1)
-set (GUDHI_PATCH_VERSION 1)
+set (GUDHI_MINOR_VERSION 3)
+set (GUDHI_PATCH_VERSION 0.rc1)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
message(STATUS "GUDHI version : ${GUDHI_VERSION}")
diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
index 20754e2a..c2e8a8f5 100644
--- a/Dockerfile_for_circleci_image
+++ b/Dockerfile_for_circleci_image
@@ -43,6 +43,7 @@ RUN apt-get install -y make \
python3 \
python3-pip \
python3-tk \
+ python3-grpcio \
libfreetype6-dev \
pkg-config \
curl
diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation
index f9e8813b..461a8a19 100644
--- a/Dockerfile_gudhi_installation
+++ b/Dockerfile_gudhi_installation
@@ -58,9 +58,9 @@ RUN pip3 install \
# apt clean up
RUN apt autoremove && rm -rf /var/lib/apt/lists/*
-RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.1.1/gudhi.3.1.1.tar.gz" \
-&& tar xf gudhi.3.1.1.tar.gz \
-&& cd gudhi.3.1.1 \
+RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.2.0/gudhi.3.2.0.tar.gz" \
+&& tar xf gudhi.3.2.0.tar.gz \
+&& cd gudhi.3.2.0 \
&& mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_PYTHON=OFF -DPython_ADDITIONAL_VERSIONS=3 .. \
&& make all test install \
&& cmake -DWITH_GUDHI_PYTHON=ON . \
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 95b15db2..7b5334a7 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -4,35 +4,34 @@ jobs:
displayName: "Build and test"
timeoutInMinutes: 0
cancelTimeoutInMinutes: 60
-
- strategy:
- matrix:
- macOSrelease:
- imageName: 'macos-10.14'
- CMakeBuildType: Release
- customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
-
pool:
- vmImage: $(imageName)
-
+ vmImage: macOS-10.14
+ variables:
+ pythonVersion: '3.6'
+ cmakeBuildType: Release
+ customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
+
steps:
- - task: UsePythonVersion@0
- inputs:
- versionSpec: '3.7'
- architecture: 'x64'
+ - bash: echo "##vso[task.prependpath]$CONDA/bin"
+ displayName: Add conda to PATH
- - script: |
- $(customInstallation)
- git submodule update --init
- python -m pip install --upgrade pip
+ - bash: sudo conda create --yes --quiet --name gudhi_build_env
+ displayName: Create Anaconda environment
+
+ - bash: |
+ source activate gudhi_build_env
+ sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion)
python -m pip install --user -r .github/build-requirements.txt
python -m pip install --user -r .github/test-requirements.txt
+ $(customInstallation)
displayName: 'Install build dependencies'
- - script: |
+ - bash: |
+ source activate gudhi_build_env
+ git submodule update --init
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
- make
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+ make -j 4
make doxygen
- ctest -j 8 --output-on-failure -E sphinx # remove sphinx build as it fails
+ ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails
displayName: 'Build, test and documentation generation'
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index caf9a38f..77039ab4 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -13,7 +13,9 @@ pages = {1--39},
publisher = {JMLR.org},
title = {{Statistical analysis and parameter selection for Mapper}},
volume = {19},
-year = {2018}
+year = {2018},
+url = {http://jmlr.org/papers/v19/17-291.html},
+doi = {10.5555/3291125.3291137}
}
@inproceedings{Dey13,
@@ -22,6 +24,7 @@ year = {2018}
booktitle = {Proceedings of the Twenty-ninth Annual Symposium on Computational Geometry},
year = {2013},
pages = {107--116},
+ doi = {10.1145/2462356.2462387}
}
@article{Carriere16,
@@ -30,6 +33,7 @@ journal = {Foundations of Computational Mathematics},
number = {6},
pages = {1333--1396},
publisher = {Springer-Verlag},
+doi = {10.1007/s10208-017-9370-z},
title = {{Structure and stability of the one-dimensional Mapper}},
volume = {18},
year = {2017}
@@ -47,6 +51,7 @@ journal = {Foundations of Computational Mathematics},
number = {1},
pages = {79--103},
publisher = {Springer-Verlag},
+doi = {10.1007/s10208-008-9027-z},
title = {{Extending persistence using Poincar{\'{e}} and Lefschetz duality}},
volume = {9},
year = {2009}
@@ -830,6 +835,7 @@ book{hatcher2002algebraic,
number = {4},
year = {2010},
pages = {367-405},
+ doi = {10.1007/s10208-010-9066-0},
ee = {http://dx.doi.org/10.1007/s10208-010-9066-0},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
@@ -925,6 +931,7 @@ language={English}
booktitle = {Symposium on Computational Geometry},
year = {2014},
pages = {345},
+ doi = {10.1145/2582112.2582165},
ee = {http://doi.acm.org/10.1145/2582112.2582165},
bibsource = {DBLP, http://dblp.uni-trier.de}
}
@@ -1208,11 +1215,38 @@ numpages = {11},
location = {Montr\'{e}al, Canada},
series = {NIPS’18}
}
-
+@Article{dtm,
+author={Chazal, Fr{\'e}d{\'e}ric
+and Cohen-Steiner, David
+and M{\'e}rigot, Quentin},
+title={Geometric Inference for Probability Measures},
+journal={Foundations of Computational Mathematics},
+year={2011},
+volume={11},
+number={6},
+pages={733-751},
+abstract={Data often comes in the form of a point cloud sampled from an unknown compact subset of Euclidean space. The general goal of geometric inference is then to recover geometric and topological features (e.g., Betti numbers, normals) of this subset from the approximating point cloud data. It appears that the study of distance functions allows one to address many of these questions successfully. However, one of the main limitations of this framework is that it does not cope well with outliers or with background noise. In this paper, we show how to extend the framework of distance functions to overcome this problem. Replacing compact subsets by measures, we introduce a notion of distance function to a probability distribution in Rd. These functions share many properties with classical distance functions, which make them suitable for inference purposes. In particular, by considering appropriate level sets of these distance functions, we show that it is possible to reconstruct offsets of sampled shapes with topological guarantees even in the presence of outliers. Moreover, in settings where empirical measures are considered, these functions can be easily evaluated, making them of particular practical interest.},
+issn={1615-3383},
+doi={10.1007/s10208-011-9098-0},
+url={https://doi.org/10.1007/s10208-011-9098-0}
+}
+@article{dtmdensity,
+author = "Biau, Gérard and Chazal, Frédéric and Cohen-Steiner, David and Devroye, Luc and Rodríguez, Carlos",
+doi = "10.1214/11-EJS606",
+fjournal = "Electronic Journal of Statistics",
+journal = "Electron. J. Statist.",
+pages = "204--237",
+publisher = "The Institute of Mathematical Statistics and the Bernoulli Society",
+title = "A weighted k-nearest neighbor density estimate for geometric inference",
+url = "https://doi.org/10.1214/11-EJS606",
+volume = "5",
+year = "2011"
+}
@article{turner2014frechet,
title={Fr{\'e}chet means for distributions of persistence diagrams},
author={Turner, Katharine and Mileyko, Yuriy and Mukherjee, Sayan and Harer, John},
journal={Discrete \& Computational Geometry},
+ doi={10.1007/s00454-014-9604-7},
volume={52},
number={1},
pages={44--70},
@@ -1220,6 +1254,31 @@ series = {NIPS’18}
publisher={Springer}
}
+@inproceedings{dtmfiltrations,
+ author = {Hirokazu Anai and
+ Fr{\'{e}}d{\'{e}}ric Chazal and
+ Marc Glisse and
+ Yuichi Ike and
+ Hiroya Inakoshi and
+ Rapha{\"{e}}l Tinarrage and
+ Yuhei Umeda},
+ editor = {Gill Barequet and
+ Yusu Wang},
+ title = {DTM-Based Filtrations},
+ booktitle = {35th International Symposium on Computational Geometry, SoCG 2019,
+ June 18-21, 2019, Portland, Oregon, {USA}},
+ series = {LIPIcs},
+ volume = {129},
+ pages = {58:1--58:15},
+ publisher = {Schloss Dagstuhl - Leibniz-Zentrum f{\"{u}}r Informatik},
+ year = {2019},
+ url = {https://doi.org/10.4230/LIPIcs.SoCG.2019.58},
+ doi = {10.4230/LIPIcs.SoCG.2019.58},
+ timestamp = {Tue, 11 Feb 2020 15:52:14 +0100},
+ biburl = {https://dblp.org/rec/conf/compgeom/AnaiCGIITU19.bib},
+ bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+
@unpublished{edgecollapsesocg2020,
title = {{Edge Collapse and Persistence of Flag Complexes}},
author = {Boissonnat, Jean-Daniel and Pritam, Siddharth},
diff --git a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
index 1c6c3b0c..c20c3201 100644
--- a/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
+++ b/src/Alpha_complex/concept/SimplicialComplexForAlpha.h
@@ -72,6 +72,24 @@ struct SimplicialComplexForAlpha {
/** \brief Return type of an insertion of a simplex
*/
typedef unspecified Insertion_result_type;
+
+ /** \name Map interface
+ * Conceptually a `std::unordered_map<Simplex_handle,std::size_t>`.
+ * @{ */
+ /** \brief Data stored for each simplex.
+ *
+ * Must be an integer type. */
+ typedef unspecified Simplex_key;
+ /** \brief Returns a constant dummy number that is either negative,
+ * or at least as large as the number of simplices. Suggested value: -1. */
+ Simplex_key null_key ();
+ /** \brief Returns the number stored for a simplex by `assign_key()`.
+ *
+ * If `assign_key()` has not been called, it must return `null_key()`. */
+ Simplex_key key ( Simplex_handle sh );
+ /** \brief Store a number for a simplex, which can later be retrieved with `key()`. */
+ void assign_key(Simplex_handle sh, Simplex_key n);
+ /** @} */
};
} // namespace alpha_complex
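The map interface added above is what `Alpha_complex` now requires from the complex in order to cache per-simplex data. A minimal sketch of a model of these four requirements, assuming a toy complex whose simplex handles are plain indices (all names here are hypothetical, for illustration only):

```cpp
#include <cstddef>
#include <vector>

struct ToyComplexWithKeys {
  using Simplex_handle = std::size_t;  // index into internal storage
  using Simplex_key = int;             // must be an integer type

  std::vector<Simplex_key> keys_;      // one slot per simplex, -1 = unset

  explicit ToyComplexWithKeys(std::size_t num_simplices)
      : keys_(num_simplices, null_key()) {}

  // Negative, or at least as large as the number of simplices.
  static Simplex_key null_key() { return -1; }

  // Returns null_key() until assign_key() has been called for sh.
  Simplex_key key(Simplex_handle sh) const { return keys_[sh]; }

  // Store a number that key() will later return.
  void assign_key(Simplex_handle sh, Simplex_key n) { keys_[sh] = n; }
};
```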
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index 1b5d6997..ba91998d 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -112,9 +112,6 @@ class Alpha_complex {
typedef typename Kernel::Side_of_bounded_sphere_d Is_Gabriel;
typedef typename Kernel::Point_dimension_d Point_Dimension;
- // Type required to compute squared radius, or side of bounded sphere on a vector of points.
- typedef typename std::vector<Point_d> Vector_of_CGAL_points;
-
// Vertex_iterator type from CGAL.
typedef typename Delaunay_triangulation::Vertex_iterator CGAL_vertex_iterator;
@@ -124,6 +121,9 @@ class Alpha_complex {
// Structure to switch from simplex tree vertex handle to CGAL vertex iterator.
typedef typename std::vector< CGAL_vertex_iterator > Vector_vertex_iterator;
+ // Numeric type of coordinates in the kernel
+ typedef typename Kernel::FT FT;
+
private:
/** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator.
* Vertex handles are inserted sequentially, starting at 0.*/
@@ -132,6 +132,8 @@ class Alpha_complex {
Delaunay_triangulation* triangulation_;
/** \brief Kernel for triangulation_ functions access.*/
Kernel kernel_;
+ /** \brief Cache for geometric constructions: circumcenter and squared radius of a simplex.*/
+ std::vector<std::pair<Point_d, FT>> cache_, old_cache_;
public:
/** \brief Alpha_complex constructor from an OFF file name.
@@ -246,6 +248,49 @@ class Alpha_complex {
}
}
+ /** \brief get_point_ returns the point corresponding to the vertex given as parameter.
+ * Only for internal use for faster access.
+ *
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ * @return The point found.
+ */
+ const Point_d& get_point_(std::size_t vertex) const {
+ return vertex_handle_to_iterator_[vertex]->point();
+ }
+
+ /// Return a reference to the circumcenter and circumradius, writing them in the cache if necessary.
+ template<class SimplicialComplexForAlpha>
+ auto& get_cache(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) {
+ auto k = cplx.key(s);
+ if(k==cplx.null_key()){
+ k = cache_.size();
+ cplx.assign_key(s, k);
+ // Using a transform_range is slower, currently.
+ thread_local std::vector<Point_d> v;
+ v.clear();
+ for (auto vertex : cplx.simplex_vertex_range(s))
+ v.push_back(get_point_(vertex));
+ Point_d c = kernel_.construct_circumcenter_d_object()(v.cbegin(), v.cend());
+ FT r = kernel_.squared_distance_d_object()(c, v[0]);
+ cache_.emplace_back(std::move(c), std::move(r));
+ }
+ return cache_[k];
+ }
+
+ /// Return the circumradius, either from the old cache or computed, without writing to the cache.
+ template<class SimplicialComplexForAlpha>
+ auto radius(SimplicialComplexForAlpha& cplx, typename SimplicialComplexForAlpha::Simplex_handle s) {
+ auto k = cplx.key(s);
+ if(k!=cplx.null_key())
+ return old_cache_[k].second;
+ // Using a transform_range is slower, currently.
+ thread_local std::vector<Point_d> v;
+ v.clear();
+ for (auto vertex : cplx.simplex_vertex_range(s))
+ v.push_back(get_point_(vertex));
+ return kernel_.compute_squared_radius_d_object()(v.cbegin(), v.cend());
+ }
+
public:
/** \brief Inserts all Delaunay triangulation into the simplicial complex.
* It also computes the filtration values accordingly to the \ref createcomplexalgorithm if default_filtration_value
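`get_cache()` above uses that map interface for memoization: on the first visit to a simplex it computes the circumcenter and squared circumradius, appends them to `cache_`, and records the slot index as the simplex key; later visits are O(1) lookups. A minimal sketch of the pattern with simplified types (`double` instead of `Kernel::FT`, and stub geometry helpers standing in for the CGAL functors):

```cpp
#include <utility>
#include <vector>

struct Point { double x = 0, y = 0; };

// Hypothetical stand-ins for the CGAL functors construct_circumcenter_d_object()
// and squared_distance_d_object(); real code calls those instead.
template <class Complex, class SimplexHandle>
Point compute_circumcenter(const Complex&, SimplexHandle) { return {}; }
template <class Complex, class SimplexHandle>
double compute_squared_radius(const Complex&, SimplexHandle) { return 0.0; }

// First visit: compute the circumsphere and store its slot index as the
// simplex key. Later visits: constant-time lookup into the side vector.
template <class Complex, class SimplexHandle>
const std::pair<Point, double>& cached_circumsphere(
    Complex& cplx, SimplexHandle s,
    std::vector<std::pair<Point, double>>& cache) {
  auto k = cplx.key(s);
  if (k == cplx.null_key()) {
    k = cache.size();
    cplx.assign_key(s, k);
    cache.emplace_back(compute_circumcenter(cplx, s),
                       compute_squared_radius(cplx, s));
  }
  return cache[k];
}
```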
@@ -324,52 +369,36 @@ class Alpha_complex {
if (!default_filtration_value) {
// --------------------------------------------------------------------------------------------
- // Will be re-used many times
- Vector_of_CGAL_points pointVector;
// ### For i : d -> 0
for (int decr_dim = triangulation_->maximal_dimension(); decr_dim >= 0; decr_dim--) {
// ### Foreach Sigma of dim i
for (Simplex_handle f_simplex : complex.skeleton_simplex_range(decr_dim)) {
int f_simplex_dim = complex.dimension(f_simplex);
if (decr_dim == f_simplex_dim) {
- pointVector.clear();
- #ifdef DEBUG_TRACES
- std::clog << "Sigma of dim " << decr_dim << " is";
- #endif // DEBUG_TRACES
- for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
- pointVector.push_back(get_point(vertex));
- #ifdef DEBUG_TRACES
- std::clog << " " << vertex;
- #endif // DEBUG_TRACES
- }
- #ifdef DEBUG_TRACES
- std::clog << std::endl;
- #endif // DEBUG_TRACES
// ### If filt(Sigma) is NaN : filt(Sigma) = alpha(Sigma)
if (std::isnan(complex.filtration(f_simplex))) {
Filtration_value alpha_complex_filtration = 0.0;
// No need to compute squared_radius on a single point - alpha is 0.0
if (f_simplex_dim > 0) {
- // squared_radius function initialization
- Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
-
- CGAL::NT_converter<typename Geom_traits::FT, Filtration_value> cv;
- auto sqrad = squared_radius(pointVector.begin(), pointVector.end());
- #if CGAL_VERSION_NR >= 1050000000
+ auto const& sqrad = radius(complex, f_simplex);
+#if CGAL_VERSION_NR >= 1050000000
if(exact) CGAL::exact(sqrad);
- #endif
+#endif
+ CGAL::NT_converter<FT, Filtration_value> cv;
alpha_complex_filtration = cv(sqrad);
}
complex.assign_filtration(f_simplex, alpha_complex_filtration);
- #ifdef DEBUG_TRACES
+#ifdef DEBUG_TRACES
std::clog << "filt(Sigma) is NaN : filt(Sigma) =" << complex.filtration(f_simplex) << std::endl;
- #endif // DEBUG_TRACES
+#endif // DEBUG_TRACES
}
// No need to propagate further, unweighted points all have value 0
if (decr_dim > 1)
propagate_alpha_filtration(complex, f_simplex);
}
}
+ old_cache_ = std::move(cache_);
+ cache_.clear();
}
// --------------------------------------------------------------------------------------------
@@ -388,9 +417,7 @@ class Alpha_complex {
void propagate_alpha_filtration(SimplicialComplexForAlpha& complex, Simplex_handle f_simplex) {
// From SimplicialComplexForAlpha type required to assign filtration values.
typedef typename SimplicialComplexForAlpha::Filtration_value Filtration_value;
-#ifdef DEBUG_TRACES
typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
-#endif // DEBUG_TRACES
// ### Foreach Tau face of Sigma
for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) {
@@ -414,33 +441,18 @@ class Alpha_complex {
#endif // DEBUG_TRACES
// ### Else
} else {
- // insert the Tau points in a vector for is_gabriel function
- Vector_of_CGAL_points pointVector;
-#ifdef DEBUG_TRACES
- Vertex_handle vertexForGabriel = Vertex_handle();
-#endif // DEBUG_TRACES
- for (auto vertex : complex.simplex_vertex_range(f_boundary)) {
- pointVector.push_back(get_point(vertex));
- }
- // Retrieve the Sigma point that is not part of Tau - parameter for is_gabriel function
- Point_d point_for_gabriel;
- for (auto vertex : complex.simplex_vertex_range(f_simplex)) {
- point_for_gabriel = get_point(vertex);
- if (std::find(pointVector.begin(), pointVector.end(), point_for_gabriel) == pointVector.end()) {
-#ifdef DEBUG_TRACES
- // vertex is not found in Tau
- vertexForGabriel = vertex;
-#endif // DEBUG_TRACES
- // No need to continue loop
- break;
- }
- }
- // is_gabriel function initialization
- Is_Gabriel is_gabriel = kernel_.side_of_bounded_sphere_d_object();
- bool is_gab = is_gabriel(pointVector.begin(), pointVector.end(), point_for_gabriel)
- != CGAL::ON_BOUNDED_SIDE;
+ // Find which vertex of f_simplex is missing in f_boundary. We could actually write a variant of boundary_simplex_range that gives pairs (f_boundary, vertex). We rely on the fact that simplex_vertex_range is sorted.
+ auto longlist = complex.simplex_vertex_range(f_simplex);
+ auto shortlist = complex.simplex_vertex_range(f_boundary);
+ auto longiter = std::begin(longlist);
+ auto shortiter = std::begin(shortlist);
+ auto enditer = std::end(shortlist);
+ while(shortiter != enditer && *longiter == *shortiter) { ++longiter; ++shortiter; }
+ Vertex_handle extra = *longiter;
+ auto const& cache=get_cache(complex, f_boundary);
+ bool is_gab = kernel_.squared_distance_d_object()(cache.first, get_point_(extra)) >= cache.second;
#ifdef DEBUG_TRACES
- std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << vertexForGabriel << std::endl;
+ std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << extra << std::endl;
#endif // DEBUG_TRACES
// ### If Tau is not Gabriel of Sigma
if (false == is_gab) {
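The replacement Gabriel test above avoids rebuilding a point vector and calling `side_of_bounded_sphere_d_object()`: since `simplex_vertex_range()` returns vertices in sorted order, the single vertex of Sigma missing from its facet Tau is found by advancing two iterators in lockstep until they disagree, and Tau is non-Gabriel exactly when that extra point lies strictly inside Tau's cached circumsphere. A small self-contained sketch of the sorted-range walk:

```cpp
#include <vector>

// Sigma has exactly one more vertex than its facet Tau; both are sorted.
// Advance both iterators while they agree; the first mismatch (or the
// leftover element when Tau is exhausted) is the extra vertex.
int extra_vertex(const std::vector<int>& sigma,   // vertices of Sigma
                 const std::vector<int>& tau) {   // vertices of Tau
  auto longiter = sigma.begin();
  auto shortiter = tau.begin();
  while (shortiter != tau.end() && *longiter == *shortiter) {
    ++longiter;
    ++shortiter;
  }
  return *longiter;
}

// extra_vertex({2, 5, 7, 9}, {2, 5, 9}) == 7
// extra_vertex({2, 5, 7}, {2, 5}) == 7   (extra vertex may be the last one)
```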
diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
index da1d8004..4b37e4bd 100644
--- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
@@ -188,9 +188,6 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
// Test after prune_above_filtration
bool modified = simplex_tree.prune_above_filtration(0.6);
- if (modified) {
- simplex_tree.initialize_filtration();
- }
BOOST_CHECK(modified);
// Another way to check num_simplices
diff --git a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
index e93c412e..91899040 100644
--- a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
@@ -222,9 +222,6 @@ int main(int argc, char **argv) {
break;
}
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
std::clog << "Simplex_tree dim: " << simplex_tree.dimension() << std::endl;
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree, true);
diff --git a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
index be60ff78..7c898dfd 100644
--- a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
@@ -75,9 +75,6 @@ int main(int argc, char **argv) {
std::clog << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
<< " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
- // Sort the simplices in the order of the filtration
- simplex.initialize_filtration();
-
std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl;
// Compute the persistence diagram of the complex
Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Gudhi::persistent_cohomology::Field_Zp> pcoh(
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index 1eb77c9c..f8f80ded 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -13,6 +13,8 @@
#include <gudhi/Bitmap_cubical_complex/counter.h>
+#include <boost/config.hpp>
+
#include <iostream>
#include <vector>
#include <string>
@@ -110,6 +112,16 @@ class Bitmap_cubical_complex_base {
virtual inline std::vector<std::size_t> get_coboundary_of_a_cell(std::size_t cell) const;
/**
+ * This function finds a top-dimensional cell that is incident to the input cell and has
+ * the same filtration value. In case several cells are suitable, an arbitrary one is
+ * returned. Note that the input parameter can be a cell of any dimension (vertex, edge, etc).
+   * The output, however, always indicates the position of
+   * a top-dimensional cube in the data structure.
+ * \pre The filtration values are assigned as per `impose_lower_star_filtration()`.
+ **/
+ inline size_t get_top_dimensional_coface_of_a_cell(size_t splx);
+
+ /**
* This procedure computes incidence numbers between cubes. For a cube \f$A\f$ of
* dimension n and a cube \f$B \subset A\f$ of dimension n-1, an incidence
* between \f$A\f$ and \f$B\f$ is the integer with which \f$B\f$ appears in the boundary of \f$A\f$.
@@ -197,7 +209,7 @@ class Bitmap_cubical_complex_base {
/**
* Returns number of all cubes in the data structure.
**/
- inline unsigned size() const { return this->data.size(); }
+ inline std::size_t size() const { return this->data.size(); }
/**
* Writing to stream operator. By using it we get the values T of cells in order in which they are stored in the
@@ -603,6 +615,19 @@ void Bitmap_cubical_complex_base<T>::setup_bitmap_based_on_top_dimensional_cells
}
template <typename T>
+size_t Bitmap_cubical_complex_base<T>::get_top_dimensional_coface_of_a_cell(size_t splx) {
+ if (this->get_dimension_of_a_cell(splx) == this->dimension()){return splx;}
+ else{
+ for (auto v : this->get_coboundary_of_a_cell(splx)){
+ if(this->get_cell_data(v) == this->get_cell_data(splx)){
+ return this->get_top_dimensional_coface_of_a_cell(v);
+ }
+ }
+ }
+ BOOST_UNREACHABLE_RETURN(-2);
+}
+
+template <typename T>
Bitmap_cubical_complex_base<T>::Bitmap_cubical_complex_base(const std::vector<unsigned>& sizes_in_following_directions,
const std::vector<T>& top_dimensional_cells) {
this->setup_bitmap_based_on_top_dimensional_cells_list(sizes_in_following_directions, top_dimensional_cells);
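`get_top_dimensional_coface_of_a_cell()` above climbs the coboundary one dimension at a time, always through a cell with the same filtration value; under a lower-star filtration such a neighbor exists at every step, so the recursion depth is bounded by the dimension of the bitmap. An equivalent iterative form of the climb, sketched against a hypothetical stand-in interface:

```cpp
#include <cstddef>

// Same precondition as the member function: filtration values assigned as
// per impose_lower_star_filtration(), so a same-value coboundary cell
// always exists until a top-dimensional cube is reached.
template <class Bitmap>
std::size_t top_coface_iterative(Bitmap& b, std::size_t cell) {
  while (b.get_dimension_of_a_cell(cell) != b.dimension()) {
    for (std::size_t v : b.get_coboundary_of_a_cell(cell)) {
      if (b.get_cell_data(v) == b.get_cell_data(cell)) {
        cell = v;   // climb one dimension, keeping the filtration value
        break;
      }
    }
  }
  return cell;
}
```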
diff --git a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
index 4769eca3..ceb9e226 100644
--- a/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
+++ b/src/Bottleneck_distance/example/alpha_rips_persistence_bottleneck_distance.cpp
@@ -71,9 +71,6 @@ int main(int argc, char * argv[]) {
std::clog << "The Rips complex contains " << rips_stree.num_simplices() << " simplices and has dimension "
<< rips_stree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- rips_stree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology rips_pcoh(rips_stree);
// initializes the coefficient field for homology
@@ -92,9 +89,6 @@ int main(int argc, char * argv[]) {
std::clog << "The Alpha complex contains " << alpha_stree.num_simplices() << " simplices and has dimension "
<< alpha_stree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- alpha_stree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology alpha_pcoh(alpha_stree);
// initializes the coefficient field for homology
diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
index 46b784d8..b4f9fd2c 100644
--- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
+++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
@@ -21,7 +21,7 @@ namespace persistent_cohomology {
\author Clément Maria
Computation of persistent cohomology using the algorithm of
- \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:journals/corr/abs-1208-5018
+ \cite DBLP:journals/dcg/SilvaMV11 and \cite DBLP:conf/compgeom/DeyFW14
and the Compressed Annotation Matrix
implementation of \cite DBLP:conf/esa/BoissonnatDM13
diff --git a/src/Persistent_cohomology/example/custom_persistence_sort.cpp b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
index 87e9c207..410cd987 100644
--- a/src/Persistent_cohomology/example/custom_persistence_sort.cpp
+++ b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
@@ -86,9 +86,6 @@ int main(int argc, char **argv) {
" - " << simplex.num_simplices() << " simplices - " <<
simplex.num_vertices() << " vertices." << std::endl;
- // Sort the simplices in the order of the filtration
- simplex.initialize_filtration();
-
std::clog << "Simplex_tree dim: " << simplex.dimension() << std::endl;
Persistent_cohomology pcoh(simplex);
diff --git a/src/Persistent_cohomology/example/persistence_from_file.cpp b/src/Persistent_cohomology/example/persistence_from_file.cpp
index 79108730..38c44514 100644
--- a/src/Persistent_cohomology/example/persistence_from_file.cpp
+++ b/src/Persistent_cohomology/example/persistence_from_file.cpp
@@ -59,9 +59,6 @@ int main(int argc, char * argv[]) {
std::clog << std::endl;
}*/
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology< Simplex_tree<>, Field_Zp > pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Persistent_cohomology/example/plain_homology.cpp b/src/Persistent_cohomology/example/plain_homology.cpp
index 4d329020..236b67de 100644
--- a/src/Persistent_cohomology/example/plain_homology.cpp
+++ b/src/Persistent_cohomology/example/plain_homology.cpp
@@ -59,9 +59,6 @@ int main() {
st.insert_simplex_and_subfaces(edge35);
st.insert_simplex(vertex4);
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
-
// Class for homology computation
// By default, since the complex has dimension 1, only 0-dimensional homology would be computed.
// Here we also want persistent homology to be computed for the maximal dimension in the complex (persistence_dim_max = true)
diff --git a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
index e2e2c0a5..2edf5bc4 100644
--- a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
+++ b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
@@ -59,9 +59,6 @@ int main(int argc, char * argv[]) {
std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
index 7da9f15d..a503d983 100644
--- a/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
+++ b/src/Persistent_cohomology/example/rips_persistence_step_by_step.cpp
@@ -76,9 +76,6 @@ int main(int argc, char * argv[]) {
std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
std::clog << " and has dimension " << st.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(st);
// initializes the coefficient field for homology
diff --git a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
index db456f70..8c5742aa 100644
--- a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
+++ b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
@@ -17,10 +17,6 @@
#include <boost/program_options.hpp>
-#ifdef GUDHI_USE_TBB
-#include <tbb/task_scheduler_init.h>
-#endif
-
#include <string>
#include <vector>
@@ -67,11 +63,6 @@ int main(int argc, char * argv[]) {
std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
std::clog << " and has dimension " << st.dimension() << " \n";
-#ifdef GUDHI_USE_TBB
- // Unnecessary, but clarifies which operations are parallel.
- tbb::task_scheduler_init ts;
-#endif
-
// Sort the simplices in the order of the filtration
st.initialize_filtration();
int count = 0;
@@ -81,10 +72,6 @@ int main(int argc, char * argv[]) {
// Convert to a more convenient representation.
Gudhi::Hasse_complex<> hcpx(st);
-#ifdef GUDHI_USE_TBB
- ts.terminate();
-#endif
-
// Free some space.
delete &st;
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index ca4bc10d..d34ee07d 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -561,7 +561,6 @@ class Persistent_cohomology {
void output_diagram(std::ostream& ostream = std::cout) {
cmp_intervals_by_length cmp(cpx_);
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
- bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
<< cpx_->filtration(get<0>(pair)) << " "
@@ -571,9 +570,9 @@ class Persistent_cohomology {
void write_output_diagram(std::string diagram_name) {
std::ofstream diagram_out(diagram_name.c_str());
+ diagram_out.exceptions(diagram_out.failbit);
cmp_intervals_by_length cmp(cpx_);
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
- bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
diagram_out << cpx_->dimension(get<0>(pair)) << " "
<< cpx_->filtration(get<0>(pair)) << " "
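The `diagram_out.exceptions(diagram_out.failbit)` line added above makes write errors (an unwritable path, a full disk) throw `std::ios_base::failure` instead of being silently dropped, since `std::ofstream` does not report failures by default. A minimal sketch of the behavior, with a deliberately invalid path:

```cpp
#include <fstream>
#include <iostream>

int main() {
  std::ofstream out;
  out.exceptions(out.failbit);   // must be armed before the failing operation
  try {
    out.open("/nonexistent_dir/diagram.txt");  // throws here if open fails
    out << "0 0.0 1.0\n";
  } catch (const std::ios_base::failure& e) {
    std::cerr << "write failed: " << e.what() << '\n';
    return 1;
  }
  return 0;
}
```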
diff --git a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
index 67f921a6..b473738e 100644
--- a/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_correlation_matrix_persistence.cpp
@@ -71,9 +71,6 @@ int main(int argc, char* argv[]) {
std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
index 4ad19675..6306755d 100644
--- a/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_distance_matrix_persistence.cpp
@@ -50,9 +50,6 @@ int main(int argc, char* argv[]) {
std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Rips_complex/utilities/rips_persistence.cpp b/src/Rips_complex/utilities/rips_persistence.cpp
index 4cc63d3c..9d7490b3 100644
--- a/src/Rips_complex/utilities/rips_persistence.cpp
+++ b/src/Rips_complex/utilities/rips_persistence.cpp
@@ -52,9 +52,6 @@ int main(int argc, char* argv[]) {
std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Rips_complex/utilities/sparse_rips_persistence.cpp b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
index 40606158..ac935b41 100644
--- a/src/Rips_complex/utilities/sparse_rips_persistence.cpp
+++ b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
@@ -54,9 +54,6 @@ int main(int argc, char* argv[]) {
std::clog << "The complex contains " << simplex_tree.num_simplices() << " simplices \n";
std::clog << " and has dimension " << simplex_tree.dimension() << " \n";
- // Sort the simplices in the order of the filtration
- simplex_tree.initialize_filtration();
-
// Compute the persistence diagram of the complex
Persistent_cohomology pcoh(simplex_tree);
// initializes the coefficient field for homology
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index b455ae31..889dbd00 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -142,7 +142,10 @@ class Simplex_tree {
public:
/** \brief Handle type to a simplex contained in the simplicial complex represented
- * by the simplex tree. */
+ * by the simplex tree.
+ *
+ * They are essentially pointers into internal vectors, and any insertion or removal
+ * of a simplex may invalidate any other Simplex_handle in the complex. */
typedef typename Dictionary::iterator Simplex_handle;
private:
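The invalidation rule documented above in practice: a `Simplex_handle` is essentially a pointer into internal storage, so holding one across an insertion is unsafe. A short sketch of the hazard, assuming the `Simplex_tree` API (the commented-out line is the mistake to avoid):

```cpp
#include <gudhi/Simplex_tree.h>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces(std::vector<int>{0, 1});
  auto sh = st.find(std::vector<int>{0, 1});  // handle into internal storage
  st.insert_simplex_and_subfaces(std::vector<int>{2, 3});  // may invalidate sh
  // (void)st.filtration(sh);  // undefined behavior: sh may now dangle
  return 0;
}
```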
@@ -255,11 +258,9 @@ class Simplex_tree {
*
* The filtration must be valid. If the filtration has not been initialized yet, the
* method initializes it (i.e. order the simplices). If the complex has changed since the last time the filtration
- * was initialized, please call `initialize_filtration()` to recompute it. */
+ * was initialized, please call `clear_filtration()` or `initialize_filtration()` to recompute it. */
Filtration_simplex_range const& filtration_simplex_range(Indexing_tag = Indexing_tag()) {
- if (filtration_vect_.empty()) {
- initialize_filtration();
- }
+ maybe_initialize_filtration();
return filtration_vect_;
}
@@ -485,7 +486,7 @@ class Simplex_tree {
public:
/** \brief Returns the key associated to a simplex.
*
- * The filtration must be initialized.
+ * If no key has been assigned, returns `null_key()`.
* \pre SimplexTreeOptions::store_key
*/
static Simplex_key key(Simplex_handle sh) {
@@ -495,7 +496,6 @@ class Simplex_tree {
/** \brief Returns the simplex that has index idx in the filtration.
*
* The filtration must be initialized.
- * \pre SimplexTreeOptions::store_key
*/
Simplex_handle simplex(Simplex_key idx) const {
return filtration_vect_[idx];
@@ -531,8 +531,7 @@ class Simplex_tree {
return Dictionary_it(nullptr);
}
- /** \brief Returns a key different for all keys associated to the
- * simplices of the simplicial complex. */
+ /** \brief Returns a fixed number not in the interval [0, `num_simplices()`). */
static Simplex_key null_key() {
return -1;
}
@@ -877,15 +876,13 @@ class Simplex_tree {
}
public:
- /** \brief Initializes the filtrations, i.e. sort the
- * simplices according to their order in the filtration and initializes all Simplex_keys.
+ /** \brief Initializes the filtration cache, i.e. sorts the
+ * simplices according to their order in the filtration.
*
- * After calling this method, filtration_simplex_range() becomes valid, and each simplex is
- * assigned a Simplex_key corresponding to its order in the filtration (from 0 to m-1 for a
- * simplicial complex with m simplices).
+ * It always recomputes the cache, even if one already exists.
*
- * Will be automatically called when calling filtration_simplex_range()
- * if the filtration has never been initialized yet. */
+ * Any insertion, deletion or change of filtration value invalidates this cache,
+ * which can be cleared with clear_filtration(). */
void initialize_filtration() {
filtration_vect_.clear();
filtration_vect_.reserve(num_simplices());
@@ -907,6 +904,21 @@ class Simplex_tree {
std::stable_sort(filtration_vect_.begin(), filtration_vect_.end(), is_before_in_filtration(this));
#endif
}
+ /** \brief Initializes the filtration cache if it isn't initialized yet.
+ *
+ * Automatically called by filtration_simplex_range(). */
+ void maybe_initialize_filtration() {
+ if (filtration_vect_.empty()) {
+ initialize_filtration();
+ }
+ }
+ /** \brief Clears the filtration cache produced by initialize_filtration().
+ *
+ * Useful when initialize_filtration() has already been called and we perform an operation
+ * (say an insertion) that invalidates the cache. */
+ void clear_filtration() {
+ filtration_vect_.clear();
+ }
private:
/** Recursive search of cofaces
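Together, `maybe_initialize_filtration()` and the new public `clear_filtration()` give the intended usage pattern: iterate the filtration (the cache is built lazily), and after any modification the tree does not detect itself, drop the stale cache by hand. A short sketch, assuming the patched `Simplex_tree` API:

```cpp
#include <gudhi/Simplex_tree.h>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces(std::vector<int>{0, 1}, 0.5);
  for (auto sh : st.filtration_simplex_range()) (void)sh;  // builds the cache
  st.insert_simplex_and_subfaces(std::vector<int>{1, 2}, 0.7);  // cache stale
  st.clear_filtration();                       // drop it before iterating again
  for (auto sh : st.filtration_simplex_range()) (void)sh;  // rebuilt lazily
  return 0;
}
```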
@@ -1128,6 +1140,7 @@ class Simplex_tree {
* 1 when calling the method. */
void expansion(int max_dim) {
if (max_dim <= 1) return;
+ clear_filtration(); // Drop the cache.
dimension_ = max_dim;
for (Dictionary_it root_it = root_.members_.begin();
root_it != root_.members_.end(); ++root_it) {
@@ -1338,9 +1351,6 @@ class Simplex_tree {
/** \brief This function ensures that each simplex has a higher filtration value than its faces by increasing the
* filtration values.
* @return True if any filtration value was modified, false if the filtration was already non-decreasing.
- * \post Some simplex tree functions require the filtration to be valid. `make_filtration_non_decreasing()`
- * function is not launching `initialize_filtration()` but returns the filtration modification information. If the
- * complex has changed , please call `initialize_filtration()` to recompute it.
*
* If a simplex has a `NaN` filtration value, it is considered lower than any other defined filtration value.
*/
@@ -1352,6 +1362,8 @@ class Simplex_tree {
modified |= rec_make_filtration_non_decreasing(simplex.second.children());
}
}
+ if(modified)
+ clear_filtration(); // Drop the cache.
return modified;
}
@@ -1391,16 +1403,16 @@ class Simplex_tree {
public:
/** \brief Prune above filtration value given as parameter.
* @param[in] filtration Maximum threshold value.
- * @return The filtration modification information.
- * \post Some simplex tree functions require the filtration to be valid. `prune_above_filtration()`
- * function is not launching `initialize_filtration()` but returns the filtration modification information. If the
- * complex has changed , please call `initialize_filtration()` to recompute it.
+ * @return True if any simplex was removed, false if all simplices already had a value below the threshold.
* \post Note that the dimension of the simplicial complex may be lower after calling `prune_above_filtration()`
* than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
* bound. If you care, you can call `dimension()` to recompute the exact dimension.
*/
bool prune_above_filtration(Filtration_value filtration) {
- return rec_prune_above_filtration(root(), filtration);
+ bool modified = rec_prune_above_filtration(root(), filtration);
+ if(modified)
+ clear_filtration(); // Drop the cache.
+ return modified;
}
private:
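With this change `prune_above_filtration()` both reports whether anything was removed and drops the filtration cache itself, which is why the manual `initialize_filtration()` call could be deleted from the unit test earlier in this patch. A short sketch of the new contract, assuming the patched API:

```cpp
#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces(std::vector<int>{0, 1}, 0.4);
  st.insert_simplex_and_subfaces(std::vector<int>{1, 2}, 0.8);
  bool modified = st.prune_above_filtration(0.6);  // removes {1,2} and {2}
  std::cout << std::boolalpha << modified << ' '   // true
            << st.num_simplices() << '\n';         // 3: {0}, {1}, {0,1}
  // No initialize_filtration() needed: the stale cache was dropped, and
  // filtration_simplex_range() re-initializes it on demand.
  return 0;
}
```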
@@ -1467,7 +1479,6 @@ class Simplex_tree {
* @param[in] sh Simplex handle on the maximal simplex to remove.
* \pre Please check the simplex has no coface before removing it.
* \exception std::invalid_argument In debug mode, if sh has children.
- * \post Be aware that removing is shifting data in a flat_map (initialize_filtration to be done).
* \post Note that the dimension of the simplicial complex may be lower after calling `remove_maximal_simplex()`
* than it was before. However, `upper_bound_dimension()` will return the old value, which remains a valid upper
* bound. If you care, you can call `dimension()` to recompute the exact dimension.
@@ -1539,6 +1550,7 @@ class Simplex_tree {
* the original filtration values for each simplex.
*/
Extended_filtration_data extend_filtration() {
+ clear_filtration(); // Drop the cache.
// Compute maximum and minimum of filtration values
Vertex_handle maxvert = std::numeric_limits<Vertex_handle>::min();
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 2d010483..0abe66b7 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -150,6 +150,25 @@ function( find_python_module PYTHON_MODULE_NAME )
endif()
endfunction( find_python_module )
+# For modules that do not define module.__version__
+function( find_python_module_no_version PYTHON_MODULE_NAME )
+ string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}"
+ RESULT_VARIABLE PYTHON_MODULE_RESULT
+ ERROR_VARIABLE PYTHON_MODULE_ERROR)
+ if(PYTHON_MODULE_RESULT EQUAL 0)
+ # Remove carriage return
+ message ("++ Python module ${PYTHON_MODULE_NAME} found")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
+ else()
+ message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
+ - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
+ - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+ endif()
+endfunction( find_python_module_no_version )
+
if( PYTHONINTERP_FOUND )
find_python_module("cython")
find_python_module("pytest")
@@ -160,6 +179,10 @@ if( PYTHONINTERP_FOUND )
find_python_module("sklearn")
find_python_module("ot")
find_python_module("pybind11")
+ find_python_module("torch")
+ find_python_module("pykeops")
+ find_python_module("eagerpy")
+ find_python_module_no_version("hnswlib")
endif()
if(NOT GUDHI_PYTHON_PATH)
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 257d1939..9cf648e3 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -26,8 +26,12 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
# Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
-file(COPY "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
+
+# append cgal citation inside bibliography - sphinx cannot deal with more than one bib file
+file(READ "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" CGAL_CITATION_CONTENT)
+file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib" "${CGAL_CITATION_CONTENT}")
+
# Copy biblio directory for user version
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index 85b39be9..462d857e 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -342,7 +342,7 @@
theory is essentially composed of three elements: topological spaces, their homology groups and an evolution
scheme.
Computation of persistent cohomology using the algorithm of \cite DBLP:journals/dcg/SilvaMV11 and
- \cite DBLP:journals/corr/abs-1208-5018 and the Compressed Annotation Matrix implementation of
+ \cite DBLP:conf/compgeom/DeyFW14 and the Compressed Annotation Matrix implementation of
\cite DBLP:conf/esa/BoissonnatDM13 .
</td>
<td width="15%">
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index a91ca30a..ab08cd6d 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -34,6 +34,7 @@ endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
if(PYBIND11_FOUND)
add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
+ set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ")
endif()
if(CYTHON_FOUND)
@@ -46,7 +47,6 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'reader_utils', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'witness_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'strong_witness_complex', ")
- set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'nerve_gic', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'subsampling', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'tangential_complex', ")
@@ -57,6 +57,7 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ")
add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -78,6 +79,19 @@ if(PYTHONINTERP_FOUND)
if(OT_FOUND)
add_gudhi_debug_info("POT version ${OT_VERSION}")
endif()
+ if(HNSWLIB_FOUND)
+ # Does not have a version number...
+ add_gudhi_debug_info("HNSWlib found")
+ endif()
+ if(TORCH_FOUND)
+ add_gudhi_debug_info("PyTorch version ${TORCH_VERSION}")
+ endif()
+ if(PYKEOPS_FOUND)
+ add_gudhi_debug_info("PyKeOps version ${PYKEOPS_VERSION}")
+ endif()
+ if(EAGERPY_FOUND)
+ add_gudhi_debug_info("EagerPy version ${EAGERPY_VERSION}")
+ endif()
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ")
@@ -107,24 +121,25 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
endif (EIGEN3_FOUND)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'off_reader', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'simplex_tree', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'rips_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'periodic_cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'reader_utils', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'simplex_tree', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'rips_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'periodic_cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'reader_utils', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'strong_witness_complex', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'bottleneck', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ")
endif ()
if(CGAL_FOUND)
@@ -218,6 +233,7 @@ if(PYTHONINTERP_FOUND)
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
add_custom_command(
OUTPUT gudhi.so
@@ -229,6 +245,71 @@ if(PYTHONINTERP_FOUND)
install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)")
+ # Documentation generation is available through sphinx - requires all modules
+  # It is declared first, as the sphinx test is by far the longest one, which is convenient when testing in parallel
+ if(SPHINX_PATH)
+ if(MATPLOTLIB_FOUND)
+ if(NUMPY_FOUND)
+ if(SCIPY_FOUND)
+ if(SKLEARN_FOUND)
+ if(OT_FOUND)
+ if(PYBIND11_FOUND)
+ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
+          # User warning - Sphinx is a static page generator, configured to work fine with the user_version target
+          # Images and biblio warnings are expected, because they are not found in the developer version
+          if (GUDHI_PYTHON_PATH STREQUAL "src/python")
+            set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for the user version, but you are running it on the developer version. Images and biblio will be missing")
+ endif()
+ # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
+ add_custom_target(sphinx
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
+ COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
+ COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
+
+ add_test(NAME sphinx_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
+
+          # Record the python-documentation module as available
+ set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
+ else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+        message("++ Python documentation module will not be compiled because it requires Eigen3 and CGAL version >= 4.11.0")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ else(PYBIND11_FOUND)
+ message("++ Python documentation module will not be compiled because pybind11 was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(PYBIND11_FOUND)
+ else(OT_FOUND)
+ message("++ Python documentation module will not be compiled because POT was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(OT_FOUND)
+ else(SKLEARN_FOUND)
+ message("++ Python documentation module will not be compiled because scikit-learn was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SKLEARN_FOUND)
+ else(SCIPY_FOUND)
+ message("++ Python documentation module will not be compiled because scipy was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SCIPY_FOUND)
+ else(NUMPY_FOUND)
+ message("++ Python documentation module will not be compiled because numpy was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NUMPY_FOUND)
+ else(MATPLOTLIB_FOUND)
+ message("++ Python documentation module will not be compiled because matplotlib was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(MATPLOTLIB_FOUND)
+ else(SPHINX_PATH)
+ message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(SPHINX_PATH)
+
+
# Test examples
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Bottleneck and Alpha
@@ -374,6 +455,7 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py)
add_gudhi_py_test(test_simplex_tree)
+ add_gudhi_py_test(test_simplex_generators)
# Witness
add_test(NAME witness_complex_from_nearest_landmark_table_py_test
@@ -388,7 +470,9 @@ if(PYTHONINTERP_FOUND)
# Wasserstein
if(OT_FOUND AND PYBIND11_FOUND)
- add_gudhi_py_test(test_wasserstein_distance)
+ if(TORCH_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_wasserstein_distance)
+ endif()
add_gudhi_py_test(test_wasserstein_barycenter)
endif()
@@ -400,69 +484,16 @@ if(PYTHONINTERP_FOUND)
# Time Delay
add_gudhi_py_test(test_time_delay)
- # Documentation generation is available through sphinx - requires all modules
- if(SPHINX_PATH)
- if(MATPLOTLIB_FOUND)
- if(NUMPY_FOUND)
- if(SCIPY_FOUND)
- if(SKLEARN_FOUND)
- if(OT_FOUND)
- if(PYBIND11_FOUND)
- if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
- # User warning - Sphinx is a static pages generator, and configured to work fine with user_version
- # Images and biblio warnings because not found on developper version
- if (GUDHI_PYTHON_PATH STREQUAL "src/python")
- set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
- endif()
- # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
- add_custom_target(sphinx
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
- DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
- COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
-
- add_test(NAME sphinx_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
-
- # Set missing or not modules
- set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
- else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- else(PYBIND11_FOUND)
- message("++ Python documentation module will not be compiled because pybind11 was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(PYBIND11_FOUND)
- else(OT_FOUND)
- message("++ Python documentation module will not be compiled because POT was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(OT_FOUND)
- else(SKLEARN_FOUND)
- message("++ Python documentation module will not be compiled because scikit-learn was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SKLEARN_FOUND)
- else(SCIPY_FOUND)
- message("++ Python documentation module will not be compiled because scipy was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SCIPY_FOUND)
- else(NUMPY_FOUND)
- message("++ Python documentation module will not be compiled because numpy was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NUMPY_FOUND)
- else(MATPLOTLIB_FOUND)
- message("++ Python documentation module will not be compiled because matplotlib was not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(MATPLOTLIB_FOUND)
- else(SPHINX_PATH)
- message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found")
- set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SPHINX_PATH)
+ # DTM
+ if(SCIPY_FOUND AND SKLEARN_FOUND AND TORCH_FOUND AND HNSWLIB_FOUND AND PYKEOPS_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_knn)
+ add_gudhi_py_test(test_dtm)
+ endif()
+ # Weighted Rips
+ if(SCIPY_FOUND)
+ add_gudhi_py_test(test_weighted_rips)
+ endif()
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc
index 9e6414d0..3aba0d71 100644
--- a/src/python/doc/alpha_complex_sum.inc
+++ b/src/python/doc/alpha_complex_sum.inc
@@ -1,17 +1,17 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
- | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | |
- | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | |
- | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 </licensing/>`_) |
- | | simplex is then said to be Gabriel), and as the minimum of the | |
- | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | Gabriel otherwise. | |
- | | | |
- | | For performances reasons, it is advised to use CGAL ≥ 5.0.0. | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
+ | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. | |
+ | :alt: Alpha complex representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The filtration value of each simplex is computed as the **square** of | |
+ | | the circumradius of the simplex if the circumsphere is empty (the | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | simplex is then said to be Gabriel), and as the minimum of the | |
+ | | filtration values of the codimension 1 cofaces that make it not | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ | | Gabriel otherwise. | |
+ | | | |
+   |                                                                | For performance reasons, it is advised to use CGAL :math:`\geq` 5.0.0.  |                                                                                                                             |
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
+ +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index 265a82d2..d49f45b4 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -10,9 +10,9 @@ Definition
.. include:: alpha_complex_sum.inc
`AlphaComplex` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
-`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
-:cite:`cgal:hdj-t-19b` from `CGAL <http://www.cgal.org/>`_ (the Computational Geometry Algorithms Library
-:cite:`cgal:eb-19b`).
+`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
+:cite:`cgal:hdj-t-19b` from the `Computational Geometry Algorithms Library <http://www.cgal.org/>`_
+:cite:`cgal:eb-19b`.
Remarks
^^^^^^^
@@ -89,25 +89,28 @@ In order to build the alpha complex, first, a Simplex tree is built from the cel
Filtration value computation algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- **for** i : dimension :math:`\rightarrow` 0 **do**
- **for all** :math:`\sigma` of dimension i
- **if** filtration(:math:`\sigma`) is NaN **then**
- filtration(:math:`\sigma`) = :math:`\alpha^2(\sigma)`
- **end if**
+.. code-block:: vim
+
+ for i : dimension → 0 do
+ for all σ of dimension i
+ if filtration(σ) is NaN then
+ filtration(σ) = α²(σ)
+ end if
+ for all τ face of σ do // propagate alpha filtration value
+        if filtration(τ) is not NaN then
+ filtration(τ) = min( filtration(τ), filtration(σ) )
+ else
+ if τ is not Gabriel for σ then
+ filtration(τ) = filtration(σ)
+ end if
+ end if
+ end for
+ end for
+ end for
+
+ make_filtration_non_decreasing()
+ prune_above_filtration()
- *//propagate alpha filtration value*
-
- **for all** :math:`\tau` face of :math:`\sigma`
- **if** filtration(:math:`\tau`) is not NaN **then**
- filtration(:math:`\tau`) = filtration(:math:`\sigma`)
- **end if**
- **end for**
- **end for**
- **end for**
-
- make_filtration_non_decreasing()
-
- prune_above_filtration()
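To make the pseudocode concrete, here is a minimal Python sketch of the same propagation. It assumes the complex is stored as a dict from frozensets of vertices to filtration values, and that ``alpha_squared`` and ``is_gabriel`` are hypothetical helpers standing in for the geometric computations; this is only a reading aid, not GUDHI's actual implementation, which operates on a Simplex tree.

.. code-block:: python

    import math
    from itertools import combinations

    def propagate_alpha_filtration(simplices, alpha_squared, is_gabriel):
        # simplices: dict {frozenset of vertices: filtration value, NaN if unset};
        # the dict is assumed to contain every face of every simplex.
        max_dim = max(len(s) for s in simplices) - 1
        for dim in range(max_dim, -1, -1):              # for i : dimension -> 0
            for sigma in [s for s in simplices if len(s) - 1 == dim]:
                if math.isnan(simplices[sigma]):
                    simplices[sigma] = alpha_squared(sigma)
                if dim == 0:
                    continue                            # vertices have no codim-1 faces
                for tau in combinations(sorted(sigma), dim):   # codim-1 faces of sigma
                    tau = frozenset(tau)
                    if not math.isnan(simplices[tau]):
                        # propagate alpha filtration value
                        simplices[tau] = min(simplices[tau], simplices[sigma])
                    elif not is_gabriel(tau, sigma):
                        simplices[tau] = simplices[sigma]
        return simplices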
Dimension 2
^^^^^^^^^^^
@@ -203,9 +206,3 @@ the program output is:
[4, 5, 6] -> 22.74
[3, 6] -> 30.25
-CGAL citations
---------------
-
-.. bibliography:: ../../biblio/how_to_cite_cgal.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/bottleneck_distance_sum.inc b/src/python/doc/bottleneck_distance_sum.inc
index 0de4625c..77dc368d 100644
--- a/src/python/doc/bottleneck_distance_sum.inc
+++ b/src/python/doc/bottleneck_distance_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi |
- | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. It's the shortest distance b for which there exists a | |
- | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 |
- | | diagonal points) such that any couple of matched points are at | |
- | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 </licensing/>`_) |
- | the longest edge | norm in :math:`\mathbb{R}^2`. | |
- | | | :Requires: `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`bottleneck_distance_user` | |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+
+ | .. figure:: | Bottleneck distance measures the similarity between two persistence | :Author: François Godi |
+ | ../../doc/Bottleneck_distance/perturb_pd.png | diagrams. It's the shortest distance b for which there exists a | |
+ | :figclass: align-center | perfect matching between the points of the two diagrams (+ all the | :Since: GUDHI 2.0.0 |
+ | | diagonal points) such that any couple of matched points are at | |
+ | Bottleneck distance is the length of | distance at most b, where the distance between points is the sup | :License: MIT (`GPL v3 </licensing/>`_) |
+ | the longest edge | norm in :math:`\mathbb{R}^2`. | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------------------------------+
+ | * :doc:`bottleneck_distance_user` | |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/bottleneck_distance_user.rst b/src/python/doc/bottleneck_distance_user.rst
index 206fcb63..89da89d3 100644
--- a/src/python/doc/bottleneck_distance_user.rst
+++ b/src/python/doc/bottleneck_distance_user.rst
@@ -66,9 +66,3 @@ The output is:
Bottleneck distance approximation = 0.81
Bottleneck distance value = 0.75
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst
index e8c94bf6..3fd4e27a 100644
--- a/src/python/doc/cubical_complex_user.rst
+++ b/src/python/doc/cubical_complex_user.rst
@@ -91,7 +91,7 @@ Currently one input from a text file is used. It uses a format inspired from the
we allow any filtration values. As a consequence one cannot use ``-1``'s to indicate missing cubes. If you have
missing cubes in your complex, please set their filtration to :math:`+\infty` (aka. ``inf`` in the file).
-The file format is described in details in :ref:`Perseus file format` file format section.
+The file format is described in detail in the `Perseus file format <fileformats.html#perseus>`_ section.
.. testcode::
@@ -120,7 +120,7 @@ conditions are imposed in all directions, then complex :math:`\mathcal{K}` becam
various constructors from the file Bitmap_cubical_complex_periodic_boundary_conditions_base.h to construct cubical
complex with periodic boundary conditions.
-One can also use Perseus style input files (see :doc:`Perseus <fileformats>`) for the specific periodic case:
+One can also use Perseus style input files (see `Perseus file format <fileformats.html#perseus>`_) for the specific periodic case:
.. testcode::
@@ -158,10 +158,3 @@ Examples.
---------
End user programs are available in python/example/ folder.
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/fileformats.rst b/src/python/doc/fileformats.rst
index 345dfdba..ae1b00f3 100644
--- a/src/python/doc/fileformats.rst
+++ b/src/python/doc/fileformats.rst
@@ -80,8 +80,6 @@ Here is a simple sample file in the 3D case::
1. 1. 1.
-.. _Perseus file format:
-
Perseus
*******
diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst
index c153cdfc..13e51047 100644
--- a/src/python/doc/index.rst
+++ b/src/python/doc/index.rst
@@ -86,10 +86,3 @@ Point cloud utilities
*********************
.. include:: point_cloud_sum.inc
-
-Bibliography
-************
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 48425d5e..de09c5b3 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -12,8 +12,8 @@ The easiest way to install the Python version of GUDHI is using
Compiling
*********
-The library uses c++14 and requires `Boost <https://www.boost.org/>`_ ≥ 1.56.0,
-`CMake <https://www.cmake.org/>`_ ≥ 3.1 to generate makefiles,
+The library uses C++14 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.56.0,
+`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.1 to generate makefiles,
`NumPy <http://numpy.org>`_, `Cython <https://www.cython.org/>`_ and
`pybind11 <https://github.com/pybind/pybind11>`_ to compile
the GUDHI Python module.
@@ -21,7 +21,7 @@ It is a multi-platform library and compiles on Linux, Mac OSX and Visual
Studio 2017.
On `Windows <https://wiki.python.org/moin/WindowsCompilers>`_ , only Python
-≥ 3.5 are available because of the required Visual Studio version.
+:math:`\geq` 3.5 is available because of the required Visual Studio version.
On other systems, if you have several Python/python installed, the version 2.X
will be used by default, but you can force it by adding
@@ -30,7 +30,8 @@ will be used by default, but you can force it by adding
GUDHI Python module compilation
===============================
-To build the GUDHI Python module, run the following commands in a terminal:
+After making sure that the `Compilation dependencies`_ are properly installed,
+one can build the GUDHI Python module by running the following commands in a terminal:
.. code-block:: bash
@@ -188,8 +189,14 @@ Run the following commands in a terminal:
Optional third-party library
****************************
+Compilation dependencies
+========================
+
+These third party dependencies are detected by `CMake <https://www.cmake.org/>`_.
+They have to be installed before performing the `GUDHI Python module compilation`_.
+
CGAL
-====
+----
Some GUDHI modules (cf. :doc:`modules list </index>`), and few examples
require `CGAL <https://www.cgal.org/>`_, a C++ library that provides easy
@@ -200,7 +207,7 @@ The procedure to install this library
according to your operating system is detailed
`here <http://doc.cgal.org/latest/Manual/installation.html>`_.
-The following examples requires CGAL version ≥ 4.11.0:
+The following examples require CGAL version :math:`\geq` 4.11.0:
.. only:: builder_html
@@ -212,14 +219,14 @@ The following examples requires CGAL version ≥ 4.11.0:
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
Eigen
-=====
+-----
Some GUDHI modules (cf. :doc:`modules list </index>`), and few examples
require `Eigen <http://eigen.tuxfamily.org/>`_, a C++ template
library for linear algebra: matrices, vectors, numerical solvers, and related
algorithms.
-The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version ≥ 3.1.0:
+The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version :math:`\geq` 3.1.0:
.. only:: builder_html
@@ -229,8 +236,39 @@ The following examples require `Eigen <http://eigen.tuxfamily.org/>`_ version ≥
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
+Threading Building Blocks
+-------------------------
+
+`Intel® TBB <https://www.threadingbuildingblocks.org/>`_ lets you easily write
+parallel C++ programs that take full advantage of multicore performance, that
+are portable and composable, and that have future-proof scalability.
+
+Having Intel® TBB installed is recommended to parallelize and accelerate some
+GUDHI computations.
+
+Run time dependencies
+=====================
+
+These third party dependencies are detected by the Python `import` mechanism at run time.
+They can be installed when required.
+
+EagerPy
+-------
+
+Some Python functions can handle automatic differentiation (possibly only when
+a flag `enable_autodiff=True` is used). In order to reduce code duplication, we
+use `EagerPy <https://eagerpy.jonasrauber.de/>`_ which wraps arrays from
+PyTorch, TensorFlow and JAX in a common interface.
+
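As a minimal sketch of what this enables (assuming the `enable_autodiff=True` flag mentioned above is accepted by `gudhi.wasserstein.wasserstein_distance`, and returns a differentiable tensor when given PyTorch inputs), gradients can flow through a distance computation:

.. code-block:: python

    import torch
    from gudhi.wasserstein import wasserstein_distance

    d1 = torch.tensor([[0.0, 1.0], [0.5, 2.0]], requires_grad=True)
    d2 = torch.tensor([[0.0, 1.1], [0.4, 2.1]])
    dist = wasserstein_distance(d1, d2, order=1, enable_autodiff=True)
    dist.backward()   # gradients with respect to the points of d1
    print(d1.grad)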
+Hnswlib
+-------
+
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`Hnswlib <https://github.com/nmslib/hnswlib>`_ as a backend if explicitly
+requested, to speed up queries.
+
Matplotlib
-==========
+----------
The :doc:`persistence graphical tools </persistence_graphical_tools_user>`
module requires `Matplotlib <http://matplotlib.org>`_, a Python 2D plotting
@@ -251,37 +289,47 @@ The following examples require the `Matplotlib <http://matplotlib.org>`_:
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
+PyKeOps
+-------
+
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`PyKeOps <https://www.kernel-operations.io/keops/python/>`_ as a backend if
+explicitly requested, to speed up queries using a GPU.
+
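A minimal sketch of explicitly requesting one of these backends; the `implementation` keyword of :class:`~gudhi.point_cloud.knn.KNearestNeighbors` is assumed to be the selection mechanism hinted at above:

.. code-block:: python

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.default_rng(0).random((100, 3))
    # "hnswlib" or "keops" select the optional backends described above;
    # omitting the keyword lets the class pick an available backend.
    knn = KNearestNeighbors(4, return_index=True, return_distance=False,
                            implementation="hnswlib")
    neighbors = knn.fit_transform(pts)   # indices array of shape (100, 4)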
Python Optimal Transport
-========================
+------------------------
The :doc:`Wasserstein distance </wasserstein_distance_user>`
module requires `POT <https://pot.readthedocs.io/>`_, a library that provides
several solvers for optimization problems related to Optimal Transport.
+PyTorch
+-------
+
+`PyTorch <https://pytorch.org/>`_ is currently only used as a dependency of
+`PyKeOps`_, and in some tests.
+
Scikit-learn
-============
+------------
The :doc:`persistence representations </representations>` module require
`scikit-learn <https://scikit-learn.org/>`_, a Python-based ecosystem of
open-source software for machine learning.
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`scikit-learn <https://scikit-learn.org/>`_ as a backend if explicitly
+requested.
+
SciPy
-=====
+-----
The :doc:`persistence graphical tools </persistence_graphical_tools_user>` and
:doc:`Wasserstein distance </wasserstein_distance_user>` modules require `SciPy
<http://scipy.org>`_, a Python-based ecosystem of open-source software for
mathematics, science, and engineering.
-Threading Building Blocks
-=========================
-
-`Intel® TBB <https://www.threadingbuildingblocks.org/>`_ lets you easily write
-parallel C++ programs that take full advantage of multicore performance, that
-are portable and composable, and that have future-proof scalability.
-
-Having Intel® TBB installed is recommended to parallelize and accelerate some
-GUDHI computations.
+:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
+`SciPy <http://scipy.org>`_ as a backend if explicitly requested.
Bug reports and contributions
*****************************
diff --git a/src/python/doc/nerve_gic_complex_ref.rst b/src/python/doc/nerve_gic_complex_ref.rst
index 6a81b7af..abde2e8c 100644
--- a/src/python/doc/nerve_gic_complex_ref.rst
+++ b/src/python/doc/nerve_gic_complex_ref.rst
@@ -12,10 +12,3 @@ Cover complexes reference manual
:show-inheritance:
.. automethod:: gudhi.CoverComplex.__init__
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/nerve_gic_complex_sum.inc b/src/python/doc/nerve_gic_complex_sum.inc
index 7fe55aff..7db6c124 100644
--- a/src/python/doc/nerve_gic_complex_sum.inc
+++ b/src/python/doc/nerve_gic_complex_sum.inc
@@ -1,16 +1,16 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière |
- | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | |
- | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 |
- | :figclass: align-center | that comes i.e. from the preimage of a family of intervals covering | |
- | | the image of a scalar-valued function defined on the data. | :License: MIT (`GPL v3 </licensing/>`_) |
- | | | |
- | | | :Requires: `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | | |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` |
- +----------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+
+ | .. figure:: | Nerves and Graph Induced Complexes are cover complexes, i.e. | :Author: Mathieu Carrière |
+ | ../../doc/Nerve_GIC/gicvisu.jpg | simplicial complexes that provably contain topological information | |
+ | :alt: Graph Induced Complex of a point cloud. | about the input data. They can be computed with a cover of the data, | :Since: GUDHI 2.3.0 |
+   | :figclass: align-center                                        | that comes e.g. from the preimage of a family of intervals covering    |                                                                 |
+ | | the image of a scalar-valued function defined on the data. | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ | | | |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------+
+ | * :doc:`nerve_gic_complex_user` | * :doc:`nerve_gic_complex_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst
index f709ce91..0e67fc78 100644
--- a/src/python/doc/nerve_gic_complex_user.rst
+++ b/src/python/doc/nerve_gic_complex_user.rst
@@ -13,7 +13,7 @@ Visualizations of the simplicial complexes can be done with either
neato (from `graphviz <http://www.graphviz.org/>`_),
`geomview <http://www.geomview.org/>`_,
`KeplerMapper <https://github.com/MLWave/kepler-mapper>`_.
-Input point clouds are assumed to be OFF files (cf. :doc:`fileformats`).
+Input point clouds are assumed to be OFF files (cf. `OFF file format <fileformats.html#off-file-format>`_).
Covers
------
@@ -313,10 +313,3 @@ the program outputs again SC.dot which gives the following visualization after u
:alt: Visualization with neato
Visualization with neato
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc
index b68d3d7e..7ff63ae2 100644
--- a/src/python/doc/persistence_graphical_tools_sum.inc
+++ b/src/python/doc/persistence_graphical_tools_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
- | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau, Theo Lacombe |
- | img/graphical_tools_representation.png | the user to display easily persistence barcode, diagram or density. | |
- | | | :Since: GUDHI 2.0.0 |
- | | Note that these functions return the matplotlib axis, allowing | |
- | | for further modifications (title, aspect, etc.) | :License: MIT |
- | | | |
- | | | :Requires: matplotlib, numpy and scipy |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
- | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+
+   | .. figure::                                                     | These graphical tools come on top of persistence results and allow    | :Author: Vincent Rouvreau, Theo Lacombe                 |
+   | img/graphical_tools_representation.png                          | the user to easily display a persistence barcode, diagram or density. |                                                         |
+ | | | :Since: GUDHI 2.0.0 |
+ | | Note that these functions return the matplotlib axis, allowing | |
+ | | for further modifications (title, aspect, etc.) | :License: MIT |
+ | | | |
+ | | | :Requires: `Matplotlib <installation.html#matplotlib>`_ |
+ +-----------------------------------------------------------------+-----------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`persistence_graphical_tools_user` | * :doc:`persistence_graphical_tools_ref` |
+ +-----------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index 91e52703..b5a38eb1 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -12,9 +12,6 @@ Definition
Show persistence as a barcode
-----------------------------
-.. note::
- this function requires matplotlib and numpy to be available
-
This function can display the persistence result as a barcode:
.. plot::
@@ -36,9 +33,6 @@ This function can display the persistence result as a barcode:
Show persistence as a diagram
-----------------------------
-.. note::
- this function requires matplotlib and numpy to be available
-
This function can display the persistence result as a diagram:
.. plot::
@@ -73,8 +67,7 @@ of shape (N x 2) encoding a persistence diagram (in a given dimension).
Persistence density
-------------------
-.. note::
- this function requires matplotlib, numpy and scipy to be available
+:Requires: `SciPy <installation.html#scipy>`_
If you want more information on a specific dimension, for instance:
diff --git a/src/python/doc/persistent_cohomology_sum.inc b/src/python/doc/persistent_cohomology_sum.inc
index 0effb50f..a1ff2eee 100644
--- a/src/python/doc/persistent_cohomology_sum.inc
+++ b/src/python/doc/persistent_cohomology_sum.inc
@@ -12,7 +12,7 @@
| | | |
| | Computation of persistent cohomology using the algorithm of | |
| | :cite:`DBLP:journals/dcg/SilvaMV11` and | |
- | | :cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed | |
+ | | :cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed | |
| | Annotation Matrix implementation of | |
| | :cite:`DBLP:conf/esa/BoissonnatDM13`. | |
| | | |
diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst
index 506fa3a7..a3f294b2 100644
--- a/src/python/doc/persistent_cohomology_user.rst
+++ b/src/python/doc/persistent_cohomology_user.rst
@@ -21,7 +21,7 @@ Definition
Computation of persistent cohomology using the algorithm of :cite:`DBLP:journals/dcg/SilvaMV11` and
-:cite:`DBLP:journals/corr/abs-1208-5018` and the Compressed Annotation Matrix implementation of
+:cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed Annotation Matrix implementation of
:cite:`DBLP:conf/esa/BoissonnatDM13`.
The theory of homology consists in attaching to a topological space a sequence of (homology) groups, capturing global
@@ -111,10 +111,3 @@ We provide several example files: run these examples with -h for details on thei
* :download:`rips_complex_diagram_persistence_from_distance_matrix_file_example.py <../example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py>`
* :download:`random_cubical_complex_persistence_example.py <../example/random_cubical_complex_persistence_example.py>`
* :download:`tangential_complex_plain_homology_from_off_file_example.py <../example/tangential_complex_plain_homology_from_off_file_example.py>`
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst
index c0d4b303..ffd8f85b 100644
--- a/src/python/doc/point_cloud.rst
+++ b/src/python/doc/point_cloud.rst
@@ -16,15 +16,32 @@ File Readers
Subsampling
-----------
+:Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
+
.. automodule:: gudhi.subsampling
:members:
:special-members:
:show-inheritance:
-TimeDelayEmbedding
-------------------
+Time Delay Embedding
+--------------------
.. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding
:members:
:special-members: __call__
+K nearest neighbors
+-------------------
+
+.. automodule:: gudhi.point_cloud.knn
+ :members:
+ :undoc-members:
+ :special-members: __init__
+
+Distance to measure
+-------------------
+
+.. automodule:: gudhi.point_cloud.dtm
+ :members:
+ :undoc-members:
+ :special-members: __init__
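A minimal usage sketch for the distance-to-measure class; the positional `k` argument follows the weighted Rips example added later in this patch, while the default metric is an assumption:

.. code-block:: python

    import numpy as np
    from gudhi.point_cloud.dtm import DistanceToMeasure

    pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
    dtm = DistanceToMeasure(2)      # k = 2 nearest neighbors
    print(dtm.fit_transform(pts))   # one DTM value per input point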
diff --git a/src/python/doc/point_cloud_sum.inc b/src/python/doc/point_cloud_sum.inc
index 0a159680..f955c3ab 100644
--- a/src/python/doc/point_cloud_sum.inc
+++ b/src/python/doc/point_cloud_sum.inc
@@ -1,15 +1,12 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, etc. | :Author: Vincent Rouvreau |
- | | :math:`(y_1, y_2, \ldots, y_d)` | | |
- | | | :Since: GUDHI 2.0.0 |
- | | | |
- | | | :License: MIT (`GPL v3 </licensing/>`_) |
- | | Parts of this package require CGAL. | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`point_cloud` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+
+ | | :math:`(x_1, x_2, \ldots, x_d)` | Utilities to process point clouds: read from file, subsample, | :Authors: Vincent Rouvreau, Marc Glisse, Masatoshi Takenouchi |
+ | | :math:`(y_1, y_2, \ldots, y_d)` | find neighbors, embed time series in higher dimension, | |
+ | | estimate a density, etc. | :Since: GUDHI 2.0.0 |
+ | | | |
+ | | | :License: MIT (`GPL v3 </licensing/>`_, BSD-3-Clause, Apache-2.0) |
+ +-----------------------------------+---------------------------------------------------------------+-------------------------------------------------------------------+
+ | * :doc:`point_cloud` |
+ +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index 11dcbcf9..041e3247 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -10,7 +10,7 @@ Representations manual
This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
-A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time.
+A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
A small example is provided
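For instance, a list of diagrams with different shapes can be fed to one of the module's vectorizers. This is a minimal sketch using the ``Landscape`` class; the constructor parameters shown are assumptions chosen for brevity:

.. code-block:: python

    import numpy as np
    from gudhi.representations import Landscape

    # Two diagrams with different numbers of points.
    diags = [np.array([[0.0, 1.0], [0.0, 2.0]]),
             np.array([[1.0, 3.0]])]
    L = Landscape(num_landscapes=1, resolution=10).fit_transform(diags)
    print(L.shape)   # one vector of length num_landscapes * resolution per diagram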
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
index eac89b9d..323a0920 100644
--- a/src/python/doc/representations_sum.inc
+++ b/src/python/doc/representations_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
- | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière |
- | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
- | | | :Since: GUDHI 3.1.0 |
- | | | |
- | | | :License: MIT |
- | | | |
- | | | :Requires: scikit-learn |
- +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
- | * :doc:`representations` |
- +------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
+ | | | :Since: GUDHI 3.1.0 |
+ | | | |
+ | | | :License: MIT |
+ | | | |
+ | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_ref.rst b/src/python/doc/rips_complex_ref.rst
index 22b5616c..5f3e46c1 100644
--- a/src/python/doc/rips_complex_ref.rst
+++ b/src/python/doc/rips_complex_ref.rst
@@ -12,3 +12,16 @@ Rips complex reference manual
:show-inheritance:
.. automethod:: gudhi.RipsComplex.__init__
+
+.. _weighted-rips-complex-reference-manual:
+
+======================================
+Weighted Rips complex reference manual
+======================================
+
+.. autoclass:: gudhi.weighted_rips_complex.WeightedRipsComplex
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+ .. automethod:: gudhi.weighted_rips_complex.WeightedRipsComplex.__init__
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index 6feb74cd..f7580714 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -11,6 +11,9 @@
| | | |
| | This complex can be built from a point cloud and a distance function, | |
| | or from a distance matrix. | |
+ | | | |
+ | | Weighted Rips complex constructs a simplicial complex from a distance | |
+ | | matrix and weights on vertices. | |
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+
| * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
+----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index c4bbcfb6..819568be 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -348,9 +348,53 @@ until dimension 1 - one skeleton graph in other words), the output is:
bottleneck distance and persistence graphical tool will not work properly,
this is a known issue.
-Bibliography
-------------
+Weighted Rips Complex
+---------------------
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
+`WeightedRipsComplex <rips_complex_ref.html#weighted-rips-complex-reference-manual>`_ builds a simplicial complex from a distance matrix and weights on vertices.
+
+
+Example from a distance matrix and weights
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following example computes the weighted Rips filtration associated with a distance matrix and weights on vertices.
+
+.. testcode::
+
+ from gudhi.weighted_rips_complex import WeightedRipsComplex
+ dist = [[], [1]]
+ weights = [1, 100]
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ print(list(st.get_filtration()))
+
+The output is:
+
+.. testoutput::
+
+ [([0], 2.0), ([1], 200.0), ([0, 1], 200.0)]
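These values are consistent with the filtration rule one can infer from this example (stated here as an inference, not a specification): a vertex :math:`i` enters at :math:`2w_i` and an edge :math:`(i,j)` at :math:`\max(2w_i, 2w_j, d(i,j)+w_i+w_j)`. Here :math:`[0]` enters at :math:`2\times 1=2`, :math:`[1]` at :math:`2\times 100=200`, and the edge at :math:`\max(2, 200, 1+1+100)=200`.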
+
+Example from a point cloud combined with DistanceToMeasure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Combining the weighted Rips complex with DistanceToMeasure, one can compute the DTM-filtration of a point set, as in `this notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-DTM-filtrations.ipynb>`_.
+
+.. testcode::
+
+ import numpy as np
+ from scipy.spatial.distance import cdist
+ from gudhi.point_cloud.dtm import DistanceToMeasure
+ from gudhi.weighted_rips_complex import WeightedRipsComplex
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dist = cdist(pts,pts)
+ dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+ r = dtm.fit_transform(dist)
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ print(st.persistence())
+
+The output is:
+
+.. testoutput::
+
+ [(0, (3.1622776601683795, inf)), (0, (3.1622776601683795, 5.39834563766817)), (0, (3.1622776601683795, 5.39834563766817))]
diff --git a/src/python/doc/simplex_tree_ref.rst b/src/python/doc/simplex_tree_ref.rst
index 9eb8c199..46b2c1e5 100644
--- a/src/python/doc/simplex_tree_ref.rst
+++ b/src/python/doc/simplex_tree_ref.rst
@@ -8,7 +8,6 @@ Simplex tree reference manual
.. autoclass:: gudhi.SimplexTree
:members:
- :undoc-members:
:show-inheritance:
.. automethod:: gudhi.SimplexTree.__init__
diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst
index 1b272c35..3df7617f 100644
--- a/src/python/doc/simplex_tree_user.rst
+++ b/src/python/doc/simplex_tree_user.rst
@@ -66,10 +66,3 @@ The output is:
([1, 2], 4.0)
([1], 0.0)
([2], 4.0)
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/tangential_complex_sum.inc b/src/python/doc/tangential_complex_sum.inc
index 45ce2a66..22314a2d 100644
--- a/src/python/doc/tangential_complex_sum.inc
+++ b/src/python/doc/tangential_complex_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin |
- | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | |
- | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Since: GUDHI 2.0.0 |
- | | an unknown manifold. The running time depends only linearly on the | |
- | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :License: MIT (`GPL v3 </licensing/>`_) |
- | | dimension :math:`k`. | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | A Tangential Delaunay complex is a simplicial complex designed to | :Author: Clément Jamin |
+ | ../../doc/Tangential_complex/tc_examples.png | reconstruct a :math:`k`-dimensional manifold embedded in :math:`d`- | |
+ | :figclass: align-center | dimensional Euclidean space. The input is a point sample coming from | :Since: GUDHI 2.0.0 |
+ | | an unknown manifold. The running time depends only linearly on the | |
+ | | extrinsic dimension :math:`d` and exponentially on the intrinsic | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | dimension :math:`k`. | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`tangential_complex_user` | * :doc:`tangential_complex_ref` |
+ +----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst
index cf8199cc..3d45473b 100644
--- a/src/python/doc/tangential_complex_user.rst
+++ b/src/python/doc/tangential_complex_user.rst
@@ -194,11 +194,3 @@ The output is:
Tangential contains 4 vertices.
Inconsistencies has been fixed.
-
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc
index f9308e5e..c41de017 100644
--- a/src/python/doc/wasserstein_distance_sum.inc
+++ b/src/python/doc/wasserstein_distance_sum.inc
@@ -1,14 +1,12 @@
.. table::
:widths: 30 40 30
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe |
- | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edges lengths (instead of | |
- | :figclass: align-center | the maximum). It allows to define sophisticated objects such as | :Since: GUDHI 3.1.0 |
- | | barycenters of a family of persistence diagrams. | |
- | | | :License: MIT |
- | | | |
- | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 |
- +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
- | * :doc:`wasserstein_distance_user` | |
- +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+
+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe, Marc Glisse |
+ | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams using the sum of all edge lengths (instead of | |
+ | :figclass: align-center | the maximum). It allows defining sophisticated objects such as | :Since: GUDHI 3.1.0 |
+ | | barycenters of a family of persistence diagrams. | |
+ | | | :License: MIT, BSD-3-Clause |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------------------+
+ | * :doc:`wasserstein_distance_user` | |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index c24da74d..96ec7872 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -17,12 +17,21 @@ are measured in norm p, for :math:`1 \leq p \leq \infty`.
Distance Functions
------------------
-This first implementation uses the Python Optimal Transport library and is based
-on ideas from "Large Scale Computation of Means and Cluster for Persistence
+
+Optimal Transport
+*****************
+
+:Requires: `Python Optimal Transport <installation.html#python-optimal-transport>`_ (POT) :math:`\geq` 0.5.1
+
+This first implementation uses the `Python Optimal Transport <installation.html#python-optimal-transport>`_
+library and is based on ideas from "Large Scale Computation of Means and Clusters for Persistence
Diagrams using Optimal Transport" :cite:`10.5555/3327546.3327645`.
.. autofunction:: gudhi.wasserstein.wasserstein_distance
+Hera
+****
+
This other implementation comes from `Hera
<https://bitbucket.org/grey_narn/hera/src/master/>`_ (BSD-3-Clause) which is
based on "Geometry Helps to Compare Persistence Diagrams"
@@ -94,6 +103,8 @@ The output is:
Barycenters
-----------
+:Requires: `Python Optimal Transport <installation.html#python-optimal-transport>`_ (POT) :math:`\geq` 0.5.1
+
A Fréchet mean (or barycenter) is a generalization of the arithmetic
mean to a nonlinear space such as the space of persistence diagrams.
Given a set of persistence diagrams :math:`\mu_1 \dots \mu_n`, it is
@@ -164,10 +175,3 @@ The output is:
[[0.27916667 0.55416667]
[0.7375 0.7625 ]
[0.2375 0.2625 ]]
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
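Since this file also documents the new POT requirement of the Barycenters section, a small usage sketch may help; the function name lagrangian_barycenter and its pdiagset/init parameters are assumptions about the gudhi.wasserstein.barycenter module this release introduces:

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter  # assumed location

    diagrams = [np.array([[0.1, 0.5], [0.6, 0.8]]),
                np.array([[0.2, 0.6]]),
                np.array([[0.3, 0.7], [0.55, 0.85]])]
    # Estimate a Frechet mean of the three diagrams; init selects which
    # input diagram initializes the iterative algorithm.
    print(lagrangian_barycenter(pdiagset=diagrams, init=0))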
diff --git a/src/python/doc/witness_complex_sum.inc b/src/python/doc/witness_complex_sum.inc
index 34d4df4a..4416fec0 100644
--- a/src/python/doc/witness_complex_sum.inc
+++ b/src/python/doc/witness_complex_sum.inc
@@ -1,18 +1,18 @@
.. table::
:widths: 30 40 30
- +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich |
- | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | |
- | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | The data structure is described in | |
- | | :cite:`boissonnatmariasimplextreealgorithmica`. | :License: MIT (`GPL v3 </licensing/>`_ for Euclidean versions only) |
- | | | |
- | | | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 for Euclidean versions only |
- +-------------------------------------------------------------------+----------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` |
- | | * :doc:`strong_witness_complex_ref` |
- | | * :doc:`euclidean_witness_complex_ref` |
- | | * :doc:`euclidean_strong_witness_complex_ref` |
- +-------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Witness complex :math:`Wit(W,L)` is a simplicial complex defined on | :Author: Siargey Kachanovich |
+ | ../../doc/Witness_complex/Witness_complex_representation.png | two sets of points in :math:`\mathbb{R}^D`. | |
+ | :alt: Witness complex representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The data structure is described in | |
+ | | :cite:`boissonnatmariasimplextreealgorithmica`. | :License: MIT (`GPL v3 </licensing/>`_ for Euclidean versions only) |
+ | | | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 for Euclidean versions only |
+ +-------------------------------------------------------------------+----------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`witness_complex_user` | * :doc:`witness_complex_ref` |
+ | | * :doc:`strong_witness_complex_ref` |
+ | | * :doc:`euclidean_witness_complex_ref` |
+ | | * :doc:`euclidean_strong_witness_complex_ref` |
+ +-------------------------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst
index 799f5444..08dcd288 100644
--- a/src/python/doc/witness_complex_user.rst
+++ b/src/python/doc/witness_complex_user.rst
@@ -126,10 +126,3 @@ Example2: Computing persistence using strong relaxed witness complex
Here is an example of constructing a strong witness complex filtration and computing persistence on it:
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
-
-Bibliography
-------------
-
-.. bibliography:: ../../biblio/bibliography.bib
- :filter: docname in docnames
- :style: unsrt
diff --git a/src/python/doc/zbibliography.rst b/src/python/doc/zbibliography.rst
new file mode 100644
index 00000000..e23fcf25
--- /dev/null
+++ b/src/python/doc/zbibliography.rst
@@ -0,0 +1,10 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Bibliography
+------------
+
+.. bibliography:: ../../biblio/bibliography.bib
+ :style: plain
+
diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py
index 73faf17c..465632eb 100755
--- a/src/python/example/alpha_complex_from_points_example.py
+++ b/src/python/example/alpha_complex_from_points_example.py
@@ -46,9 +46,6 @@ if simplex_tree.find([4]):
else:
print("[4] Not found...")
-# Some insertions, simplex_tree needs to initialize filtrations
-simplex_tree.initialize_filtration()
-
print("dimension=", simplex_tree.dimension())
print("filtrations=")
for simplex_with_filtration in simplex_tree.get_filtration():
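The deleted lines reflect that get_filtration() no longer requires an explicit initialize_filtration() call beforehand; the sorting is handled internally. A minimal sketch of the simplified idiom:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1], filtration=0.5)
    st.insert([1, 2], filtration=1.0)
    # No initialize_filtration() needed before iterating in filtration order.
    for simplex, filtration in st.get_filtration():
        print(simplex, filtration)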
diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
index f156826d..3e12b0d5 100755
--- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py
+++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
@@ -5,6 +5,7 @@ import argparse
import math
import errno
import os
+import numpy as np
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
which is released under MIT.
@@ -56,7 +57,7 @@ with open(args.file, "r") as f:
message = "Number of simplices=" + repr(rips_stree.num_simplices())
print(message)
- rips_diag = rips_stree.persistence()
+ rips_stree.compute_persistence()
print("##############################################################")
print("AlphaComplex creation from points read in a OFF file")
@@ -72,18 +73,13 @@ with open(args.file, "r") as f:
message = "Number of simplices=" + repr(alpha_stree.num_simplices())
print(message)
- alpha_diag = alpha_stree.persistence()
+ alpha_stree.compute_persistence()
max_b_distance = 0.0
for dim in range(args.max_dimension):
# Alpha persistence values need to be transformed because filtration
# values are squared alpha values
- funcs = [math.sqrt, math.sqrt]
- alpha_intervals = []
- for interval in alpha_stree.persistence_intervals_in_dimension(dim):
- alpha_intervals.append(
- map(lambda func, value: func(value), funcs, interval)
- )
+ alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim))
rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
bottleneck_distance = gudhi.bottleneck_distance(
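Taken together, the two changes in this example read as follows: compute_persistence() fills the internal state without building the (dimension, (birth, death)) list that persistence() returns, and one vectorized np.sqrt converts the squared alpha filtration values. A sketch of the resulting pattern, with points standing in for the point cloud read from the OFF file:

    import numpy as np
    import gudhi

    rips_stree = gudhi.RipsComplex(points=points, max_edge_length=0.5) \
        .create_simplex_tree(max_dimension=2)
    rips_stree.compute_persistence()   # returns nothing, unlike persistence()

    alpha_stree = gudhi.AlphaComplex(points=points) \
        .create_simplex_tree(max_alpha_square=0.25)
    alpha_stree.compute_persistence()

    for dim in range(2):
        # Alpha filtration values are squared radii: one np.sqrt on the whole
        # (n, 2) interval array replaces the per-value map of the old code.
        alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim))
        rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
        print(dim, gudhi.bottleneck_distance(rips_intervals, alpha_intervals))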
diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py
index 119072eb..c4a71a7a 100755
--- a/src/python/example/diagram_vectorizations_distances_kernels.py
+++ b/src/python/example/diagram_vectorizations_distances_kernels.py
@@ -9,26 +9,25 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet
TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
- SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel
+ SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel, WassersteinDistance
-D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = [D]
+D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]).fit_transform(diags)
+proc1 = DiagramSelector(use=True, point_type="finite")
+proc2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())])
+proc3 = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))])
+D1 = proc3(proc2(proc1(D1)))
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagram for vector methods")
plt.show()
LS = Landscape(resolution=1000)
-L = LS.fit_transform(diags)
-plt.plot(L[0][:1000])
-plt.plot(L[0][1000:2000])
-plt.plot(L[0][2000:3000])
+L = LS(D1)
+plt.plot(L[:1000])
+plt.plot(L[1000:2000])
+plt.plot(L[2000:3000])
plt.title("Landscape")
plt.show()
@@ -36,50 +35,39 @@ def pow(n):
return lambda x: np.power(x[1]-x[0],n)
SH = Silhouette(resolution=1000, weight=pow(2))
-sh = SH.fit_transform(diags)
-plt.plot(sh[0])
+plt.plot(SH(D1))
plt.title("Silhouette")
plt.show()
BC = BettiCurve(resolution=1000)
-bc = BC.fit_transform(diags)
-plt.plot(bc[0])
+plt.plot(BC(D1))
plt.title("Betti Curve")
plt.show()
CP = ComplexPolynomial(threshold=-1, polynomial_type="T")
-cp = CP.fit_transform(diags)
-print("Complex polynomial is " + str(cp[0,:]))
+print("Complex polynomial is " + str(CP(D1)))
TV = TopologicalVector(threshold=-1)
-tv = TV.fit_transform(diags)
-print("Topological vector is " + str(tv[0,:]))
+print("Topological vector is " + str(TV(D1)))
PI = PersistenceImage(bandwidth=.1, weight=lambda x: x[1], im_range=[0,1,0,1], resolution=[100,100])
-pi = PI.fit_transform(diags)
-plt.imshow(np.flip(np.reshape(pi[0], [100,100]), 0))
+plt.imshow(np.flip(np.reshape(PI(D1), [100,100]), 0))
plt.title("Persistence Image")
plt.show()
ET = Entropy(mode="scalar")
-et = ET.fit_transform(diags)
-print("Entropy statistic is " + str(et[0,:]))
+print("Entropy statistic is " + str(ET(D1)))
ET = Entropy(mode="vector", normalized=False)
-et = ET.fit_transform(diags)
-plt.plot(et[0])
+plt.plot(ET(D1))
plt.title("Entropy function")
plt.show()
-D = np.array([[1.,5.],[3.,6.],[2.,7.]])
-diags2 = [D]
+D2 = np.array([[1.,5.],[3.,6.],[2.,7.]])
+D2 = proc3(proc2(proc1(D2)))
-diags2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags2)
-
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
-D = diags2[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
+plt.scatter(D2[:,0], D2[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagrams for kernel methods")
plt.show()
@@ -88,46 +76,34 @@ def arctan(C,p):
return lambda x: C*np.arctan(np.power(x[1], p))
PWG = PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("PWG kernel is " + str(Y[0][0]))
+print("PWG kernel is " + str(PWG(D1, D2)))
PWG = PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("Approximate PWG kernel is " + str(Y[0][0]))
+print("Approximate PWG kernel is " + str(PWG(D1, D2)))
PSS = PersistenceScaleSpaceKernel(bandwidth=1.)
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("PSS kernel is " + str(Y[0][0]))
+print("PSS kernel is " + str(PSS(D1, D2)))
PSS = PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("Approximate PSS kernel is " + str(Y[0][0]))
+print("Approximate PSS kernel is " + str(PSS(D1, D2)))
sW = SlicedWassersteinDistance(num_directions=100)
-X = sW.fit(diags)
-Y = sW.transform(diags2)
-print("SW distance is " + str(Y[0][0]))
+print("SW distance is " + str(sW(D1, D2)))
SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.)
-X = SW.fit(diags)
-Y = SW.transform(diags2)
-print("SW kernel is " + str(Y[0][0]))
+print("SW kernel is " + str(SW(D1, D2)))
+
+W = WassersteinDistance(order=2, internal_p=2, mode="pot")
+print("Wasserstein distance (POT) is " + str(W(D1, D2)))
+
+W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001)
+print("Wasserstein distance (hera) is " + str(W(D1, D2)))
W = BottleneckDistance(epsilon=.001)
-X = W.fit(diags)
-Y = W.transform(diags2)
-print("Bottleneck distance is " + str(Y[0][0]))
+print("Bottleneck distance is " + str(W(D1, D2)))
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("PF kernel is " + str(Y[0][0]))
+print("PF kernel is " + str(PF(D1, D2)))
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("Approximate PF kernel is " + str(Y[0][0]))
+print("Approximate PF kernel is " + str(PF(D1, D2)))
diff --git a/src/python/example/simplex_tree_example.py b/src/python/example/simplex_tree_example.py
index 34833899..c4635dc5 100755
--- a/src/python/example/simplex_tree_example.py
+++ b/src/python/example/simplex_tree_example.py
@@ -42,7 +42,6 @@ print("simplices=")
for simplex_with_filtration in st.get_simplices():
print("(%s, %.2f)" % tuple(simplex_with_filtration))
-st.initialize_filtration()
print("filtration=")
for simplex_with_filtration in st.get_filtration():
print("(%s, %.2f)" % tuple(simplex_with_filtration))
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index e04dc652..d75e374a 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -27,11 +27,11 @@ __license__ = "GPL v3"
cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
cdef cppclass Alpha_complex_interface "Gudhi::alpha_complex::Alpha_complex_interface":
- Alpha_complex_interface(vector[vector[double]] points) except +
+ Alpha_complex_interface(vector[vector[double]] points) nogil except +
# bool from_file is a workaround for cython to find the correct signature
- Alpha_complex_interface(string off_file, bool from_file) except +
- vector[double] get_point(int vertex) except +
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) except +
+ Alpha_complex_interface(string off_file, bool from_file) nogil except +
+ vector[double] get_point(int vertex) nogil except +
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) nogil except +
# AlphaComplex python interface
cdef class AlphaComplex:
@@ -70,6 +70,7 @@ cdef class AlphaComplex:
# The real cython constructor
def __cinit__(self, points = None, off_file = ''):
+ cdef vector[vector[double]] pts
if off_file:
if os.path.isfile(off_file):
self.thisptr = new Alpha_complex_interface(
@@ -80,7 +81,9 @@ cdef class AlphaComplex:
if points is None:
# Empty Alpha construction
points=[]
- self.thisptr = new Alpha_complex_interface(points)
+ pts = points
+ with nogil:
+ self.thisptr = new Alpha_complex_interface(pts)
def __dealloc__(self):
@@ -113,6 +116,8 @@ cdef class AlphaComplex:
:rtype: SimplexTree
"""
stree = SimplexTree()
+ cdef double mas = max_alpha_square
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, max_alpha_square)
+ with nogil:
+ self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, mas)
return stree
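A practical consequence of the added nogil annotations, not shown in the patch: the C++ construction can now overlap when driven from Python threads. A hypothetical sketch (data and thread count arbitrary):

    from concurrent.futures import ThreadPoolExecutor
    import numpy as np
    import gudhi

    clouds = [np.random.rand(1000, 3) for _ in range(4)]

    def build(points):
        # The heavy C++ work runs with the GIL released, so these calls overlap.
        return gudhi.AlphaComplex(points=points).create_simplex_tree()

    with ThreadPoolExecutor(max_workers=4) as pool:
        trees = list(pool.map(build, clouds))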
diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc
new file mode 100644
index 00000000..732cb9a8
--- /dev/null
+++ b/src/python/gudhi/bottleneck.cc
@@ -0,0 +1,50 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Bottleneck.h>
+
+#include <pybind11_diagram_utils.h>
+
+double bottleneck(Dgm d1, Dgm d2, double epsilon)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, epsilon);
+}
+
+PYBIND11_MODULE(bottleneck, m) {
+ m.attr("__license__") = "GPL v3";
+ m.def("bottleneck_distance", &bottleneck,
+ py::arg("diagram_1"), py::arg("diagram_2"),
+ py::arg("e") = (std::numeric_limits<double>::min)(),
+ R"pbdoc(
+ Compute the bottleneck distance between two persistence diagrams.
+
+ :param diagram_1: The first diagram.
+ :type diagram_1: numpy array of shape (m,2)
+ :param diagram_2: The second diagram.
+ :type diagram_2: numpy array of shape (n,2)
+ :param e: If `e` is 0, this uses an expensive algorithm to compute the
+ exact distance.
+ If `e` is not 0, it asks for an additive `e`-approximation, and
+ currently also allows a small multiplicative error (the last 2 or 3
+ bits of the mantissa may be wrong). This version of the algorithm takes
+ advantage of the limited precision of `double` and is usually a lot
+ faster to compute, whatever the value of `e`.
+ Thus, by default, `e` is the smallest positive double.
+ :type e: float
+ :rtype: float
+ :returns: the bottleneck distance.
+ )pbdoc");
+}
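From Python, the rebuilt module is called as before; a minimal sketch of the three regimes of e described in the docstring:

    import numpy as np
    import gudhi

    d1 = np.array([[2.7, 3.7], [9.6, 14.0]])
    d2 = np.array([[2.8, 4.45], [9.5, 14.1]])

    print(gudhi.bottleneck_distance(d1, d2))       # default: smallest positive double
    print(gudhi.bottleneck_distance(d1, d2, 0.1))  # additive 0.1-approximation
    print(gudhi.bottleneck_distance(d1, d2, 0))    # exact, slower algorithm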
diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx
deleted file mode 100644
index af011e88..00000000
--- a/src/python/gudhi/bottleneck.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Vincent Rouvreau
-#
-# Copyright (C) 2016 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-from cython cimport numeric
-from libcpp.vector cimport vector
-from libcpp.utility cimport pair
-import os
-
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "GPL v3"
-
-cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram":
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double)
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]])
-
-def bottleneck_distance(diagram_1, diagram_2, e=None):
- """This function returns the point corresponding to a given vertex.
-
- :param diagram_1: The first diagram.
- :type diagram_1: vector[pair[double, double]]
- :param diagram_2: The second diagram.
- :type diagram_2: vector[pair[double, double]]
- :param e: If `e` is 0, this uses an expensive algorithm to compute the
- exact distance.
- If `e` is not 0, it asks for an additive `e`-approximation, and
- currently also allows a small multiplicative error (the last 2 or 3
- bits of the mantissa may be wrong). This version of the algorithm takes
- advantage of the limited precision of `double` and is usually a lot
- faster to compute, whatever the value of `e`.
-
- Thus, by default, `e` is the smallest positive double.
- :type e: float
- :rtype: float
- :returns: the bottleneck distance.
- """
- if e is None:
- # Default value is the smallest double value (not 0, 0 is for exact version)
- return bottleneck(diagram_1, diagram_2)
- else:
- # Can be 0 for exact version
- return bottleneck(diagram_1, diagram_2, e)
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index d5ad1266..28fbe3af 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -27,18 +27,20 @@ __license__ = "MIT"
cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef cppclass Bitmap_cubical_complex_base_interface "Gudhi::Cubical_complex::Cubical_complex_interface<>":
- Bitmap_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells)
- Bitmap_cubical_complex_base_interface(string perseus_file)
- int num_simplices()
- int dimension()
+ Bitmap_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells) nogil
+ Bitmap_cubical_complex_base_interface(string perseus_file) nogil
+ int num_simplices() nogil
+ int dimension() nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
- Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
+ Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
# CubicalComplex python interface
cdef class CubicalComplex:
@@ -78,7 +80,7 @@ cdef class CubicalComplex:
perseus_file=''):
if ((dimensions is not None) and (top_dimensional_cells is not None)
and (perseus_file == '')):
- self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+ self._construct_from_cells(dimensions, top_dimensional_cells)
elif ((dimensions is None) and (top_dimensional_cells is not None)
and (perseus_file == '')):
top_dimensional_cells = np.array(top_dimensional_cells,
@@ -86,11 +88,11 @@ cdef class CubicalComplex:
order = 'F')
dimensions = top_dimensional_cells.shape
top_dimensional_cells = top_dimensional_cells.ravel(order='F')
- self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+ self._construct_from_cells(dimensions, top_dimensional_cells)
elif ((dimensions is None) and (top_dimensional_cells is None)
and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
+ self._construct_from_file(perseus_file.encode('utf-8'))
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
perseus_file)
@@ -99,6 +101,14 @@ cdef class CubicalComplex:
"top_dimensional_cells or from a Perseus-style file name.",
file=sys.stderr)
+ def _construct_from_cells(self, vector[unsigned] dimensions, vector[double] top_dimensional_cells):
+ with nogil:
+ self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+
+ def _construct_from_file(self, string filename):
+ with nogil:
+ self.thisptr = new Bitmap_cubical_complex_base_interface(filename)
+
def __dealloc__(self):
if self.thisptr != NULL:
del self.thisptr
@@ -129,8 +139,34 @@ cdef class CubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+ equivalent to :func:`persistence` when you do not want the list
+ :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ cdef int field = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, 1)
+ self.pcohptr.compute_persistence(field, minp)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number
@@ -143,30 +179,75 @@ cdef class CubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
- if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+ internal datastructure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 1].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell).
+ """
+
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()"
+
+ cdef vector[vector[int]] persistence_result
+ output = [[],[]]
+ with nogil:
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1 if len(ess) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1 if len(reg) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` to be
launched first.
:note: betti_numbers function always returns [1, 0, 0, ...] as infinity
filtration cubes are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -181,13 +262,11 @@ cdef class CubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -198,13 +277,8 @@ cdef class CubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.", file=sys.stderr)
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np.array(self.pcohptr.intervals_in_dimension(dimension))
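To make the new API concrete, a small sketch (input values arbitrary) of the compute_persistence / cofaces_of_persistence_pairs workflow this file now exposes:

    import numpy as np
    import gudhi

    cells = np.array([[0., 1.], [2., 3.]])
    cc = gudhi.CubicalComplex(top_dimensional_cells=cells)
    cc.compute_persistence()

    regular, essential = cc.cofaces_of_persistence_pairs()
    # regular[h]: (n, 2) array of (birth, death) top-cell indices in dimension h;
    # essential[h]: birth top-cell indices of features that never die.
    flat = cells.ravel(order='F')  # same ordering the constructor uses
    for h, pairs in enumerate(regular):
        for birth, death in pairs:
            print(h, flat[birth], flat[death])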
diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc
index 0d562b4c..ea80a9a8 100644
--- a/src/python/gudhi/hera.cc
+++ b/src/python/gudhi/hera.cc
@@ -8,35 +8,20 @@
* - YYYY/MM Author: Description of the modification
*/
-#include <pybind11/pybind11.h>
-#include <pybind11/numpy.h>
-
-#include <boost/range/iterator_range.hpp>
-
#include <wasserstein.h> // Hera
-#include <array>
-
-namespace py = pybind11;
-typedef py::array_t<double, py::array::c_style | py::array::forcecast> Dgm;
+#include <pybind11_diagram_utils.h>
double wasserstein_distance(
Dgm d1, Dgm d2,
double wasserstein_power, double internal_p,
double delta)
{
- py::buffer_info buf1 = d1.request();
- py::buffer_info buf2 = d2.request();
- // shape (n,2) or (0) for empty
- if((buf1.ndim!=2 || buf1.shape[1]!=2) && (buf1.ndim!=1 || buf1.shape[0]!=0))
- throw std::runtime_error("Diagram 1 must be an array of size n x 2");
- if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0))
- throw std::runtime_error("Diagram 2 must be an array of size n x 2");
- typedef std::array<double, 2> Point;
- auto p1 = (Point*)buf1.ptr;
- auto p2 = (Point*)buf2.ptr;
- auto diag1 = boost::make_iterator_range(p1, p1+buf1.shape[0]);
- auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]);
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
hera::AuctionParams<double> params;
params.wasserstein_power = wasserstein_power;
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index fd08b976..d353d2af 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -24,18 +24,20 @@ __license__ = "MIT"
cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_base_interface "Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>":
- Periodic_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions)
- Periodic_cubical_complex_base_interface(string perseus_file)
- int num_simplices()
- int dimension()
+ Periodic_cubical_complex_base_interface(vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions) nogil
+ Periodic_cubical_complex_base_interface(string perseus_file) nogil
+ int num_simplices() nogil
+ int dimension() nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>>":
- Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
+ Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
# PeriodicCubicalComplex python interface
cdef class PeriodicCubicalComplex:
@@ -79,9 +81,7 @@ cdef class PeriodicCubicalComplex:
periodic_dimensions=None, perseus_file=''):
if ((dimensions is not None) and (top_dimensional_cells is not None)
and (periodic_dimensions is not None) and (perseus_file == '')):
- self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
- top_dimensional_cells,
- periodic_dimensions)
+ self._construct_from_cells(dimensions, top_dimensional_cells, periodic_dimensions)
elif ((dimensions is None) and (top_dimensional_cells is not None)
and (periodic_dimensions is not None) and (perseus_file == '')):
top_dimensional_cells = np.array(top_dimensional_cells,
@@ -89,13 +89,11 @@ cdef class PeriodicCubicalComplex:
order = 'F')
dimensions = top_dimensional_cells.shape
top_dimensional_cells = top_dimensional_cells.ravel(order='F')
- self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
- top_dimensional_cells,
- periodic_dimensions)
+ self._construct_from_cells(dimensions, top_dimensional_cells, periodic_dimensions)
elif ((dimensions is None) and (top_dimensional_cells is None)
and (periodic_dimensions is None) and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Periodic_cubical_complex_base_interface(perseus_file.encode('utf-8'))
+ self._construct_from_file(perseus_file.encode('utf-8'))
else:
print("file " + perseus_file + " not found.", file=sys.stderr)
else:
@@ -104,6 +102,14 @@ cdef class PeriodicCubicalComplex:
"top_dimensional_cells and periodic_dimensions or from "
"a Perseus-style file name.", file=sys.stderr)
+ def _construct_from_cells(self, vector[unsigned] dimensions, vector[double] top_dimensional_cells, vector[bool] periodic_dimensions):
+ with nogil:
+ self.thisptr = new Periodic_cubical_complex_base_interface(dimensions, top_dimensional_cells, periodic_dimensions)
+
+ def _construct_from_file(self, string filename):
+ with nogil:
+ self.thisptr = new Periodic_cubical_complex_base_interface(filename)
+
def __dealloc__(self):
if self.thisptr != NULL:
del self.thisptr
@@ -134,8 +140,34 @@ cdef class PeriodicCubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+ equivalent to :func:`persistence` when you do not want the list
+ :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ cdef int field = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, 1)
+ self.pcohptr.compute_persistence(field, minp)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number
@@ -148,30 +180,73 @@ cdef class PeriodicCubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
- if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+ internal datastructure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 1].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell).
+ """
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()"
+ cdef vector[vector[int]] persistence_result
+
+ output = [[],[]]
+ with nogil:
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1 if len(ess) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1 if len(reg) > 0 else 0
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` to be
launched first.
- :note: betti_numbers function always returns [1, 0, 0, ...] as infinity
+ :note: This function always returns the Betti numbers of a torus, as infinity
filtration cubes are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -186,13 +261,11 @@ cdef class PeriodicCubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -203,13 +276,8 @@ cdef class PeriodicCubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.", file=sys.stderr)
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np.array(self.pcohptr.intervals_in_dimension(dimension))
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index cc3db467..6a74a6ca 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -72,11 +72,11 @@ def plot_persistence_barcode(
"""This function plots the persistence bar code from persistence values list
, a np.array of shape (N x 2) (representing a diagram
in a single homology dimension),
- or from a :doc:`persistence file <fileformats>`.
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
:param persistence: Persistence intervals values list. Can be grouped by dimension or not.
:type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: barcode transparency value (0.0 transparent through 1.0
@@ -109,9 +109,6 @@ def plot_persistence_barcode(
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
-
- persistence = _array_handler(persistence)
-
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
@@ -126,6 +123,8 @@ def plot_persistence_barcode(
print("file " + persistence_file + " not found.")
return None
+ persistence = _array_handler(persistence)
+
if max_barcodes != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_barcodes
@@ -214,11 +213,11 @@ def plot_persistence_diagram(
):
"""This function plots the persistence diagram from persistence values
list, a np.array of shape (N x 2) representing a diagram in a single
- homology dimension, or from a :doc:`persistence file <fileformats>`.
+ homology dimension, or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
:param persistence: Persistence intervals values list. Can be grouped by dimension or not.
:type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: plot transparency value (0.0 transparent through 1.0
@@ -255,8 +254,6 @@ def plot_persistence_diagram(
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
- persistence = _array_handler(persistence)
-
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
@@ -271,6 +268,8 @@ def plot_persistence_diagram(
print("file " + persistence_file + " not found.")
return None
+ persistence = _array_handler(persistence)
+
if max_plots != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_plots
@@ -369,17 +368,19 @@ def plot_persistence_density(
"""This function plots the persistence density from persistence
values list, np.array of shape (N x 2) representing a diagram
in a single homology dimension,
- or from a :doc:`persistence file <fileformats>`. Be
- aware that this function does not distinguish the dimension, it is
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
+ Be aware that this function does not distinguish the dimension; it is
up to you to select the required one. This function also does not handle
degenerate data sets (scipy correlation matrix inversion can fail).
+ :Requires: `SciPy <installation.html#scipy>`_
+
:param persistence: Persistence intervals values list.
Can be grouped by dimension or not.
:type persistence: an array of (dimension, array of (birth, death))
or an array of (birth, death).
- :param persistence_file: A :doc:`persistence file <fileformats>`
- style name (reset persistence if both are set).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_
+ file style name (reset persistence if both are set).
:type persistence_file: string
:param nbins: Evaluate a gaussian kde on a regular grid of nbins x
nbins over data extents (default is 300)
@@ -425,8 +426,6 @@ def plot_persistence_density(
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
- persistence = _array_handler(persistence)
-
if persistence_file != "":
if dimension is None:
# All dimension case
@@ -440,6 +439,7 @@ def plot_persistence_density(
return None
if len(persistence) > 0:
+ persistence = _array_handler(persistence)
persistence_dim = np.array(
[
(dim_interval[1][0], dim_interval[1][1])
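After the reordering, _array_handler only runs on intervals that were actually passed in or loaded from a file; both input forms keep working. In short:

    import numpy as np
    import matplotlib.pyplot as plt
    import gudhi

    # Either the (dimension, (birth, death)) list that persistence() returns ...
    diag = [(0, (0.0, 1.0)), (0, (0.2, float('inf'))), (1, (0.5, 0.8))]
    gudhi.plot_persistence_barcode(persistence=diag)

    # ... or a bare (N, 2) array for a single homology dimension.
    gudhi.plot_persistence_diagram(persistence=np.array([[0.0, 1.0], [0.5, 0.8]]))
    plt.show()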
diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py
new file mode 100644
index 00000000..55ac58e6
--- /dev/null
+++ b/src/python/gudhi/point_cloud/dtm.py
@@ -0,0 +1,179 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .knn import KNearestNeighbors
+import numpy as np
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class DistanceToMeasure:
+ """
+ Class to compute the distance to the empirical measure defined by a point set, as introduced in :cite:`dtm`.
+ """
+
+ def __init__(self, k, q=2, **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ q (float): order used to compute the distance to measure. Defaults to 2.
+ kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that
+ metric="neighbors" means that :func:`transform` expects an array with the distances
+ to the k nearest neighbors.
+ """
+ self.k = k
+ self.q = q
+ self.params = kwargs
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for mass points.
+ """
+ if self.params.setdefault("metric", "euclidean") != "neighbors":
+ self.knn = KNearestNeighbors(
+ self.k, return_index=False, return_distance=True, sort_results=False, **self.params
+ )
+ self.knn.fit(X)
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed",
+ or distances to the k nearest neighbors if metric is "neighbors" (if the array has more
+ than k columns, the remaining ones are ignored).
+
+ Returns:
+ numpy.array: a 1-d array with, for each point of X, its distance to the measure defined
+ by the argument of :func:`fit`.
+ """
+ if self.params["metric"] == "neighbors":
+ distances = X[:, : self.k]
+ else:
+ distances = self.knn.transform(X)
+ distances = distances ** self.q
+ dtm = distances.sum(-1) / self.k
+ dtm = dtm ** (1.0 / self.q)
+ # We compute too many powers, 1/p in knn then q in dtm, 1/q in dtm then q or some log in the caller.
+ # Add option to skip the final root?
+ return dtm
+
+
+class DTMDensity:
+ """
+ Density estimator based on the distance to the empirical measure defined by a point set, as defined
+ in :cite:`dtmdensity`. Note that this implementation only renormalizes when asked, and the renormalization
+ only works for a Euclidean metric, so in other cases the total measure may not be 1.
+
+ .. note:: When the dimension is high, using it as an exponent can quickly lead to under- or overflows.
+ We recommend using a small fixed value instead in those cases, even if it won't have the same nice
+ theoretical properties as the dimension.
+ """
+
+ def __init__(self, k=None, weights=None, q=None, dim=None, normalize=False, n_samples=None, **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself). Optional if it can be guessed
+ from weights or metric="neighbors".
+ weights (numpy.array): weights of each of the k neighbors, optional. They are supposed to sum to 1.
+ q (float): order used to compute the distance to measure. Defaults to dim.
+ dim (float): final exponent representing the dimension. Defaults to the dimension, and must be specified
+ when the dimension cannot be read from the input (metric is "neighbors" or "precomputed").
+ normalize (bool): normalize the density so it corresponds to a probability measure on ℝᵈ.
+ Only available for the Euclidean metric, defaults to False.
+ n_samples (int): number of sample points used for fitting. Only needed if `normalize` is True and
+ metric is "neighbors".
+ kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that
+ metric="neighbors" means that :func:`transform` expects an array with the distances to
+ the k nearest neighbors.
+ """
+ if weights is None:
+ self.k = k
+ if k is None:
+ assert kwargs.get("metric") == "neighbors", 'Must specify k or weights, unless metric is "neighbors"'
+ self.weights = None
+ else:
+ self.weights = np.full(k, 1.0 / k)
+ else:
+ self.weights = weights
+ self.k = len(weights)
+ assert k is None or k == self.k, "k differs from the length of weights"
+ self.q = q
+ self.dim = dim
+ self.params = kwargs
+ self.normalize = normalize
+ self.n_samples = n_samples
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for mass points.
+ """
+ if self.params.setdefault("metric", "euclidean") != "neighbors":
+ self.knn = KNearestNeighbors(
+ self.k, return_index=False, return_distance=True, sort_results=False, **self.params
+ )
+ self.knn.fit(X)
+ if self.params["metric"] != "precomputed":
+ self.n_samples = len(X)
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed",
+ or distances to the k nearest neighbors if metric is "neighbors" (if the array has more
+ than k columns, the remaining ones are ignored).
+ """
+ q = self.q
+ dim = self.dim
+ if dim is None:
+ assert self.params["metric"] not in {
+ "neighbors",
+ "precomputed",
+ }, "dim not specified and cannot guess the dimension"
+ dim = len(X[0])
+ if q is None:
+ q = dim
+ k = self.k
+ weights = self.weights
+ if self.params["metric"] == "neighbors":
+ distances = np.asarray(X)
+ if weights is None:
+ k = distances.shape[1]
+ weights = np.full(k, 1.0 / k)
+ else:
+ distances = distances[:, :k]
+ else:
+ distances = self.knn.transform(X)
+ distances = distances ** q
+ dtm = (distances * weights).sum(-1)
+ if self.normalize:
+ dtm /= (np.arange(1, k + 1) ** (q / dim) * weights).sum()
+ density = dtm ** (-dim / q)
+ if self.normalize:
+ import math
+
+ if self.params["metric"] == "precomputed":
+ self.n_samples = len(X[0])
+ # Volume of d-ball
+ Vd = math.pi ** (dim / 2) / math.gamma(dim / 2 + 1)
+ density /= self.n_samples * Vd
+ return density
+ # We compute too many powers, 1/p in knn then q in dtm, d/q in dtm then whatever in the caller.
+ # Add option to skip the final root?
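A minimal usage sketch for the two classes above, on a small random point cloud (the point count, k, and seed are arbitrary):

    import numpy as np
    from gudhi.point_cloud.dtm import DistanceToMeasure, DTMDensity

    pts = np.random.default_rng(0).random((100, 2))

    # Distance to measure with the 10 nearest neighbors (q = 2 by default).
    dtm_values = DistanceToMeasure(k=10).fit_transform(pts)       # shape (100,)

    # Density estimate; normalization is only meaningful for a Euclidean metric.
    density = DTMDensity(k=10, normalize=True).fit_transform(pts)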
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
new file mode 100644
index 00000000..4652fe80
--- /dev/null
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -0,0 +1,332 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy
+
+# TODO: https://github.com/facebookresearch/faiss
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class KNearestNeighbors:
+ """
+ Class wrapping several implementations for computing the k nearest neighbors in a point set.
+
+ :Requires: `PyKeOps <installation.html#pykeops>`_, `SciPy <installation.html#scipy>`_,
+ `Scikit-learn <installation.html#scikit-learn>`_, and/or `Hnswlib <installation.html#hnswlib>`_
+ depending on the selected `implementation`.
+ """
+
+ def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ return_index (bool): if True, return the index of each neighbor.
+ return_distance (bool): if True, return the distance to each neighbor.
+ implementation (str): choice of the library that does the real work.
+
+ * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes large (10+) but the number of points remains low (less than a million). Only "minkowski" and its aliases are supported.
+ * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported.
+ * 'sklearn' for scikit-learn's NearestNeighbors. Note that this provides in particular an option algorithm="brute".
+ * 'hnsw' for hnswlib.Index. It can be very fast but does not provide guarantees. Only supports "euclidean" for now.
+ * None will try to select a sensible one (scipy if possible, scikit-learn otherwise).
+ metric (str): see `sklearn.neighbors.NearestNeighbors`.
+ eps (float): relative error when computing nearest neighbors with the cKDTree.
+ p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2.
+ n_jobs (int): number of jobs to schedule for parallel processing of nearest neighbors on the CPU.
+ If -1 is given, all processors are used. Default: 1.
+ sort_results (bool): if True, then distances and indices of each point are
+ sorted on return, so that the first column contains the closest points.
+ Otherwise, neighbors are returned in an arbitrary order. Defaults to True.
+ enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.ndarray or tensorflow.Tensor, this
+ instructs the function to compute distances in a way that works with automatic differentiation.
+ This is experimental, not supported for all metrics, and requires the package EagerPy.
+ Defaults to False.
+ kwargs: additional parameters are forwarded to the backends.
+ """
+ self.k = k
+ self.return_index = return_index
+ self.return_distance = return_distance
+ self.metric = metric
+ self.params = kwargs
+ # canonicalize
+ if metric == "euclidean":
+ self.params["p"] = 2
+ self.metric = "minkowski"
+ elif metric == "manhattan":
+ self.params["p"] = 1
+ self.metric = "minkowski"
+ elif metric == "chebyshev":
+ self.params["p"] = numpy.inf
+ self.metric = "minkowski"
+ elif metric == "minkowski":
+ self.params["p"] = kwargs.get("p", 2)
+ if self.params.get("implementation") in {"keops", "ckdtree"}:
+ assert self.metric == "minkowski"
+ if self.params.get("implementation") == "hnsw":
+ assert self.metric == "minkowski" and self.params["p"] == 2
+ if not self.params.get("implementation"):
+ if self.metric == "minkowski":
+ self.params["implementation"] = "ckdtree"
+ else:
+ self.params["implementation"] = "sklearn"
+ if not return_distance:
+ self.params["enable_autodiff"] = False
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for reference points.
+ """
+ self.ref_points = X
+ if self.params.get("enable_autodiff", False):
+ import eagerpy as ep
+
+ X = ep.astensor(X)
+ if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor):
+ # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch
+ # without copying to/from the CPU.
+ X = X.numpy()
+ if self.params["implementation"] == "ckdtree":
+ # sklearn could handle this, but it is much slower
+ from scipy.spatial import cKDTree
+
+ self.kdtree = cKDTree(X)
+
+ if self.params["implementation"] == "sklearn" and self.metric != "precomputed":
+ # FIXME: sklearn badly handles "precomputed"
+ from sklearn.neighbors import NearestNeighbors
+
+ nargs = {
+ k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}
+ }
+ self.nn = NearestNeighbors(n_neighbors=self.k, metric=self.metric, **nargs)
+ self.nn.fit(X)
+
+ if self.params["implementation"] == "hnsw":
+ import hnswlib
+
+ self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances
+ self.graph.init_index(
+ len(X), **{k: v for k, v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}
+ )
+ n = self.params.get("num_threads")
+ if n is None:
+ n = self.params.get("n_jobs", 1)
+ self.params["num_threads"] = n
+ self.graph.add_items(X, num_threads=n)
+
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed".
+
+ Returns:
+ numpy.array: if return_index, an array of shape (len(X), k) with the indices (in the argument
+ of :func:`fit`) of the k nearest neighbors to the points of X. If return_distance, an array of the
+ same shape with the distances to those neighbors. If both, a tuple with the two arrays, in this order.
+ """
+ if self.params.get("enable_autodiff", False):
+ # pykeops does not support autodiff for kmin yet, but when it does in the future,
+ # we may want a special path.
+ import eagerpy as ep
+
+ save_return_index = self.return_index
+ self.return_index = True
+ self.return_distance = False
+ self.params["enable_autodiff"] = False
+ try:
+ newX = ep.astensor(X)
+ if self.params["implementation"] != "keops" or (
+ not isinstance(newX, ep.PyTorchTensor) and not isinstance(newX, ep.NumPyTensor)
+ ):
+ newX = newX.numpy()
+ else:
+ newX = newX.raw
+ neighbors = self.transform(newX)
+ finally:
+ self.return_index = save_return_index
+ self.return_distance = True
+ self.params["enable_autodiff"] = True
+ # We can implement more later as needed
+ assert self.metric == "minkowski"
+ p = self.params["p"]
+ Y = ep.astensor(self.ref_points)
+ neighbor_pts = Y[
+ neighbors,
+ ]
+ diff = neighbor_pts - X[:, None, :]
+ if isinstance(diff, ep.PyTorchTensor):
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ distances = ep.astensor(diff.raw.norm(p, -1))
+ else:
+ distances = diff.norms.lp(p, -1)
+ if self.return_index:
+ return neighbors, distances.raw
+ else:
+ return distances.raw
+
+ metric = self.metric
+ k = self.k
+
+ if metric == "precomputed":
+ # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural.
+ if self.return_index:
+ n_jobs = self.params.get("n_jobs", 1)
+ # Supposedly numpy can be compiled with OpenMP and handle this, but nobody does that?!
+ if n_jobs == 1:
+ neighbors = numpy.argpartition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results", True):
+ X = numpy.take_along_axis(X, neighbors, axis=-1)
+ ngb_order = numpy.argsort(X, axis=-1)
+ neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+ else:
+ ngb_order = neighbors
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, ngb_order, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ if self.params.get("sort_results", True):
+
+ def func(M):
+ neighbors = numpy.argpartition(M, k - 1)[:, 0:k]
+ Y = numpy.take_along_axis(M, neighbors, axis=-1)
+ ngb_order = numpy.argsort(Y, axis=-1)
+ return numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+
+ else:
+
+ def func(M):
+ return numpy.argpartition(M, k - 1)[:, 0:k]
+
+ neighbors = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, neighbors, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ n_jobs = self.params.get("n_jobs", 1)
+ if n_jobs == 1:
+ distances = numpy.partition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results"):
+ # partition is not guaranteed to sort the lower half, although it often does
+ distances.sort(axis=-1)
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ if self.params.get("sort_results"):
+
+ def func(M):
+ # Not partitioning in place, because we should not modify the user's array?
+ r = numpy.partition(M, k - 1)[:, 0:k]
+ r.sort(axis=-1)
+ return r
+
+ else:
+ func = lambda M: numpy.partition(M, k - 1)[:, 0:k]
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ return distances
+ return None
+
+ if self.params["implementation"] == "hnsw":
+ ef = self.params.get("ef")
+ if ef is not None:
+ self.graph.set_ef(ef)
+ neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"])
+ # The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn,
+ # which returns a priority_queue, and then fills the return array backwards with top/pop on the queue.
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, numpy.sqrt(distances)
+ else:
+ return neighbors
+ if self.return_distance:
+ return numpy.sqrt(distances)
+ return None
+
+ if self.params["implementation"] == "keops":
+ import torch
+ from pykeops.torch import LazyTensor
+
+ # 'float64' is slow except on super expensive GPUs. Allow it with some param?
+ XX = torch.as_tensor(X, dtype=torch.float32)
+ if X is self.ref_points:
+ YY = XX
+ else:
+ YY = torch.as_tensor(self.ref_points, dtype=torch.float32)
+ p = self.params["p"]
+ if p == numpy.inf:
+ # Requires pykeops 1.4 or later
+ mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1)
+ elif p == 2: # Any even integer?
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1)
+ else:
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1)
+
+ if self.return_index:
+ if self.return_distance:
+ distances, neighbors = mat.Kmin_argKmin(k, dim=1)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return neighbors, distances
+ else:
+ neighbors = mat.argKmin(k, dim=1)
+ return neighbors
+ if self.return_distance:
+ distances = mat.Kmin(k, dim=1)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return distances
+ return None
+
+ if self.params["implementation"] == "ckdtree":
+ qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}}
+ distances, neighbors = self.kdtree.query(X, k=self.k, **qargs)
+ if k == 1:
+ # SciPy decided to squeeze the last dimension for k=1
+ distances = distances[:, None]
+ neighbors = neighbors[:, None]
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ return distances
+ return None
+
+ assert self.params["implementation"] == "sklearn"
+ if self.return_distance:
+ distances, neighbors = self.nn.kneighbors(X, return_distance=True)
+ if self.return_index:
+ return neighbors, distances
+ else:
+ return distances
+ if self.return_index:
+ neighbors = self.nn.kneighbors(X, return_distance=False)
+ return neighbors
+ return None
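A brief sketch of the class above on random data; the default backend selection picks cKDTree for the Euclidean metric, and the "precomputed" example assumes SciPy for building the distance matrix:

    import numpy as np
    from scipy.spatial.distance import cdist
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.default_rng(0).random((50, 3))

    # Indices and distances of the 5 nearest neighbors, both of shape (50, 5).
    knn = KNearestNeighbors(k=5, return_index=True, return_distance=True)
    neighbors, distances = knn.fit_transform(pts)

    # With metric="precomputed", transform() takes a distance matrix instead.
    dmat = cdist(pts, pts)
    idx = KNearestNeighbors(k=5, metric="precomputed").fit(dmat).transform(dmat)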
diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py
index f01df442..5292e752 100644
--- a/src/python/gudhi/point_cloud/timedelay.py
+++ b/src/python/gudhi/point_cloud/timedelay.py
@@ -10,9 +10,8 @@ import numpy as np
class TimeDelayEmbedding:
- """Point cloud transformation class.
- Embeds time-series data in the R^d according to [Takens' Embedding Theorem]
- (https://en.wikipedia.org/wiki/Takens%27s_theorem) and obtains the
+ """Point cloud transformation class. Embeds time-series data in R^d according to
+ `Takens' Embedding Theorem <https://en.wikipedia.org/wiki/Takens%27s_theorem>`_ and obtains the
coordinates of each point.
Parameters
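For reference, a short sketch of how this class is typically invoked; it assumes the constructor keeps its dim/delay/skip parameters and the callable interface:

    import numpy as np
    from gudhi.point_cloud.timedelay import TimeDelayEmbedding

    ts = np.sin(np.linspace(0, 4 * np.pi, 100))
    # Each output point is (ts[i], ts[i + delay], ..., ts[i + (dim - 1) * delay]).
    cloud = TimeDelayEmbedding(dim=3, delay=1, skip=1)(ts)   # shape (98, 3)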
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
index bfc83aff..596f4f07 100644
--- a/src/python/gudhi/representations/kernel_methods.py
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -9,13 +9,83 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.metrics import pairwise_distances
-from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+from sklearn.metrics import pairwise_distances, pairwise_kernels
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance
+from .preprocessing import Padding
#############################################
# Kernel methods ############################
#############################################
+def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ weight: weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence weighted Gaussian kernel value between persistence diagrams.
+ """
+ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))])
+ ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))])
+ if kernel_approx is not None:
+ approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0)
+ approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0)
+ return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T)
+ else:
+ W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth))
+ return np.sum(np.multiply(W, E))
+
+def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding, to each persistence diagram, the reflection of each of its points across the diagonal with negative weight, and then convolving all points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence scale space kernel value between persistence diagrams.
+ """
+ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0)
+ DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0)
+ weight_pss = lambda x: 1 if x[1] >= x[0] else -1
+ return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth)
+
+def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", **kwargs):
+ """
+ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only.
+ kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): kernel matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if kernel == "sliced_wasserstein":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"])
+ elif kernel == "persistence_fisher":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"])
+ elif kernel == "persistence_scale_space":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs))
+ elif kernel == "persistence_weighted_gaussian":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs))
+ else:
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(kernel, X, Y, **kwargs))
+
class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -29,7 +99,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
"""
self.bandwidth = bandwidth
- self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+ self.num_directions = num_directions
def fit(self, X, y=None):
"""
@@ -39,7 +109,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sw_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -52,7 +122,20 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
"""
- return np.exp(-self.sw_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinKernel to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein kernel value.
+ """
+ return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) / self.bandwidth)
class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
"""
@@ -78,10 +161,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
- if self.kernel_approx is not None:
- self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -94,31 +174,20 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
"""
- Xp = list(X)
- Xfit = np.zeros((len(Xp), len(self.diagrams_)))
- if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
- if self.kernel_approx is not None:
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
- else:
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
- Xfit[j,i] = Xfit[i,j]
- else:
- ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
- if self.kernel_approx is not None:
- approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
- else:
- for i in range(len(Xp)):
- for j in range(len(self.diagrams_)):
- W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
-
- return Xfit
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceWeightedGaussianKernel to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence weighted Gaussian kernel value.
+ """
+ return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth)
class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
"""
@@ -132,7 +201,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
- self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
def fit(self, X, y=None):
"""
@@ -142,11 +211,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- for i in range(len(self.diagrams_)):
- op_D = self.diagrams_[i][:,[1,0]]
- self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
- self.pwg_.fit(X)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -159,11 +224,20 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
"""
- Xp = list(X)
- for i in range(len(Xp)):
- op_X = Xp[i][:,[1,0]]
- Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
- return self.pwg_.transform(Xp)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceScaleSpaceKernel to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence scale space kernel value.
+ """
+ return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
"""
@@ -179,7 +253,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
self.bandwidth = bandwidth
- self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+ self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx
def fit(self, X, y=None):
"""
@@ -189,7 +263,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.pf_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -202,5 +276,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
"""
- return np.exp(-self.pf_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherKernel to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher kernel value.
+ """
+ return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) / self.bandwidth_fisher)
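A minimal sketch of the refactored interface on two tiny diagrams (the bandwidth and number of directions are arbitrary):

    import numpy as np
    from gudhi.representations.kernel_methods import (
        SlicedWassersteinKernel,
        pairwise_persistence_diagram_kernels,
    )

    D1 = np.array([[0.0, 1.0], [0.3, 0.6]])
    D2 = np.array([[0.1, 1.2]])

    # Full kernel matrix between the diagrams of one list...
    K = pairwise_persistence_diagram_kernels(
        [D1, D2], kernel="sliced_wasserstein", num_directions=10, bandwidth=1.0
    )

    # ...or a single kernel value through the new __call__ interface.
    swk = SlicedWassersteinKernel(num_directions=10, bandwidth=1.0)
    value = swk(D1, D2)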
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
index 5f9ec6ab..8a32f7e9 100644
--- a/src/python/gudhi/representations/metrics.py
+++ b/src/python/gudhi/representations/metrics.py
@@ -10,17 +10,168 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import pairwise_distances
-try:
- from .. import bottleneck_distance
- USE_GUDHI = True
-except ImportError:
- USE_GUDHI = False
- print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+from gudhi.hera import wasserstein_distance as hera_wasserstein_distance
+from .preprocessing import Padding
#############################################
# Metrics ###################################
#############################################
+def _sliced_wasserstein_distance(D1, D2, num_directions):
+ """
+ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ float: the sliced Wasserstein distance between persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ approx1 = np.matmul(D1, lines)
+ approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines)
+ approx2 = np.matmul(D2, lines)
+ approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines)
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _compute_persistence_diagram_projections(X, num_directions):
+ """
+ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): list of persistence diagrams.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X]
+ return XX
+
+def _sliced_wasserstein_distance_on_projections(D1, D2):
+ """
+ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections.
+ D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections.
+
+ Returns:
+ float: the sliced Wasserstein distance between the projected persistence diagrams.
+ """
+ lim1, lim2 = int(len(D1)/2), int(len(D2)/2)
+ approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:]
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions.
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence Fisher distance between persistence diagrams.
+ """
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections1 = np.matmul(D1, projection)
+ diagonal_projections2 = np.matmul(D2, projection)
+ if kernel_approx is not None:
+ approx1 = kernel_approx.transform(D1)
+ approx_diagonal1 = kernel_approx.transform(diagonal_projections1)
+ approx2 = kernel_approx.transform(D2)
+ approx_diagonal2 = kernel_approx.transform(diagonal_projections2)
+ Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0)
+ U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0)
+ U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+
+def _sklearn_wrapper(metric, X, Y, **kwargs):
+ """
+ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments.
+ """
+ if Y is None:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], X[int(b[0])], **kwargs)
+ else:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], Y[int(b[0])], **kwargs)
+ return flat_metric
+
+PAIRWISE_DISTANCE_FUNCTIONS = {
+ "wasserstein": hera_wasserstein_distance,
+ "hera_wasserstein": hera_wasserstein_distance,
+ "persistence_fisher": _persistence_fisher_distance,
+}
+
+def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs):
+ """
+ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only.
+ metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera; note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): distance matrix
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if metric == "bottleneck":
+ try:
+ from .. import bottleneck_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs))
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
+ elif metric == "pot_wasserstein":
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs))
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
+ elif metric == "sliced_wasserstein":
+ Xproj = _compute_persistence_diagram_projections(X, **kwargs)
+ Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs)
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj))
+ elif isinstance(metric, str):
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs))
+ else:
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs))
+
class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -33,8 +184,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
"""
self.num_directions = num_directions
- thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
- self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
def fit(self, X, y=None):
"""
@@ -45,9 +194,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- diag_proj = (1./2) * np.ones((2,2))
- self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
return self
def transform(self, X):
@@ -60,31 +206,26 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
"""
- Xfit = np.zeros((len(X), len(self.approx_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.approx_)):
- for j in range(i+1, len(self.approx_)):
- A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
- Xfit[j,i] = Xfit[i,j]
- else:
- diag_proj = (1./2) * np.ones((2,2))
- approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
- for i in range(len(approx)):
- for j in range(len(self.approx_)):
- A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions)
- return Xfit
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinDistance to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein distance.
+ """
+ return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)
class BottleneckDistance(BaseEstimator, TransformerMixin):
"""
- This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+
+ :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
"""
def __init__(self, epsilon=None):
"""
@@ -116,34 +257,26 @@ class BottleneckDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
"""
- num_diag1 = len(X)
-
- #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- if X is self.diagrams_:
- matrix = np.zeros((num_diag1, num_diag1))
-
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(i+1, num_diag1):
- matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
- matrix[j,i] = matrix[i,j]
- else:
- print("Gudhi built without CGAL: returning a null matrix")
-
- else:
- num_diag2 = len(self.diagrams_)
- matrix = np.zeros((num_diag1, num_diag2))
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon)
+ return Xfit
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(num_diag2):
- matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
- else:
- print("Gudhi built without CGAL: returning a null matrix")
+ def __call__(self, diag1, diag2):
+ """
+ Apply BottleneckDistance to a single pair of persistence diagrams and output the result.
- Xfit = matrix
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
- return Xfit
+ Returns:
+ float: bottleneck distance.
+ """
+ try:
+ from .. import bottleneck_distance
+ return bottleneck_distance(diag1, diag2, e=self.epsilon)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
"""
@@ -168,11 +301,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- projection = (1./2) * np.ones((2,2))
- self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
return self
def transform(self, X):
@@ -185,60 +313,83 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
"""
- Xfit = np.zeros((len(X), len(self.diagrams_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
- else:
- Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherDistance to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher distance.
+ """
+ return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+class WassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams.
+ """
+ def __init__(self, order=2, internal_p=2, mode="pot", delta=0.01):
+ """
+ Constructor for the WassersteinDistance class.
+
+ Parameters:
+ order (int): exponent for the Wasserstein distance, default value is 2; see :func:`gudhi.wasserstein.wasserstein_distance`.
+ internal_p (int): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is 2 (Euclidean norm); see :func:`gudhi.wasserstein.wasserstein_distance`.
+ mode (str): method for computing the Wasserstein distance. Either "pot" or "hera". Defaults to "pot".
+ delta (float): relative error tolerance 1+delta. Used only if mode == "hera".
+ """
+ self.order, self.internal_p, self.mode = order, internal_p, mode
+ self.metric = "pot_wasserstein" if mode == "pot" else "hera_wasserstein"
+ self.delta = delta
+
+ def fit(self, X, y=None):
+ """
+ Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances.
+ """
+ if self.metric == "hera_wasserstein":
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta)
else:
- projection = (1./2) * np.ones((2,2))
- diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
- for i in range(len(X)):
- for j in range(len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- else:
- Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False)
return Xfit
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply WassersteinDistance to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: Wasserstein distance.
+ """
+ if self.metric == "hera_wasserstein":
+ return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta)
+ else:
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
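Correspondingly, a sketch for the distance side; "hera_wasserstein" avoids the optional POT dependency (it assumes GUDHI was built with the Hera bindings), and the values below are arbitrary:

    import numpy as np
    from gudhi.representations.metrics import (
        WassersteinDistance,
        pairwise_persistence_diagram_distances,
    )

    D1 = np.array([[0.0, 1.0], [0.3, 0.6]])
    D2 = np.array([[0.1, 1.2]])

    # Pairwise distance matrix within one list of diagrams.
    M = pairwise_persistence_diagram_distances(
        [D1, D2], metric="hera_wasserstein", order=2, internal_p=2
    )

    # Single-pair value through the scikit-learn style class.
    wd = WassersteinDistance(order=2, internal_p=2, mode="hera")
    value = wd(D1, D2)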
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a39b00e4..a8545349 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -54,6 +54,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
Xfit.append(new_diag)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BirthPersistenceTransform to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Clamping(BaseEstimator, TransformerMixin):
"""
This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
@@ -142,6 +154,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin):
Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
return Xfit
+ def __call__(self, diag):
+ """
+ Apply DiagramScaler to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Padding(BaseEstimator, TransformerMixin):
"""
This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
@@ -186,6 +210,18 @@ class Padding(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Padding to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: padded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class ProminentPoints(BaseEstimator, TransformerMixin):
"""
This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
@@ -259,6 +295,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply ProminentPoints to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: thresholded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class DiagramSelector(BaseEstimator, TransformerMixin):
"""
This is a class for extracting finite or essential points in persistence diagrams.
@@ -303,3 +351,15 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
else:
Xfit = X
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply DiagramSelector to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: extracted persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
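Example: a minimal sketch of the callable preprocessing steps, assuming the documented constructor parameters (e.g. DiagramSelector(use, point_type)):

    import numpy as np
    from gudhi.representations import DiagramSelector, BirthPersistenceTransform

    dgm = np.array([[0.0, 1.0], [0.2, np.inf]])

    # Each step is now directly callable on one diagram; this is
    # equivalent to fit_transform([dgm])[0].
    finite = DiagramSelector(use=True, point_type="finite")(dgm)
    bp = BirthPersistenceTransform()(finite)  # (birth, death - birth) coordinates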
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index fe26dbe2..46fee086 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -81,6 +81,18 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply PersistenceImage to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**): output persistence image.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
@@ -170,6 +182,18 @@ class Landscape(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Landscape to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
@@ -248,6 +272,18 @@ class Silhouette(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Silhouette to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output persistence silhouette.
+ """
+ return self.fit_transform([diag])[0,:]
+
class BettiCurve(BaseEstimator, TransformerMixin):
"""
This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
@@ -308,6 +344,18 @@ class BettiCurve(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BettiCurve to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output Betti curve.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
@@ -378,6 +426,18 @@ class Entropy(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Entropy to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ return self.fit_transform([diag])[0,:]
+
class TopologicalVector(BaseEstimator, TransformerMixin):
"""
This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated to a persistence diagram is the sorted vector of a slight modification of the pairwise distances between the persistence diagram points. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details.
@@ -431,6 +491,18 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply TopologicalVector to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output topological vector.
+ """
+ return self.fit_transform([diag])[0,:]
+
class ComplexPolynomial(BaseEstimator, TransformerMixin):
"""
This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
@@ -490,3 +562,15 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin):
coeff = np.array(coeff[::-1])[1:]
Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply ComplexPolynomial to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output complex vector of coefficients.
+ """
+ return self.fit_transform([diag])[0,:]
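Example: the same callable shortcut for the vectorizations, assuming their documented constructor parameters:

    import numpy as np
    from gudhi.representations import Landscape, PersistenceImage, Entropy

    dgm = np.array([[0.0, 1.0], [0.5, 2.0]])

    # Each vectorization applied to a single diagram returns the first row
    # of fit_transform([dgm]).
    ls = Landscape(num_landscapes=2, resolution=10)(dgm)  # shape (20,)
    pi = PersistenceImage(resolution=[5, 5])(dgm)         # shape (25,)
    en = Entropy(mode="scalar")(dgm)                      # shape (1,)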
diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx
index deb8057a..72e82c79 100644
--- a/src/python/gudhi/rips_complex.pyx
+++ b/src/python/gudhi/rips_complex.pyx
@@ -23,12 +23,12 @@ __license__ = "MIT"
cdef extern from "Rips_complex_interface.h" namespace "Gudhi":
cdef cppclass Rips_complex_interface "Gudhi::rips_complex::Rips_complex_interface":
- Rips_complex_interface()
- void init_points(vector[vector[double]] values, double threshold)
- void init_matrix(vector[vector[double]] values, double threshold)
- void init_points_sparse(vector[vector[double]] values, double threshold, double sparse)
- void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse)
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) except +
+ Rips_complex_interface() nogil
+ void init_points(vector[vector[double]] values, double threshold) nogil
+ void init_matrix(vector[vector[double]] values, double threshold) nogil
+ void init_points_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) nogil except +
# RipsComplex python interface
cdef class RipsComplex:
@@ -97,6 +97,7 @@ cdef class RipsComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
- max_dimension)
+ cdef int maxdim = max_dimension
+ with nogil:
+ self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, maxdim)
return stree
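Example: a minimal sketch of what the nogil annotations enable, assuming each thread works on its own RipsComplex and SimplexTree; create_simplex_tree now releases the GIL, so the C++ construction can overlap across threads:

    import numpy as np
    from concurrent.futures import ThreadPoolExecutor
    from gudhi import RipsComplex

    clouds = [np.random.rand(100, 3) for _ in range(4)]

    def build(pts):
        # The expensive construction runs with the GIL released.
        rc = RipsComplex(points=pts, max_edge_length=0.5)
        return rc.create_simplex_tree(max_dimension=2)

    with ThreadPoolExecutor(max_workers=4) as pool:
        trees = list(pool.map(build, clouds))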
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 595f22bb..e748ac40 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -25,55 +25,56 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
pass
cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Complex_simplex_iterator":
- Simplex_tree_simplices_iterator()
- Simplex_tree_simplex_handle& operator*()
- Simplex_tree_simplices_iterator operator++()
- bint operator!=(Simplex_tree_simplices_iterator)
+ Simplex_tree_simplices_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_simplices_iterator operator++() nogil
+ bint operator!=(Simplex_tree_simplices_iterator) nogil
cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Skeleton_simplex_iterator":
- Simplex_tree_skeleton_iterator()
- Simplex_tree_simplex_handle& operator*()
- Simplex_tree_skeleton_iterator operator++()
- bint operator!=(Simplex_tree_skeleton_iterator)
+ Simplex_tree_skeleton_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_skeleton_iterator operator++() nogil
+ bint operator!=(Simplex_tree_skeleton_iterator) nogil
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
- Simplex_tree()
- double simplex_filtration(vector[int] simplex)
- void assign_simplex_filtration(vector[int] simplex, double filtration)
- void initialize_filtration()
- int num_vertices()
- int num_simplices()
- void set_dimension(int dimension)
- int dimension()
- int upper_bound_dimension()
- bool find_simplex(vector[int] simplex)
- bool insert_simplex_and_subfaces(vector[int] simplex,
- double filtration)
- vector[pair[vector[int], double]] get_star(vector[int] simplex)
- vector[pair[vector[int], double]] get_cofaces(vector[int] simplex,
- int dimension)
- void expansion(int max_dim) except +
- void remove_maximal_simplex(vector[int] simplex)
- bool prune_above_filtration(double filtration)
- bool make_filtration_non_decreasing()
- void compute_extended_filtration()
- vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence)
+ Simplex_tree() nogil
+ double simplex_filtration(vector[int] simplex) nogil
+ void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
+ void initialize_filtration() nogil
+ int num_vertices() nogil
+ int num_simplices() nogil
+ void set_dimension(int dimension) nogil
+ int dimension() nogil
+ int upper_bound_dimension() nogil
+ bool find_simplex(vector[int] simplex) nogil
+ bool insert(vector[int] simplex, double filtration) nogil
+ vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil
+ vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil
+ void expansion(int max_dim) nogil except +
+ void remove_maximal_simplex(vector[int] simplex) nogil
+ bool prune_above_filtration(double filtration) nogil
+ bool make_filtration_non_decreasing() nogil
+ void compute_extended_filtration() nogil
+ vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
# Iterators over Simplex tree
- pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex)
- Simplex_tree_simplices_iterator get_simplices_iterator_begin()
- Simplex_tree_simplices_iterator get_simplices_iterator_end()
- vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin()
- vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end()
- Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension)
- Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension)
+ pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_end() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
- Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
- void write_output_diagram(string diagram_file_name)
- vector[pair[vector[int], vector[int]]] persistence_pairs()
+ Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
+ void write_output_diagram(string diagram_file_name) nogil except +
+ vector[pair[vector[int], vector[int]]] persistence_pairs() nogil
+ pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() nogil
+ pair[vector[vector[int]], vector[vector[int]]] flag_generators() nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index cc3753e1..9cb24221 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -9,6 +9,7 @@
from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
+import numpy
from numpy import array as np_array
cimport simplex_tree
@@ -32,7 +33,7 @@ cdef class SimplexTree:
cdef public intptr_t thisptr
# Get the pointer casted as it should be
- cdef Simplex_tree_interface_full_featured* get_ptr(self):
+ cdef Simplex_tree_interface_full_featured* get_ptr(self) nogil:
return <Simplex_tree_interface_full_featured*>(self.thisptr)
cdef Simplex_tree_persistence_interface * pcohptr
@@ -90,7 +91,7 @@ cdef class SimplexTree:
(with more :meth:`assign_filtration` or
:meth:`make_filtration_non_decreasing` for instance) before calling
any function that relies on the filtration property, like
- :meth:`initialize_filtration`.
+ :meth:`persistence`.
"""
self.get_ptr().assign_simplex_filtration(simplex, filtration)
@@ -98,17 +99,10 @@ cdef class SimplexTree:
"""This function initializes and sorts the simplicial complex
filtration vector.
- .. note::
-
- This function must be launched before
- :func:`persistence()<gudhi.SimplexTree.persistence>`,
- :func:`betti_numbers()<gudhi.SimplexTree.betti_numbers>`,
- :func:`persistent_betti_numbers()<gudhi.SimplexTree.persistent_betti_numbers>`,
- or :func:`get_filtration()<gudhi.SimplexTree.get_filtration>`
- after :func:`inserting<gudhi.SimplexTree.insert>` or
- :func:`removing<gudhi.SimplexTree.remove_maximal_simplex>`
- simplices.
+ .. deprecated:: 3.2.0
"""
+ import warnings
+ warnings.warn("Since Gudhi 3.2, calling SimplexTree.initialize_filtration is unnecessary.", DeprecationWarning)
self.get_ptr().initialize_filtration()
def num_vertices(self):
@@ -139,9 +133,9 @@ cdef class SimplexTree:
This function is not constant time because it can recompute
dimension if required (can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
methods).
"""
return self.get_ptr().dimension()
@@ -166,9 +160,9 @@ cdef class SimplexTree:
This function must be used with caution because it disables
dimension recomputation when required
(this recomputation can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
).
"""
self.get_ptr().set_dimension(<int>dimension)
@@ -182,10 +176,7 @@ cdef class SimplexTree:
:returns: true if the simplex was found, false otherwise.
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().find_simplex(csimplex)
+ return self.get_ptr().find_simplex(simplex)
def insert(self, simplex, filtration=0.0):
"""This function inserts the given N-simplex and its subfaces with the
@@ -202,11 +193,7 @@ cdef class SimplexTree:
otherwise (whatever its original filtration value).
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().insert_simplex_and_subfaces(csimplex,
- <double>filtration)
+ return self.get_ptr().insert(simplex, <double>filtration)
def get_simplices(self):
"""This function returns a generator with simplices and their given
@@ -308,17 +295,12 @@ cdef class SimplexTree:
.. note::
- Be aware that removing is shifting data in a flat_map
- (:func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>` to be done).
-
- .. note::
-
The dimension of the simplicial complex may be lower after calling
remove_maximal_simplex than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
method will return the old value, which
remains a valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
to recompute the exact dimension.
"""
self.get_ptr().remove_maximal_simplex(simplex)
@@ -334,24 +316,14 @@ cdef class SimplexTree:
.. note::
- Some simplex tree functions require the filtration to be valid.
- prune_above_filtration function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
-
- .. note::
-
Note that the dimension of the simplicial complex may be lower
after calling
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
will return the old value, which remains a
valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
method to recompute the exact dimension.
"""
return self.get_ptr().prune_above_filtration(filtration)
@@ -373,7 +345,9 @@ cdef class SimplexTree:
:param max_dim: The maximal dimension.
:type max_dim: int.
"""
- self.get_ptr().expansion(max_dim)
+ cdef int maxdim = max_dim
+ with nogil:
+ self.get_ptr().expansion(maxdim)
def make_filtration_non_decreasing(self):
"""This function ensures that each simplex has a higher filtration
@@ -382,17 +356,6 @@ cdef class SimplexTree:
:returns: True if any filtration value was modified,
False if the filtration was already non-decreasing.
:rtype: bool
-
-
- .. note::
-
- Some simplex tree functions require the filtration to be valid.
- make_filtration_non_decreasing function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
"""
return self.get_ptr().make_filtration_non_decreasing()
@@ -405,7 +368,7 @@ cdef class SimplexTree:
Note that after calling this function, the filtration
values are actually modified within the Simplex_tree.
- The function :func:`extended_persistence()<gudhi.SimplexTree.extended_persistence>`
+ The function :func:`extended_persistence`
retrieves the original values.
.. note::
@@ -413,7 +376,7 @@ cdef class SimplexTree:
Note that this code creates an extra vertex internally, so you should make sure that
the Simplex_tree does not contain a vertex with the largest possible value (i.e., 4294967295).
"""
- return self.get_ptr().compute_extended_filtration()
+ self.get_ptr().compute_extended_filtration()
def extended_persistence(self, homology_coeff_field=11, min_persistence=0):
"""This function retrieves good values for extended persistence, and separate the diagrams
@@ -427,11 +390,11 @@ cdef class SimplexTree:
0.0.
Sets min_persistence to -1.0 to see all values.
:type min_persistence: float.
- :returns: A list of four persistence diagrams in the format described in :func:`persistence()<gudhi.SimplexTree.persistence>`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes.
+ :returns: A list of four persistence diagrams in the format described in :func:`persistence`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes.
.. note::
- This function should be called only if :func:`extend_filtration()<gudhi.SimplexTree.extend_filtration>` has been called first!
+ This function should be called only if :func:`extend_filtration` has been called first!
.. note::
@@ -443,12 +406,13 @@ cdef class SimplexTree:
if self.pcohptr != NULL:
del self.pcohptr
self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False)
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, -1.)
+ self.pcohptr.compute_persistence(homology_coeff_field, -1.)
+ persistence_result = self.pcohptr.get_persistence()
return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence)
def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
- """This function returns the persistence of the simplicial complex.
+ """This function computes and returns the persistence of the simplicial complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number. Default value is 11.
@@ -456,7 +420,7 @@ cdef class SimplexTree:
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
0.0.
- Sets min_persistence to -1.0 to see all values.
+ Set min_persistence to -1.0 to see all values.
:type min_persistence: float.
:param persistence_dim_max: If true, the persistent homology for the
maximal dimension in the complex is computed. If false, it is
@@ -465,13 +429,36 @@ cdef class SimplexTree:
:returns: The persistence of the simplicial complex.
:rtype: list of pairs(dimension, pair(birth, death))
"""
+ self.compute_persistence(homology_coeff_field, min_persistence, persistence_dim_max)
+ return self.pcohptr.get_persistence()
+
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+ """This function computes the persistence of the simplicial complex, so it can be accessed through
+ :func:`persistent_betti_numbers`, :func:`persistence_pairs`, etc. This function is equivalent to :func:`persistence`
+ when you do not need the list that :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11.
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :param persistence_dim_max: If true, the persistent homology for the
+ maximal dimension in the complex is computed. If false, it is
+ ignored. Default is false.
+ :type persistence_dim_max: bool
+ :returns: Nothing.
+ """
if self.pcohptr != NULL:
del self.pcohptr
- self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ cdef bool pdm = persistence_dim_max
+ cdef int coef = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), pdm)
+ self.pcohptr.compute_persistence(coef, minp)
def betti_numbers(self):
"""This function returns the Betti numbers of the simplicial complex.
@@ -480,16 +467,11 @@ cdef class SimplexTree:
:rtype: list of int
:note: betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- else:
- print("betti_numbers function requires persistence function"
- " to be launched first.")
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the
@@ -506,16 +488,11 @@ cdef class SimplexTree:
:rtype: list of int
:note: persistent_betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- else:
- print("persistent_betti_numbers function requires persistence function"
- " to be launched first.")
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the simplicial
@@ -527,16 +504,11 @@ cdef class SimplexTree:
:rtype: numpy array of dimension 2
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np_array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np_array(self.pcohptr.intervals_in_dimension(dimension))
def persistence_pairs(self):
"""This function returns a list of persistence birth and death simplices pairs.
@@ -545,33 +517,68 @@ cdef class SimplexTree:
:rtype: list of pair of list of int
:note: persistence_pairs function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[vector[int],vector[int]]] persistence_pairs_result
- if self.pcohptr != NULL:
- persistence_pairs_result = self.pcohptr.persistence_pairs()
- else:
- print("persistence_pairs function requires persistence function"
- " to be launched first.")
- return persistence_pairs_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_pairs()"
+ return self.pcohptr.persistence_pairs()
- def write_persistence_diagram(self, persistence_file=''):
+ def write_persistence_diagram(self, persistence_file):
"""This function writes the persistence intervals of the simplicial
complex in a user given file name.
- :param persistence_file: The specific dimension.
+ :param persistence_file: Name of the file.
:type persistence_file: string.
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- if self.pcohptr != NULL:
- if persistence_file != '':
- self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
- else:
- print("persistence_file must be specified")
+ assert self.pcohptr != NULL, "compute_persistence() must be called before write_persistence_diagram()"
+ self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
+
+ def lower_star_persistence_generators(self):
+ """Assuming this is a lower-star filtration, this function returns the persistence pairs,
+ where each simplex is replaced with the vertex that gave it its filtration value.
+
+ :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity,
+ and second the essential features, grouped by dimension, with one vertex each
+ :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]]
+
+ :note: lower_star_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.lower_star_generators()
+ normal = [np_array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np_array(d) for d in gen.second]
+ return (normal, infinite)
+
+ def flag_persistence_generators(self):
+ """Assuming this is a flag complex, this function returns the persistence pairs,
+ where each simplex is replaced with the vertices of the edges that gave it its filtration value.
+
+ :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death;
+ then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity;
+ then the connected components, with one vertex each;
+ finally the other essential features, grouped by dimension, with 2 vertices for birth.
+ :rtype: Tuple[numpy.array[int] of shape (n,3), List[numpy.array[int] of shape (m,4)], numpy.array[int] of shape (l,), List[numpy.array[int] of shape (k,2)]]
+
+ :note: flag_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.flag_generators()
+ if len(gen.first) == 0:
+ normal0 = numpy.empty((0,3))
+ normals = []
+ else:
+ l = iter(gen.first)
+ normal0 = np_array(next(l)).reshape(-1,3)
+ normals = [np_array(d).reshape(-1,4) for d in l]
+ if len(gen.second) == 0:
+ infinite0 = numpy.empty(0)
+ infinites = []
else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
+ l = iter(gen.second)
+ infinite0 = np_array(next(l))
+ infinites = [np_array(d).reshape(-1,2) for d in l]
+ return (normal0, normals, infinite0, infinites)
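Example: a minimal sketch of the reworked persistence workflow; compute_persistence() stores the result without building the list persistence() returns, and the accessors now assert it was called:

    from gudhi import SimplexTree

    st = SimplexTree()
    st.insert([0, 1], filtration=1.0)
    st.insert([1, 2], filtration=2.0)
    st.insert([0, 2], filtration=3.0)
    st.expansion(2)  # flag complex, so flag_persistence_generators applies

    st.compute_persistence(homology_coeff_field=2, min_persistence=-1.0)
    print(st.betti_numbers())
    print(st.flag_persistence_generators())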
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
index de7aea81..d67bcde7 100644
--- a/src/python/gudhi/wasserstein/barycenter.py
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -18,8 +18,7 @@ from gudhi.wasserstein import wasserstein_distance
def _mean(x, m):
'''
:param x: a list of 2D-points, off diagonal, x_0... x_{k-1}
- :param m: total amount of points taken into account,
- that is we have (m-k) copies of diagonal
+ :param m: total number of points taken into account, that is, we have (m-k) copies of the diagonal
:returns: the weighted mean of x with (m-k) copies of the diagonal
'''
k = len(x)
@@ -33,37 +32,26 @@ def _mean(x, m):
def lagrangian_barycenter(pdiagset, init=None, verbose=False):
'''
- :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)`
- (`n` can variate), encoding a set of
- persistence diagrams with only finite coordinates.
+ :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` (`n` can vary), encoding a set of persistence
+ diagrams with only finite coordinates.
:param init: The initial value for barycenter estimate.
- If ``None``, init is made on a random diagram from the dataset.
- Otherwise, it can be an ``int``
- (then initialization is made on ``pdiagset[init]``)
- or a `(n x 2)` ``numpy.array`` enconding
- a persistence diagram with `n` points.
+ If ``None``, init is made on a random diagram from the dataset.
+ Otherwise, it can be an ``int`` (then initialization is made on ``pdiagset[init]``)
+ or a `(n x 2)` ``numpy.array`` encoding a persistence diagram with `n` points.
:type init: ``int``, or (n x 2) ``np.array``
- :param verbose: if ``True``, returns additional information about the
- barycenter.
+ :param verbose: if ``True``, returns additional information about the barycenter.
:type verbose: boolean
- :returns: If not verbose (default), a ``numpy.array`` encoding
- the barycenter estimate of pdiagset
- (local minimum of the energy function).
- If ``pdiagset`` is empty, returns ``None``.
- If verbose, returns a couple ``(Y, log)``
- where ``Y`` is the barycenter estimate,
- and ``log`` is a ``dict`` that contains additional informations:
-
- - `"groupings"`, a list of list of pairs ``(i,j)``.
- Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates
- that ``pdiagset[k][i]`` is matched to ``Y[j]``
- if ``i = -1`` or ``j = -1``, it means they
- represent the diagonal.
-
- - `"energy"`, ``float`` representing the Frechet energy value obtained.
- It is the mean of squared distances of observations to the output.
-
- - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm.
+ :returns: If not verbose (default), a ``numpy.array`` encoding the barycenter estimate of pdiagset
+ (local minimum of the energy function).
+ If ``pdiagset`` is empty, returns ``None``.
+ If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate,
+ and ``log`` is a ``dict`` that contains additional information:
+
+ - `"groupings"`, a list of list of pairs ``(i,j)``. Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that `pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal.
+
+ - `"energy"`, ``float`` representing the Frechet energy value obtained. It is the mean of squared distances of observations to the output.
+
+ - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm.
'''
X = pdiagset # to shorten notations, not a copy
m = len(X) # number of diagrams we are averaging
@@ -156,4 +144,3 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
return Y, log
else:
return Y
-
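Example: a minimal sketch of lagrangian_barycenter with the verbose output described above, assuming the module is importable as gudhi.wasserstein.barycenter:

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter

    dgms = [np.array([[0.0, 1.0]]),
            np.array([[0.1, 1.1], [0.0, 0.2]])]
    Y, log = lagrangian_barycenter(dgms, init=0, verbose=True)
    print(Y)                               # barycenter estimate
    print(log["energy"], log["nb_iter"])   # Frechet energy and iteration count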
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index 35315939..89ecab1c 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -15,6 +15,8 @@ try:
except ImportError:
print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
+
+# Currently unused, but Théo says it is likely to be used again.
def _proj_on_diag(X):
'''
:param X: (n x 2) array encoding the points of a persistent diagram.
@@ -24,7 +26,19 @@ def _proj_on_diag(X):
return np.array([Z , Z]).T
-def _build_dist_matrix(X, Y, order=2., internal_p=2.):
+def _dist_to_diag(X, internal_p):
+ '''
+ :param X: (n x 2) array encoding the points of a persistent diagram.
+ :param internal_p: Ground metric (i.e. norm L^p).
+ :returns: (n) array encoding the (respective orthogonal) distances of the points to the diagonal
+
+ .. note::
+ Assumes that the points are above the diagonal.
+ '''
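+ # For a point (b, d) with b <= d, the L^p projection onto the diagonal is
+ # ((b+d)/2, (b+d)/2), so the distance is ||((b-d)/2, (d-b)/2)||_p
+ # = ((d-b)/2) * 2**(1/p) = (d-b) * 2**(1/p - 1), hence the factor below.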
+ return (X[:, 1] - X[:, 0]) * 2 ** (1.0 / internal_p - 1)
+
+
+def _build_dist_matrix(X, Y, order, internal_p):
'''
:param X: (n x 2) numpy.array encoding the (points of the) first diagram.
:param Y: (m x 2) numpy.array encoding the second diagram.
@@ -36,16 +50,12 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.):
and its orthogonal projection onto the diagonal.
note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal).
'''
- Xdiag = _proj_on_diag(X)
- Ydiag = _proj_on_diag(Y)
+ Cxd = _dist_to_diag(X, internal_p)**order
+ Cdy = _dist_to_diag(Y, internal_p)**order
if np.isinf(internal_p):
C = sc.cdist(X,Y, metric='chebyshev')**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
else:
C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
Cf = np.hstack((C, Cxd[:,None]))
Cdy = np.append(Cdy, 0)
@@ -54,18 +64,31 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.):
return Cf
-def _perstot(X, order, internal_p):
+def _perstot_autodiff(X, order, internal_p):
+ '''
+ Version of _perstot that works on eagerpy tensors.
+ '''
+ return _dist_to_diag(X, internal_p).norms.lp(order)
+
+def _perstot(X, order, internal_p, enable_autodiff):
'''
:param X: (n x 2) numpy.array (points of a given diagram).
:param order: exponent for Wasserstein. Default value is 2.
:param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X is torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation.
+ :type enable_autodiff: bool
:returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
'''
- Xdiag = _proj_on_diag(X)
- return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
+ if enable_autodiff:
+ import eagerpy as ep
+
+ return _perstot_autodiff(ep.astensor(X), order, internal_p).raw
+ else:
+ return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
-def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
+def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_autodiff=False):
'''
:param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
(i.e. with infinite coordinate).
@@ -76,6 +99,13 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
:param order: exponent for Wasserstein; Default value is 2.
:param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
+ with `matching=True`.
+
+ .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+ and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
+ :type enable_autodiff: bool
:returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
respect to the internal_p-norm as ground metric.
If matching is set to True, also returns the optimal matching between X and Y.
@@ -84,23 +114,31 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
m = len(Y)
# handle empty diagrams
- if X.size == 0:
- if Y.size == 0:
+ if n == 0:
+ if m == 0:
if not matching:
+ # What if enable_autodiff?
return 0.
else:
return 0., np.array([])
else:
if not matching:
- return _perstot(Y, order, internal_p)
+ return _perstot(Y, order, internal_p, enable_autodiff)
else:
- return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)])
- elif Y.size == 0:
+ return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)])
+ elif m == 0:
if not matching:
- return _perstot(X, order, internal_p)
+ return _perstot(X, order, internal_p, enable_autodiff)
else:
- return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)])
+ return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
+ if enable_autodiff:
+ import eagerpy as ep
+
+ X_orig = ep.astensor(X)
+ Y_orig = ep.astensor(Y)
+ X = X_orig.numpy()
+ Y = Y_orig.numpy()
M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
a[-1] = m
@@ -108,6 +146,7 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
b[-1] = n
if matching:
+ assert not enable_autodiff, "matching and enable_autodiff are currently incompatible"
P = ot.emd(a=a,b=b,M=M, numItermax=2000000)
ot_cost = np.sum(np.multiply(P,M))
P[-1, -1] = 0 # Remove matching corresponding to the diagonal
@@ -117,6 +156,23 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
match[:,1][match[:,1] >= m] = -1
return ot_cost ** (1./order) , match
+ if enable_autodiff:
+ P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
+ pairs_X_Y = np.argwhere(P[:-1, :-1])
+ pairs_X_diag = np.nonzero(P[:-1, -1])
+ pairs_Y_diag = np.nonzero(P[-1, :-1])
+ dists = []
+ # empty arrays are not handled properly by the helpers, so we avoid calling them
+ if len(pairs_X_Y):
+ dists.append((Y_orig[pairs_X_Y[:, 1]] - X_orig[pairs_X_Y[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order))
+ if len(pairs_X_diag):
+ dists.append(_perstot_autodiff(X_orig[pairs_X_diag], order, internal_p))
+ if len(pairs_Y_diag):
+ dists.append(_perstot_autodiff(Y_orig[pairs_Y_diag], order, internal_p))
+ dists = [dist.reshape(1) for dist in dists]
+ return ep.concatenate(dists).norms.lp(order).raw
+ # We can also concatenate the 3 vectors to compute just one norm.
+
# Computation of the otcost using the ot.emd2 library.
# Note: it is the Wasserstein distance to the power q.
# The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
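Example: a minimal sketch of enable_autodiff, assuming PyTorch and EagerPy are installed:

    import torch
    from gudhi.wasserstein import wasserstein_distance

    X = torch.tensor([[0.0, 1.0], [0.5, 2.0]], requires_grad=True)
    Y = torch.tensor([[0.1, 1.1]])

    # The result is a torch scalar whose gradient flows back to the
    # off-diagonal points of X; matching=True is not supported here.
    d = wasserstein_distance(X, Y, order=2, internal_p=2, enable_autodiff=True)
    d.backward()
    print(X.grad)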
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
new file mode 100644
index 00000000..0541572b
--- /dev/null
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -0,0 +1,59 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Raphaël Tinarrage, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from gudhi import SimplexTree
+
+class WeightedRipsComplex:
+ """
+ Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
+ in the way described in :cite:`dtmfiltrations`.
+ Remark that all the filtration values are doubled compared to the definition in the paper
+ for the consistency with RipsComplex.
+ """
+ def __init__(self,
+ distance_matrix,
+ weights=None,
+ max_filtration=float('inf')):
+ """
+ Args:
+ distance_matrix (Sequence[Sequence[float]]): distance matrix (full square or lower triangular).
+ weights (Sequence[float]): (one half of) the weight of each vertex.
+ max_filtration (float): specifies the maximal filtration value to be considered.
+ """
+ self.distance_matrix = distance_matrix
+ if weights is not None:
+ self.weights = weights
+ else:
+ self.weights = [0] * len(distance_matrix)
+ self.max_filtration = max_filtration
+
+ def create_simplex_tree(self, max_dimension):
+ """
+ Args:
+ max_dimension (int): graph expansion until this given dimension.
+ """
+ dist = self.distance_matrix
+ F = self.weights
+ num_pts = len(dist)
+
+ st = SimplexTree()
+
+ for i in range(num_pts):
+ if 2*F[i] <= self.max_filtration:
+ st.insert([i], 2*F[i])
+ for i in range(num_pts):
+ for j in range(i):
+ value = max(2*F[i], 2*F[j], dist[i][j] + F[i] + F[j])
+ # max is needed when F is not 1-Lipschitz
+ if value <= self.max_filtration:
+ st.insert([i,j], filtration=value)
+
+ st.expansion(max_dimension)
+ return st
+
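Example: a minimal sketch of the new class, assuming it is importable as gudhi.weighted_rips_complex.WeightedRipsComplex:

    import numpy as np
    from scipy.spatial.distance import cdist
    from gudhi.weighted_rips_complex import WeightedRipsComplex

    pts = np.random.rand(30, 2)
    dist = cdist(pts, pts)
    weights = 0.5 * np.ones(len(pts))  # one half of each vertex weight

    wrc = WeightedRipsComplex(distance_matrix=dist, weights=weights,
                              max_filtration=2.0)
    st = wrc.create_simplex_tree(max_dimension=2)
    print(st.num_simplices())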
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 8614eee3..40de88f3 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -58,7 +58,6 @@ class Alpha_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square) {
alpha_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/Euclidean_strong_witness_complex_interface.h b/src/python/include/Euclidean_strong_witness_complex_interface.h
index c1c72737..f94c51ef 100644
--- a/src/python/include/Euclidean_strong_witness_complex_interface.h
+++ b/src/python/include/Euclidean_strong_witness_complex_interface.h
@@ -50,12 +50,10 @@ class Euclidean_strong_witness_complex_interface {
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
std::vector<double> get_point(unsigned vh) {
diff --git a/src/python/include/Euclidean_witness_complex_interface.h b/src/python/include/Euclidean_witness_complex_interface.h
index 5d7dbdc2..4411ae79 100644
--- a/src/python/include/Euclidean_witness_complex_interface.h
+++ b/src/python/include/Euclidean_witness_complex_interface.h
@@ -49,12 +49,10 @@ class Euclidean_witness_complex_interface {
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square, std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Gudhi::Simplex_tree<>* simplex_tree, double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
std::vector<double> get_point(unsigned vh) {
diff --git a/src/python/include/Nerve_gic_interface.h b/src/python/include/Nerve_gic_interface.h
index 5e7f8ae6..ab14c318 100644
--- a/src/python/include/Nerve_gic_interface.h
+++ b/src/python/include/Nerve_gic_interface.h
@@ -29,7 +29,6 @@ class Nerve_gic_interface : public Cover_complex<std::vector<double>> {
public:
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree) {
create_complex(*simplex_tree);
- simplex_tree->initialize_filtration();
}
void set_cover_from_Euclidean_Voronoi(int m) {
set_cover_from_Voronoi(Gudhi::Euclidean_distance(), m);
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h
index 8c79e6f3..e5a3dfba 100644
--- a/src/python/include/Persistent_cohomology_interface.h
+++ b/src/python/include/Persistent_cohomology_interface.h
@@ -13,9 +13,11 @@
#include <gudhi/Persistent_cohomology.h>
+#include <cstdlib>
#include <vector>
#include <utility> // for std::pair
#include <algorithm> // for sort
+#include <unordered_map>
namespace Gudhi {
@@ -23,82 +25,204 @@ template<class FilteredComplex>
class Persistent_cohomology_interface : public
persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp> {
private:
+ typedef persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp> Base;
/*
* Compare two intervals by dimension, then by length.
*/
struct cmp_intervals_by_dim_then_length {
- explicit cmp_intervals_by_dim_then_length(FilteredComplex * sc)
- : sc_(sc) { }
-
template<typename Persistent_interval>
bool operator()(const Persistent_interval & p1, const Persistent_interval & p2) {
- if (sc_->dimension(get < 0 > (p1)) == sc_->dimension(get < 0 > (p2)))
- return (sc_->filtration(get < 1 > (p1)) - sc_->filtration(get < 0 > (p1))
- > sc_->filtration(get < 1 > (p2)) - sc_->filtration(get < 0 > (p2)));
+ if (std::get<0>(p1) == std::get<0>(p2)) {
+ auto& i1 = std::get<1>(p1);
+ auto& i2 = std::get<1>(p2);
+ return std::get<1>(i1) - std::get<0>(i1) > std::get<1>(i2) - std::get<0>(i2);
+ }
else
- return (sc_->dimension(get < 0 > (p1)) > sc_->dimension(get < 0 > (p2)));
+ return (std::get<0>(p1) > std::get<0>(p2));
+ // Why does this sort by decreasing dimension?
}
- FilteredComplex* sc_;
};
public:
- Persistent_cohomology_interface(FilteredComplex* stptr)
- : persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomology::Field_Zp>(*stptr),
- stptr_(stptr) { }
-
- Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max)
- : persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>(*stptr, persistence_dim_max),
+ Persistent_cohomology_interface(FilteredComplex* stptr, bool persistence_dim_max=false)
+ : Base(*stptr, persistence_dim_max),
stptr_(stptr) { }
- std::vector<std::pair<int, std::pair<double, double>>> get_persistence(int homology_coeff_field,
- double min_persistence) {
- persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::init_coefficients(homology_coeff_field);
- persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::compute_persistent_cohomology(min_persistence);
-
- // Custom sort and output persistence
- cmp_intervals_by_dim_then_length cmp(stptr_);
- auto persistent_pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
- persistent_cohomology::Field_Zp>::get_persistent_pairs();
- std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp);
+ // TODO: move to the constructors?
+ void compute_persistence(int homology_coeff_field, double min_persistence) {
+ Base::init_coefficients(homology_coeff_field);
+ Base::compute_persistent_cohomology(min_persistence);
+ }
+ std::vector<std::pair<int, std::pair<double, double>>> get_persistence() {
std::vector<std::pair<int, std::pair<double, double>>> persistence;
+ auto const& persistent_pairs = Base::get_persistent_pairs();
+ persistence.reserve(persistent_pairs.size());
for (auto pair : persistent_pairs) {
- persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)),
- std::make_pair(stptr_->filtration(get<0>(pair)),
- stptr_->filtration(get<1>(pair)))));
+ persistence.emplace_back(stptr_->dimension(get<0>(pair)),
+ std::make_pair(stptr_->filtration(get<0>(pair)),
+ stptr_->filtration(get<1>(pair))));
}
+ // Custom sort and output persistence
+ cmp_intervals_by_dim_then_length cmp;
+ std::sort(std::begin(persistence), std::end(persistence), cmp);
return persistence;
}
- std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs() {
- auto pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
+ // This function computes the top-dimensional cofaces associated to the positive and negative
+ // simplices of a cubical complex. The output format is a vector of vectors of three integers,
+ // which are [homological dimension, index of top-dimensional coface of positive simplex,
+ // index of top-dimensional coface of negative simplex]. If the topological feature is essential,
+ // then the index of top-dimensional coface of negative simplex is arbitrarily set to -1.
+ std::vector<std::vector<int>> cofaces_of_cubical_persistence_pairs() {
+
+ // Warning: this function is meant to be used with CubicalComplex only!!
+
+ auto&& pairs = persistent_cohomology::Persistent_cohomology<FilteredComplex,
persistent_cohomology::Field_Zp>::get_persistent_pairs();
+ // Gather all top-dimensional cells and store their simplex handles
+ std::vector<std::size_t> max_splx;
+ for (auto splx : stptr_->top_dimensional_cells_range())
+ max_splx.push_back(splx);
+ // Sort these simplex handles and compute the ordering function
+ // This map allows going directly from a simplex handle to the position of the corresponding top-dimensional cell in the input data
+ std::unordered_map<std::size_t, int> order;
+ //std::sort(max_splx.begin(), max_splx.end());
+ for (unsigned int i = 0; i < max_splx.size(); i++) order.emplace(max_splx[i], i);
+
+ std::vector<std::vector<int>> persistence_pairs;
+ for (auto pair : pairs) {
+ int h = stptr_->dimension(get<0>(pair));
+ // Recursively get the top-dimensional cell / coface associated to the persistence generator
+ std::size_t face0 = stptr_->get_top_dimensional_coface_of_a_cell(get<0>(pair));
+ // Retrieve the index of the corresponding top-dimensional cell in the input data
+ int splx0 = order[face0];
+
+ int splx1 = -1;
+ if (get<1>(pair) != stptr_->null_simplex()){
+ // Recursively get the top-dimensional cell / coface associated to the persistence generator
+ std::size_t face1 = stptr_->get_top_dimensional_coface_of_a_cell(get<1>(pair));
+ // Retrieve the index of the corresponding top-dimensional cell in the input data
+ splx1 = order[face1];
+ }
+ persistence_pairs.push_back({ h, splx0, splx1 });
+ }
+ return persistence_pairs;
+ }
+
+ std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs() {
std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs;
+ auto const& pairs = Base::get_persistent_pairs();
persistence_pairs.reserve(pairs.size());
+ std::vector<int> birth;
+ std::vector<int> death;
for (auto pair : pairs) {
- std::vector<int> birth;
+ birth.clear();
if (get<0>(pair) != stptr_->null_simplex()) {
for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))) {
birth.push_back(vertex);
}
}
- std::vector<int> death;
+ death.clear();
if (get<1>(pair) != stptr_->null_simplex()) {
+ death.reserve(birth.size()+1);
for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))) {
death.push_back(vertex);
}
}
- persistence_pairs.push_back(std::make_pair(birth, death));
+ persistence_pairs.emplace_back(birth, death);
}
return persistence_pairs;
}
+ // TODO: (possibly at the python level)
+ // - an option to return only some of those vectors?
+ typedef std::pair<std::vector<std::vector<int>>, std::vector<std::vector<int>>> Generators;
+
+ Generators lower_star_generators() {
+ Generators out;
+ // diags[i] should be interpreted as vector<array<int,2>>
+ auto& diags = out.first;
+ // diagsinf[i] should be interpreted as vector<int>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ auto v = stptr_->vertex_with_same_filtration(s);
+ if(t == stptr_->null_simplex()) {
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ diagsinf[dim].push_back(v);
+ } else {
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto w = stptr_->vertex_with_same_filtration(t);
+ auto& d = diags[dim];
+ d.insert(d.end(), { v, w });
+ }
+ }
+ return out;
+ }
+
+ // An alternative, to avoid those different sizes, would be to "pad" vertex generator v as (v, v) or (v, -1). When using it as index, this corresponds to adding the vertex filtration values either on the diagonal of the distance matrix, or as an extra row or column.
+ // We could also merge the vectors for different dimensions into a single one, with an extra column for the dimension (converted to type double).
+ Generators flag_generators() {
+ Generators out;
+ // diags[0] should be interpreted as vector<array<int,3>> and other diags[i] as vector<array<int,4>>
+ auto& diags = out.first;
+ // diagsinf[0] should be interpreted as vector<int> and other diagsinf[i] as vector<array<int,2>>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ bool infinite = t == stptr_->null_simplex();
+ if(infinite) {
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if (diagsinf.empty()) diagsinf.emplace_back();
+ diagsinf[0].push_back(v);
+ } else {
+ auto e = stptr_->edge_with_same_filtration(s);
+ auto&& e_vertices = stptr_->simplex_vertex_range(e);
+ auto i = std::begin(e_vertices);
+ auto v1 = *i;
+ auto v2 = *++i;
+ GUDHI_CHECK(++i==std::end(e_vertices), "must be an edge");
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ auto& d = diagsinf[dim];
+ d.insert(d.end(), { v1, v2 });
+ }
+ } else {
+ auto et = stptr_->edge_with_same_filtration(t);
+ auto&& et_vertices = stptr_->simplex_vertex_range(et);
+ auto it = std::begin(et_vertices);
+ auto w1 = *it;
+ auto w2 = *++it;
+ GUDHI_CHECK(++it==std::end(et_vertices), "must be an edge");
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if (diags.empty()) diags.emplace_back();
+ auto& d = diags[0];
+ d.insert(d.end(), { v, w1, w2 });
+ } else {
+ auto es = stptr_->edge_with_same_filtration(s);
+ auto&& es_vertices = stptr_->simplex_vertex_range(es);
+ auto is = std::begin(es_vertices);
+ auto v1 = *is;
+ auto v2 = *++is;
+ GUDHI_CHECK(++is==std::end(es_vertices), "must be an edge");
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto& d = diags[dim];
+ d.insert(d.end(), { v1, v2, w1, w2 });
+ }
+ }
+ }
+ return out;
+ }
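+ // Illustrative usage from Python (names assumed from test_simplex_generators.py below):
+ //   g = st.flag_persistence_generators()
+ //   g[0] is an (n x 3) array of finite dim-0 pairs (birth vertex, death edge),
+ //   g[1][i] an (m x 4) array of finite pairs in dimension i+1 (birth edge, death edge),
+ //   g[2] lists the birth vertices of essential dim-0 classes,
+ //   g[3][i] is a (p x 2) array of birth edges of essential classes in dimension i+1.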
+
private:
// A copy
FilteredComplex* stptr_;
diff --git a/src/python/include/Rips_complex_interface.h b/src/python/include/Rips_complex_interface.h
index a66b0e5b..d98b0226 100644
--- a/src/python/include/Rips_complex_interface.h
+++ b/src/python/include/Rips_complex_interface.h
@@ -53,7 +53,6 @@ class Rips_complex_interface {
rips_complex_->create_complex(*simplex_tree, dim_max);
else
sparse_rips_complex_->create_complex(*simplex_tree, dim_max);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 1a18aed6..56d7c41d 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -16,8 +16,6 @@
#include <gudhi/Simplex_tree.h>
#include <gudhi/Points_off_io.h>
-#include "Persistent_cohomology_interface.h"
-
#include <iostream>
#include <vector>
#include <utility> // std::pair
@@ -43,16 +41,19 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
Extended_filtration_data efd;
- bool find_simplex(const Simplex& vh) {
- return (Base::find(vh) != Base::null_simplex());
+ bool find_simplex(const Simplex& simplex) {
+ return (Base::find(simplex) != Base::null_simplex());
}
- void assign_simplex_filtration(const Simplex& vh, Filtration_value filtration) {
- Base::assign_filtration(Base::find(vh), filtration);
+ void assign_simplex_filtration(const Simplex& simplex, Filtration_value filtration) {
+ Base::assign_filtration(Base::find(simplex), filtration);
+ Base::clear_filtration();
}
bool insert(const Simplex& simplex, Filtration_value filtration = 0) {
Insertion_result result = Base::insert_simplex_and_subfaces(simplex, filtration);
+ if (result.first != Base::null_simplex())
+ Base::clear_filtration();
return (result.second);
}
@@ -86,7 +87,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
void remove_maximal_simplex(const Simplex& simplex) {
Base::remove_maximal_simplex(Base::find(simplex));
- Base::initialize_filtration();
+ Base::clear_filtration();
}
Simplex_and_filtration get_simplex_and_filtration(Simplex_handle f_simplex) {
@@ -123,7 +124,6 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
void compute_extended_filtration() {
this->efd = this->extend_filtration();
- this->initialize_filtration();
return;
}
@@ -157,11 +157,6 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
return new_dgm;
}
- void create_persistence(Gudhi::Persistent_cohomology_interface<Base>* pcoh) {
- Base::initialize_filtration();
- pcoh = new Gudhi::Persistent_cohomology_interface<Base>(*this);
- }
-
// Iterator over the simplex tree
Complex_simplex_iterator get_simplices_iterator_begin() {
// this specific case works because the range is just a pair of iterators - it would not work if the range were a vector
diff --git a/src/python/include/Strong_witness_complex_interface.h b/src/python/include/Strong_witness_complex_interface.h
index cda5b514..e9ab0c7b 100644
--- a/src/python/include/Strong_witness_complex_interface.h
+++ b/src/python/include/Strong_witness_complex_interface.h
@@ -41,13 +41,11 @@ class Strong_witness_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree,
double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/Tangential_complex_interface.h b/src/python/include/Tangential_complex_interface.h
index 698226cc..b1afce94 100644
--- a/src/python/include/Tangential_complex_interface.h
+++ b/src/python/include/Tangential_complex_interface.h
@@ -90,7 +90,6 @@ class Tangential_complex_interface {
void create_simplex_tree(Simplex_tree<>* simplex_tree) {
tangential_complex_->create_complex<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>(*simplex_tree);
- simplex_tree->initialize_filtration();
}
void set_max_squared_edge_length(double max_squared_edge_length) {
diff --git a/src/python/include/Witness_complex_interface.h b/src/python/include/Witness_complex_interface.h
index 45e14253..76947e53 100644
--- a/src/python/include/Witness_complex_interface.h
+++ b/src/python/include/Witness_complex_interface.h
@@ -41,13 +41,11 @@ class Witness_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
std::size_t limit_dimension) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square, limit_dimension);
- simplex_tree->initialize_filtration();
}
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree,
double max_alpha_square) {
witness_complex_->create_complex(*simplex_tree, max_alpha_square);
- simplex_tree->initialize_filtration();
}
private:
diff --git a/src/python/include/pybind11_diagram_utils.h b/src/python/include/pybind11_diagram_utils.h
new file mode 100644
index 00000000..d9627258
--- /dev/null
+++ b/src/python/include/pybind11_diagram_utils.h
@@ -0,0 +1,39 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <boost/range/counting_range.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+namespace py = pybind11;
+typedef py::array_t<double> Dgm;
+
+// Get m[i,0] and m[i,1] as a pair, given the data pointer and the strides (in bytes)
+static auto pairify(void* p, ssize_t stride0, ssize_t stride1) {
+ return [=](ssize_t i){
+ char* birth = (char*)p + i * stride0;
+ char* death = birth + stride1;
+ return std::make_pair(*(double*)birth, *(double*)death);
+ };
+}
+
+inline auto numpy_to_range_of_pairs(py::array_t<double> dgm) {
+ py::buffer_info buf = dgm.request();
+ // shape (n,2) or (0) for empty
+ if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0))
+ throw std::runtime_error("Diagram must be an array of size n x 2");
+ // In the shape (0) case, avoid reading the non-existent strides[1], even though it would not be used.
+ ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0;
+ auto cnt = boost::counting_range<ssize_t>(0, buf.shape[0]);
+ return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1));
+ // Careful: the returned range must not contain references to dead temporaries.
+}
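+
+// A minimal usage sketch (the caller must keep dgm alive while the range is used):
+//   auto pairs = numpy_to_range_of_pairs(dgm);
+//   for (auto p : pairs) { /* p.first is the birth, p.second the death */ }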
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index f968bd59..b9f4e3f0 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -18,7 +18,8 @@ __author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
-modules = [@GUDHI_PYTHON_MODULES_TO_COMPILE@]
+cython_modules = [@GUDHI_CYTHON_MODULES@]
+pybind11_modules = [@GUDHI_PYBIND11_MODULES@]
source_dir='@CMAKE_CURRENT_SOURCE_DIR@/gudhi/'
extra_compile_args=[@GUDHI_PYTHON_EXTRA_COMPILE_ARGS@]
@@ -30,7 +31,7 @@ runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@]
# Create ext_modules list from module list
ext_modules = []
-for module in modules:
+for module in cython_modules:
ext_modules.append(Extension(
'gudhi.' + module,
sources = [source_dir + module + '.pyx',],
@@ -45,15 +46,21 @@ for module in modules:
ext_modules = cythonize(ext_modules)
-ext_modules.append(Extension(
- 'gudhi.hera',
- sources = [source_dir + 'hera.cc'],
- language = 'c++',
- include_dirs = include_dirs +
- ['@HERA_WASSERSTEIN_INCLUDE_DIR@',
- pybind11.get_include(False), pybind11.get_include(True)],
- extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
- ))
+for module in pybind11_modules:
+ my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
+ if module == 'hera':
+ my_include_dirs = ['@HERA_WASSERSTEIN_INCLUDE_DIR@'] + my_include_dirs
+ ext_modules.append(Extension(
+ 'gudhi.' + module,
+ sources = [source_dir + module + '.cc'],
+ language = 'c++',
+ include_dirs = my_include_dirs,
+ extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
+ extra_link_args=extra_link_args,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ runtime_library_dirs=runtime_library_dirs,
+ ))
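+# For instance, with pybind11_modules = ['hera'] (a hypothetical value substituted
+# by CMake), this builds gudhi.hera from gudhi/hera.cc, with the Hera headers
+# prepended to the include path.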
setup(
name = 'gudhi',
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index fce4875c..d0e4e9e8 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -147,3 +147,30 @@ def test_connected_sublevel_sets():
periodic_dimensions = periodic_dimensions)
assert cub.persistence() == [(0, (2.0, float("inf")))]
assert cub.betti_numbers() == [1, 0, 0]
+
+def test_cubical_generators():
+ cub = CubicalComplex(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+ cub.persistence()
+ g = cub.cofaces_of_persistence_pairs()
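+ # The returned indices are positions in the flattened top_dimensional_cells
+ # input; e.g. the 1-cycle below dies at the center cell (flat index 4, value 1).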
+ assert len(g[0]) == 2
+ assert len(g[1]) == 1
+ assert np.array_equal(g[0][0], np.empty(shape=[0,2]))
+ assert np.array_equal(g[0][1], np.array([[7, 4]]))
+ assert np.array_equal(g[1][0], np.array([8]))
+
+def test_cubical_cofaces_of_persistence_pairs_when_pd_has_no_paired_birth_and_death():
+ cub = CubicalComplex(dimensions=[1,2], top_dimensional_cells=[0.0, 1.0])
+ cub.persistence(homology_coeff_field=2, min_persistence=0)
+ pairs = cub.cofaces_of_persistence_pairs()
+ assert pairs[0] == []
+ assert np.array_equal(pairs[1][0], np.array([0]))
+
+def test_periodic_cofaces_of_persistence_pairs_when_pd_has_no_paired_birth_and_death():
+ per_cub = PeriodicCubicalComplex(dimensions=[1,2], top_dimensional_cells=[0.0, 1.0],
+ periodic_dimensions=[True, True])
+ per_cub.persistence(homology_coeff_field=2, min_persistence=0)
+ pairs = per_cub.cofaces_of_persistence_pairs()
+ assert pairs[0] == []
+ assert np.array_equal(pairs[1][0], np.array([0]))
+ assert np.array_equal(pairs[1][1], np.array([0, 1]))
+ assert np.array_equal(pairs[1][2], np.array([1]))
diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py
new file mode 100755
index 00000000..0a52279e
--- /dev/null
+++ b/src/python/test/test_dtm.py
@@ -0,0 +1,89 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.dtm import DistanceToMeasure, DTMDensity
+import numpy
+import pytest
+import torch
+import math
+
+
+def test_dtm_compare_euclidean():
+ pts = numpy.random.rand(1000, 4)
+ k = 6
+ dtm = DistanceToMeasure(k, implementation="ckdtree")
+ r0 = dtm.fit_transform(pts)
+ dtm = DistanceToMeasure(k, implementation="sklearn")
+ r1 = dtm.fit_transform(pts)
+ assert r1 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="sklearn", algorithm="brute")
+ r2 = dtm.fit_transform(pts)
+ assert r2 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="hnsw")
+ r3 = dtm.fit_transform(pts)
+ assert r3 == pytest.approx(r0, rel=0.1)
+ from scipy.spatial.distance import cdist
+
+ d = cdist(pts, pts)
+ dtm = DistanceToMeasure(k, metric="precomputed")
+ r4 = dtm.fit_transform(d)
+ assert r4 == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, metric="precomputed", n_jobs=2)
+ r4b = dtm.fit_transform(d)
+ assert r4b == pytest.approx(r0)
+ dtm = DistanceToMeasure(k, implementation="keops")
+ r5 = dtm.fit_transform(pts)
+ assert r5 == pytest.approx(r0)
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="keops", enable_autodiff=True)
+ r6 = dtm.fit_transform(pts2)
+ assert r6.detach().numpy() == pytest.approx(r0)
+ r6.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+ pts2 = torch.tensor(pts, requires_grad=True)
+ assert pts2.grad is None
+ dtm = DistanceToMeasure(k, implementation="ckdtree", enable_autodiff=True)
+ r7 = dtm.fit_transform(pts2)
+ assert r7.detach().numpy() == pytest.approx(r0)
+ r7.sum().backward()
+ assert not torch.isnan(pts2.grad).any()
+
+
+def test_dtm_precomputed():
+ dist = numpy.array([[1.0, 3, 8], [1, 5, 5], [0, 2, 3]])
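+ # With metric="neighbors", each row already lists the distances to a point's
+ # nearest neighbors; with q=1 the DTM is their mean: (1+3)/2=2, (1+5)/2=3, (0+2)/2=1.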
+ dtm = DistanceToMeasure(2, q=1, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 3, 1])
+
+ dist = numpy.array([[2.0, 2], [0, 1], [3, 4]])
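+ # With q=2 the DTM is the root mean square of the distances:
+ # sqrt((4+4)/2)=2, sqrt((0+1)/2)~0.707, sqrt((9+16)/2)~3.5355.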
+ dtm = DistanceToMeasure(2, q=2, metric="neighbors")
+ r = dtm.fit_transform(dist)
+ assert r == pytest.approx([2.0, 0.707, 3.5355], rel=0.01)
+
+
+def test_density_normalized():
+ sample = numpy.random.normal(0, 1, (1000000, 2))
+ queries = numpy.array([[0.0, 0.0], [-0.5, 0.7], [0.4, 1.7]])
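+ # The density of the standard 2D Gaussian at the query points.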
+ expected = numpy.exp(-(queries ** 2).sum(-1) / 2) / (2 * math.pi)
+ estimated = DTMDensity(k=150, normalize=True).fit(sample).transform(queries)
+ assert estimated == pytest.approx(expected, rel=0.4)
+
+
+def test_density():
+ distances = [[0, 1, 10], [2, 0, 30], [1, 3, 5]]
+ density = DTMDensity(k=2, metric="neighbors", dim=1).fit_transform(distances)
+ expected = numpy.array([2.0, 1.0, 0.5])
+ assert density == pytest.approx(expected)
+ distances = [[0, 1], [2, 0], [1, 3]]
+ density = DTMDensity(metric="neighbors", dim=1).fit_transform(distances)
+ assert density == pytest.approx(expected)
+ density = DTMDensity(weights=[0.5, 0.5], metric="neighbors", dim=1).fit_transform(distances)
+ assert density == pytest.approx(expected)
diff --git a/src/python/test/test_knn.py b/src/python/test/test_knn.py
new file mode 100755
index 00000000..a87ec212
--- /dev/null
+++ b/src/python/test/test_knn.py
@@ -0,0 +1,130 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.point_cloud.knn import KNearestNeighbors
+import numpy as np
+import pytest
+
+
+def test_knn_explicit():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
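+ # Manhattan distances from the 3 queries to the 4 base points are
+ # [0,1,4,5], [2,1,2,3] and [6,5,2,1]; the 2 nearest ones give the asserts below.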
+ knn = KNearestNeighbors(2, metric="manhattan", return_distance=True, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r[0] == pytest.approx(np.array([[0, 1], [1, 0], [3, 2]]))
+ assert r[1] == pytest.approx(np.array([[0.0, 1], [1, 2], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+ r = (
+ KNearestNeighbors(2, metric="chebyshev", return_distance=True, return_index=False, implementation="keops", enable_autodiff=True)
+ .fit(base)
+ .transform(query)
+ )
+ assert r == pytest.approx(np.array([[0.0, 1], [1, 1], [1, 2]]))
+
+ knn = KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True)
+ knn.fit(base)
+ r = knn.transform(query)
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+ r = (
+ KNearestNeighbors(2, metric="minkowski", p=3, return_distance=False, return_index=True, implementation="keops")
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r, [[0, 1], [1, 0], [3, 2]])
+
+ dist = np.array([[0.0, 3, 8], [1, 0, 5], [1, 2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+ # Second time in parallel
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=False, n_jobs=2, sort_results=True)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r, [[0, 1], [1, 0], [2, 0]])
+ knn = KNearestNeighbors(2, metric="precomputed", return_index=True, return_distance=True, n_jobs=2)
+ r = knn.fit_transform(dist)
+ assert np.array_equal(r[0], [[0, 1], [1, 0], [2, 0]])
+ assert np.array_equal(r[1], [[0, 3], [0, 1], [0, 1]])
+
+
+def test_knn_compare():
+ base = np.array([[1.0, 1], [1, 2], [4, 2], [4, 3]])
+ query = np.array([[1.0, 1], [2, 2], [4, 4]])
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = (
+ KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=False).fit(base).transform(query)
+ )
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=False)
+ .fit(base)
+ .transform(query)
+ )
+ assert np.array_equal(r0, r1) and np.array_equal(r0, r2) and np.array_equal(r0, r3)
+
+ r0 = (
+ KNearestNeighbors(2, implementation="ckdtree", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r1 = (
+ KNearestNeighbors(2, implementation="sklearn", return_index=True, return_distance=True)
+ .fit(base)
+ .transform(query)
+ )
+ r2 = KNearestNeighbors(2, implementation="hnsw", return_index=True, return_distance=True).fit(base).transform(query)
+ r3 = (
+ KNearestNeighbors(2, implementation="keops", return_index=True, return_distance=True).fit(base).transform(query)
+ )
+ assert np.array_equal(r0[0], r1[0]) and np.array_equal(r0[0], r2[0]) and np.array_equal(r0[0], r3[0])
+ d0 = pytest.approx(r0[1])
+ assert r1[1] == d0 and r2[1] == d0 and r3[1] == d0
+
+
+def test_knn_nop():
+ # Not obviously useful, but requesting nothing should at least return None without errors.
+ p = np.array([[0.0]])
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="sklearn"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="ckdtree"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="hnsw", ef=5
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, implementation="keops"
+ ).fit_transform(p)
+ assert None is KNearestNeighbors(
+ k=1, return_index=False, return_distance=False, metric="precomputed"
+ ).fit_transform(p)
diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py
new file mode 100755
index 00000000..8a9b4844
--- /dev/null
+++ b/src/python/test/test_simplex_generators.py
@@ -0,0 +1,64 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi
+import numpy as np
+
+
+def test_flag_generators():
+ pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03], [100, 0], [100, 3.01], [103, 0], [103.02, 3.03]])
+ r = gudhi.RipsComplex(pts, max_edge_length=4)
+ st = r.create_simplex_tree(max_dimension=50)
+ st.persistence()
+ g = st.flag_persistence_generators()
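+ # Layout (matching flag_generators in Persistent_cohomology_interface.h):
+ # g[0]: finite dim-0 pairs (birth vertex, death edge); g[1][i]: finite pairs in
+ # dimension i+1 (birth edge, death edge); g[2]: essential birth vertices;
+ # g[3][i]: essential birth edges in dimension i+1.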
+ assert np.array_equal(g[0], [[2, 2, 0], [1, 1, 0], [3, 3, 1], [6, 6, 4], [5, 5, 4], [7, 7, 5]])
+ assert len(g[1]) == 1
+ assert np.array_equal(g[1][0], [[3, 2, 2, 1]])
+ assert np.array_equal(g[2], [0, 4])
+ assert len(g[3]) == 1
+ assert np.array_equal(g[3][0], [[7, 6]])
+ # Compare trivial cases (where the simplex is the generator) with persistence_pairs.
+ # This still makes assumptions on the order of vertices in a simplex and could be more robust.
+ pairs = st.persistence_pairs()
+ assert {tuple(i) for i in g[0]} == {(i[0][0],) + tuple(i[1]) for i in pairs if len(i[0]) == 1 and len(i[1]) != 0}
+ assert {(i[0], i[1]) for i in g[1][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) != 0}
+ assert set(g[2]) == {i[0][0] for i in pairs if len(i[0]) == 1 and len(i[1]) == 0}
+ assert {(i[0], i[1]) for i in g[3][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) == 0}
+
+
+def test_lower_star_generators():
+ st = gudhi.SimplexTree()
+ st.insert([0, 1, 2], -10)
+ st.insert([0, 3], -10)
+ st.insert([1, 3], -10)
+ st.assign_filtration([2], -1)
+ st.assign_filtration([3], 0)
+ st.assign_filtration([0], 1)
+ st.assign_filtration([1], 2)
+ st.make_filtration_non_decreasing()
+ st.persistence(min_persistence=-1)
+ g = st.lower_star_persistence_generators()
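+ # g[0][d] holds (birth vertex, death vertex) pairs in dimension d,
+ # g[1][d] the birth vertices of essential classes in dimension d.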
+ assert len(g[0]) == 2
+ assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]])
+ assert np.array_equal(g[0][1], [[1, 1]])
+ assert len(g[1]) == 2
+ assert np.array_equal(g[1][0], [2])
+ assert np.array_equal(g[1][1], [1])
+
+
+def test_empty():
+ st = gudhi.SimplexTree()
+ st.persistence()
+ assert st.lower_star_persistence_generators() == ([], [])
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], np.empty((0, 3)))
+ assert g[1] == []
+ assert np.array_equal(g[2], [])
+ assert g[3] == []
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 70b26e97..2137d822 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -46,7 +46,6 @@ def test_insertion():
assert st.find([2, 3]) == False
# filtration test
- st.initialize_filtration()
assert st.filtration([0, 1, 2]) == 4.0
assert st.filtration([0, 2]) == 4.0
assert st.filtration([1, 2]) == 4.0
@@ -93,7 +92,6 @@ def test_insertion():
assert st.find([1]) == True
assert st.find([2]) == True
- st.initialize_filtration()
assert st.persistence(persistence_dim_max=True) == [
(1, (4.0, float("inf"))),
(0, (0.0, float("inf"))),
@@ -151,7 +149,6 @@ def test_expansion():
st.expansion(3)
assert st.num_vertices() == 7
assert st.num_simplices() == 22
- st.initialize_filtration()
assert list(st.get_filtration()) == [
([2], 0.1),
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 7e0d0f5f..90d26809 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -8,6 +8,7 @@
- YYYY/MM Author: Description of the modification
"""
+from gudhi.wasserstein.wasserstein import _proj_on_diag
from gudhi.wasserstein import wasserstein_distance as pot
from gudhi.hera import wasserstein_distance as hera
import numpy as np
@@ -17,6 +18,12 @@ __author__ = "Theo Lacombe"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
+def test_proj_on_diag():
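+ # _proj_on_diag maps (b, d) to its orthogonal projection ((b+d)/2, (b+d)/2) onto the diagonal.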
+ dgm = np.array([[1., 1.], [1., 2.], [3., 5.]])
+ assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]])
+ empty = np.empty((0, 2))
+ assert np.array_equal(_proj_on_diag(empty), empty)
+
def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
@@ -73,14 +80,44 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat
-def hera_wrap(delta):
+def hera_wrap(**extra):
+ def fun(*kargs,**kwargs):
+ return hera(*kargs,**kwargs,**extra)
+ return fun
+
+def pot_wrap(**extra):
def fun(*kargs,**kwargs):
- return hera(*kargs,**kwargs,delta=delta)
+ return pot(*kargs,**kwargs,**extra)
return fun
def test_wasserstein_distance_pot():
_basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True)
+ _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False)
def test_wasserstein_distance_hera():
- _basic_wasserstein(hera_wrap(1e-12), 1e-12, test_matching=False)
- _basic_wasserstein(hera_wrap(.1), .1, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
+
+def test_wasserstein_distance_grad():
+ import torch
+
+ diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
+ diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ assert diag1.grad is None and diag2.grad is None and diag3.grad is None
+ dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
+ dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
+ dist12.backward()
+ dist30.backward()
+ assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
+ diag4 = torch.tensor([[0., 10.]], requires_grad=True)
+ diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+ dist45.backward()
+ assert np.array_equal(diag4.grad, [[-1., -1.]])
+ assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
+ diag6 = torch.tensor([[5., 10.]], requires_grad=True)
+ pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ # assert np.array_equal(diag6.grad, [[0., 0.]])
diff --git a/src/python/test/test_weighted_rips.py b/src/python/test/test_weighted_rips.py
new file mode 100644
index 00000000..7ef48333
--- /dev/null
+++ b/src/python/test/test_weighted_rips.py
@@ -0,0 +1,63 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Yuichi Ike and Masatoshi Takenouchi
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.weighted_rips_complex import WeightedRipsComplex
+from gudhi.point_cloud.dtm import DistanceToMeasure
+import numpy as np
+from math import sqrt
+from scipy.spatial.distance import cdist
+import pytest
+
+def test_non_dtm_rips_complex():
+ dist = [[], [1]]
+ weights = [1, 100]
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=weights)
+ st = w_rips.create_simplex_tree(max_dimension=2)
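+ # The edge appears at max(2*w0, 2*w1, d01 + w0 + w1) = max(2, 200, 102) = 200
+ # (the edge filtration formula assumed here is the one of WeightedRipsComplex).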
+ assert st.filtration([0,1]) == pytest.approx(200.0)
+
+def test_compatibility_with_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=42)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+ assert list(st.get_filtration()) == [
+ ([0], 0.0),
+ ([1], 0.0),
+ ([2], 0.0),
+ ([3], 0.0),
+ ([0, 1], 1.0),
+ ([0, 2], 1.0),
+ ([1, 3], 1.0),
+ ([2, 3], 1.0),
+ ([1, 2], sqrt(2)),
+ ([0, 3], sqrt(2)),
+ ]
+
+def test_compatibility_with_filtered_rips():
+ distance_matrix = [[0], [1, 0], [1, sqrt(2), 0], [sqrt(2), 1, 1, 0]]
+ w_rips = WeightedRipsComplex(distance_matrix=distance_matrix,max_filtration=1.0)
+ st = w_rips.create_simplex_tree(max_dimension=1)
+
+ assert st.__is_defined() == True
+ assert st.__is_persistence_defined() == False
+
+ assert st.num_simplices() == 8
+ assert st.num_vertices() == 4
+
+def test_dtm_rips_complex():
+ pts = np.array([[2.0, 2.0], [0.0, 1.0], [3.0, 4.0]])
+ dist = cdist(pts,pts)
+ dtm = DistanceToMeasure(2, q=2, metric="precomputed")
+ r = dtm.fit_transform(dist)
+ w_rips = WeightedRipsComplex(distance_matrix=dist, weights=r)
+ st = w_rips.create_simplex_tree(max_dimension=2)
+ st.persistence()
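+ # All three points get DTM value sqrt(2.5), so vertices appear at
+ # 2*sqrt(2.5) = sqrt(10) ~ 3.1623, and the two short edges (length sqrt(5))
+ # at sqrt(5) + sqrt(10) ~ 5.3983.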
+ persistence_intervals0 = st.persistence_intervals_in_dimension(0)
+ assert persistence_intervals0 == pytest.approx(np.array([[3.16227766, 5.39834564],[3.16227766, 5.39834564], [3.16227766, float("inf")]]))
+