-rw-r--r--  .circleci/config.yml                                       |  95
-rw-r--r--  .github/for_maintainers/new_gudhi_version_creation.md      |   3
-rw-r--r--  .github/how_to_use_github_to_contribute_to_gudhi.md        |  15
-rw-r--r--  .github/next_release.md                                    |  19
-rw-r--r--  .github/workflows/pip-build-linux.yml                      |  18
-rw-r--r--  .github/workflows/pip-build-osx.yml                        |  14
-rw-r--r--  .github/workflows/pip-build-windows.yml                    |   2
-rw-r--r--  .github/workflows/pip-packaging-linux.yml                  |  14
-rw-r--r--  .github/workflows/pip-packaging-osx.yml                    |  19
-rw-r--r--  .github/workflows/pip-packaging-windows.yml                |   2
-rw-r--r--  CMakeGUDHIVersion.txt                                      |   2
-rw-r--r--  biblio/bibliography.bib                                    |   2
-rw-r--r--  biblio/how_to_cite_gudhi.bib.in                            | 352
-rw-r--r--  biblio/test/test_biblio.tex                                |   7
-rw-r--r--  biblio/test/test_gudhi_citation.tex                        |   7
m---------  ext/hera                                                   |   0
-rwxr-xr-x  scripts/build_osx_universal_gmpfr.sh                       |  47
-rw-r--r--  src/Cech_complex/doc/Intro_cech_complex.h                  |   2
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex.h              |   3
-rw-r--r--  src/Cech_complex/utilities/cechcomplex.md                  |   4
-rw-r--r--  src/Doxyfile.in                                            |   1
-rw-r--r--  src/Nerve_GIC/example/CMakeLists.txt                       |  34
-rw-r--r--  src/Nerve_GIC/include/gudhi/GIC.h                          |  18
-rw-r--r--  src/Nerve_GIC/test/CMakeLists.txt                          |  17
-rw-r--r--  src/Nerve_GIC/utilities/CMakeLists.txt                     |  36
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h              |  32
-rw-r--r--  src/Simplex_tree/test/simplex_tree_unit_test.cpp           |  14
-rw-r--r--  src/Tangential_complex/include/gudhi/Tangential_complex.h  |  11
-rw-r--r--  src/cmake/modules/GUDHI_submodules.cmake                   |   8
-rw-r--r--  src/cmake/modules/GUDHI_user_version_target.cmake          |  12
-rw-r--r--  src/common/doc/main_page.md                                |   2
-rw-r--r--  src/python/CMakeLists.txt                                  |  64
-rw-r--r--  src/python/doc/representations_sum.inc                     |  22
-rw-r--r--  src/python/gudhi/hera/bottleneck.cc                        |   2
-rw-r--r--  src/python/gudhi/hera/wasserstein.cc                       |  10
-rw-r--r--  src/python/gudhi/representations/vector_methods.py         | 151
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                          |   3
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                          |  19
-rw-r--r--  src/python/include/Alpha_complex_factory.h                 |   4
-rw-r--r--  src/python/setup.py.in                                     |   4
-rw-r--r--  src/python/test/test_persistence_graphical_tools.py        |   5
-rwxr-xr-x  src/python/test/test_representations.py                   |  16
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py               |   9
43 files changed, 740 insertions(+), 381 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index e2df5c87..ef22fbea 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -10,6 +10,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples
command: |
mkdir build
@@ -24,6 +29,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests
command: |
mkdir build
@@ -38,6 +48,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities
command: |
mkdir build
@@ -52,10 +67,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module. Generates and tests the python documentation
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
@@ -64,6 +82,7 @@ jobs:
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 -DWITH_GUDHI_REMOTE_TEST=ON .
cd python
python3 setup.py build_ext --inplace
+ ctest --output-on-failure
make sphinx
cp -R sphinx /tmp/sphinx
python3 setup.py install
@@ -83,10 +102,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Generates the C++ documentation with doxygen
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
@@ -104,6 +126,26 @@ jobs:
path: /tmp/doxygen
destination: doxygen
+ bibliography:
+ docker:
+ - image: gudhi/doxygen_for_gudhi:latest
+ steps:
+ - checkout
+ - run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
+ name: Test the LaTeX bibliography files
+ command: |
+ mkdir build
+ cd build
+ cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
+ cd biblio/test
+ latexmk -pdf -interaction=nonstopmode test_biblio.tex
+ latexmk -pdf -interaction=nonstopmode test_gudhi_citation.tex
+
### With all third parties, except CGAL and Eigen
@@ -114,6 +156,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples without cgal and eigen
command: |
mkdir build
@@ -128,6 +175,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests without cgal and eigen
command: |
mkdir build
@@ -142,6 +194,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities without cgal and eigen
command: |
mkdir build
@@ -156,10 +213,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module without cgal and eigen
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
@@ -176,6 +236,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test examples without cgal
command: |
mkdir build
@@ -190,6 +255,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test unitary tests without cgal
command: |
mkdir build
@@ -204,6 +274,11 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test utilities without cgal
command: |
mkdir build
@@ -218,10 +293,13 @@ jobs:
steps:
- checkout
- run:
+ name: Checkout submodules
+ command: |
+ git submodule sync
+ git submodule update --init
+ - run:
name: Build and test python module without cgal
command: |
- git submodule init
- git submodule update
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
@@ -246,3 +324,4 @@ workflows:
- utils
- python
- doxygen
+ - bibliography
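The submodule bootstrap added to every job above can be reproduced locally before building; a minimal sketch, using only commands that already appear in this patch:

```bash
# Fetch the ext/hera and ext/gudhi-deploy submodules, then build out of source
git submodule sync
git submodule update --init
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
```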
diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md
index 19ef168e..9634adae 100644
--- a/.github/for_maintainers/new_gudhi_version_creation.md
+++ b/.github/for_maintainers/new_gudhi_version_creation.md
@@ -22,7 +22,6 @@ cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAM
make user_version
date +"%d-%m-%Y-%T" > gudhi.@GUDHI_VERSION@/timestamp.txt
tar -czvf gudhi.@GUDHI_VERSION@.tar.gz gudhi.@GUDHI_VERSION@
-md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt
sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt
sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt
@@ -87,7 +86,7 @@ Copy gudhi WebDAV python/@GUDHI_VERSION@ as python/latest (no symbolic link with
* Name the tag: tags/gudhi-release-@GUDHI_VERSION@
* Name the release GUDHI @GUDHI_VERSION@ release
* Write the release note
-* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *md5sum.txt*, *sha256sum.txt*, *sha512sum.txt* files
+* Drag'n drop *gudhi.@GUDHI_VERSION@.tar.gz*, *sha256sum.txt*, *sha512sum.txt* files
* Tick the *This is a pre-release* check button if this is a release candidate (untick if this is an official version)
* Click the *Publish the release* button
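With the md5 file gone, a downloaded tarball is verified against the two remaining checksum files; a minimal sketch, assuming they sit next to the tarball:

```bash
# Each command prints 'gudhi.<version>.tar.gz: OK' on success
sha256sum -c sha256sum.txt
sha512sum -c sha512sum.txt
```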
diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md
index 738c1ce9..f72bb9d6 100644
--- a/.github/how_to_use_github_to_contribute_to_gudhi.md
+++ b/.github/how_to_use_github_to_contribute_to_gudhi.md
@@ -17,7 +17,7 @@ You can see your fork at https://github.com/LOGIN/gudhi-devel
## Create a local clone on your computer
```bash
-git clone https://github.com/LOGIN/gudhi-devel.git
+git clone --recurse-submodules https://github.com/LOGIN/gudhi-devel.git
```
This creates a directory gudhi-devel, which you are free to move around or rename. For the following, change to that directory:
@@ -25,16 +25,14 @@ This creates a directory gudhi-devel, which you are free to move around or renam
cd gudhi-devel
```
-When you clone the repository, you also need to download the *submodules*.
-
## Submodules
-Hera, used for Wasserstein distance, is available on an external git repository. To download it:
+When you clone the repository, you also need to download the *submodules*. This is done automatically thanks to `--recurse-submodules`.
+If you forgot this option, you can still download them with
```bash
git submodule update --init
```
-[gudhi-deploy](https://github.com/GUDHI/gudhi-deploy) is used for Continuous Integration python
-requirements and will also be downloaded by the above command.
+The submodules appear in the `ext/` subdirectory. There are currently two: [Hera](https://github.com/anigmetov/hera), for distances between persistence diagrams, and [gudhi-deploy](https://github.com/GUDHI/gudhi-deploy), for Continuous Integration.
## Configuring a remote for a fork
```bash
@@ -68,6 +66,11 @@ It is safe, it will not mess with your files.
git submodule sync
git submodule update --init
```
+You can configure `git` to do this automatically with
+```bash
+git config submodule.recurse true
+```
+(add `--global` if you want it to apply to other projects as well)
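To confirm that the submodules really are in place (for instance after switching branches), `git submodule status` lists each one with the commit it is pinned to; a quick sketch:

```bash
# A leading '-' marks a submodule that is not initialized yet
git submodule status
# Expected entries: ext/hera and ext/gudhi-deploy
```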
## Create a branch, based on the current master
```bash
diff --git a/.github/next_release.md b/.github/next_release.md
index 929a7ce6..937fc557 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,25 +1,20 @@
-We are pleased to announce the release 3.7.0 of the GUDHI library.
+We are pleased to announce the release 3.8.0 of the GUDHI library.
As a major new feature, the GUDHI library now offers ...
We are now using GitHub to develop the GUDHI library; do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.6.0:
+Below is a list of changes made since GUDHI 3.7.0:
- [Module](link)
- ...
-- [Simplex tree](https://gudhi.inria.fr/python/latest/simplex_tree_ref.html)
- - New functions to initialize from a matrix or insert batches of simplices of the same dimension.
-
-- [Rips complex](https://gudhi.inria.fr/python/latest/rips_complex_user.html)
- - Construction now rejects positional arguments, you need to specify `points=X`.
+- [Module](link)
+ - ...
-- Installation
- - c++17 is the new minimal standard to compile the library. This implies Visual Studio minimal version is now 2017.
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.6.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.7.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.7.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.8.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
@@ -32,3 +27,7 @@ Feel free to [contact us](https://gudhi.inria.fr/contact/) in case you have any
For further information about downloading and installing the library ([C++](https://gudhi.inria.fr/doc/latest/installation.html) or [Python](https://gudhi.inria.fr/python/latest/installation.html)), please visit the [GUDHI web site](https://gudhi.inria.fr/).
+## Contributors
+
+- ...
+- ... \ No newline at end of file
diff --git a/.github/workflows/pip-build-linux.yml b/.github/workflows/pip-build-linux.yml
index 11b6271d..bc4f999e 100644
--- a/.github/workflows/pip-build-linux.yml
+++ b/.github/workflows/pip-build-linux.yml
@@ -12,16 +12,16 @@ jobs:
- uses: actions/checkout@v3
with:
submodules: true
- - name: Build wheel for Python 3.10
+ - name: Build wheel for Python 3.11
run: |
- mkdir build_310
- cd build_310
- cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON310/bin/python ..
+ mkdir build_311
+ cd build_311
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON311/bin/python ..
cd src/python
- $PYTHON310/bin/python setup.py bdist_wheel
+ $PYTHON311/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
- - name: Install and test wheel for Python 3.10
+ - name: Install and test wheel for Python 3.11
run: |
- $PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
- $PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
- $PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
+ $PYTHON311/bin/python -m pip install --user pytest build_311/src/python/dist/*.whl
+ $PYTHON311/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON311/bin/python -m pytest src/python/test/test_alpha_complex.py
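After `auditwheel repair`, the manylinux-tagged copy of the wheel lands in `wheelhouse/`; its platform tag and grafted libraries can be inspected before publishing. A sketch, run from the same `build_311/src/python` directory as the job above:

```bash
# Show the platform tag the repaired wheel satisfies and the libraries bundled into it
auditwheel show wheelhouse/*.whl
```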
diff --git a/.github/workflows/pip-build-osx.yml b/.github/workflows/pip-build-osx.yml
index 59e94ca5..a438124a 100644
--- a/.github/workflows/pip-build-osx.yml
+++ b/.github/workflows/pip-build-osx.yml
@@ -2,13 +2,18 @@ name: pip build osx
on: [push, pull_request]
+env:
+ MACOSX_DEPLOYMENT_TARGET: 10.14
+ _PYTHON_HOST_PLATFORM: macosx-10.14-universal2
+ ARCHFLAGS: "-arch arm64 -arch x86_64"
+
jobs:
build:
runs-on: macos-latest
strategy:
max-parallel: 4
matrix:
- python-version: ['3.10']
+ python-version: ['3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
@@ -24,14 +29,21 @@ jobs:
brew install boost eigen gmp mpfr cgal || true
python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
+ ./scripts/build_osx_universal_gmpfr.sh
+ # Now the universal libraries are in $PWD/deps-uni/lib
- name: Build python wheel
run: |
+ export GMP_LIB_DIR=$PWD/deps-uni/lib
+ export GMPXX_LIB_DIR=$PWD/deps-uni/lib
+ export MPFR_LIB_DIR=$PWD/deps-uni/lib
python --version
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DPython_ADDITIONAL_VERSIONS=3 ..
cd src/python
python setup.py bdist_wheel
+ export PATH="$PATH:`python -m site --user-base`/bin"
+ delocate-wheel --require-archs universal2 -v dist/*.whl
- name: Install and test python wheel
run: |
python -m pip install --user pytest build/src/python/dist/*.whl
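Whether the delocated wheel is really universal2 can be double-checked by extracting it and asking `lipo` for the architecture slices of each compiled extension; a sketch, where the extraction path is illustrative:

```bash
# Unpack the wheel and list the architecture slices of every binary extension
unzip -o build/src/python/dist/*.whl -d /tmp/gudhi_wheel
for so in /tmp/gudhi_wheel/gudhi/*.so; do
  echo "$so: $(lipo -archs "$so")"   # expected: x86_64 arm64
done
```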
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index b3d75706..50bdfe2c 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.10']
+ python-version: ['3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
diff --git a/.github/workflows/pip-packaging-linux.yml b/.github/workflows/pip-packaging-linux.yml
index 285cfa00..14b1cf7a 100644
--- a/.github/workflows/pip-packaging-linux.yml
+++ b/.github/workflows/pip-packaging-linux.yml
@@ -79,6 +79,19 @@ jobs:
$PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
$PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
$PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.11
+ run: |
+ mkdir build_311
+ cd build_311
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON311/bin/python ..
+ cd src/python
+ $PYTHON311/bin/python setup.py bdist_wheel
+ auditwheel repair dist/*.whl
+ - name: Install and test wheel for Python 3.11
+ run: |
+ $PYTHON311/bin/python -m pip install --user pytest build_311/src/python/dist/*.whl
+ $PYTHON311/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON311/bin/python -m pytest src/python/test/test_alpha_complex.py
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
@@ -89,3 +102,4 @@ jobs:
$PYTHON36/bin/python -m twine upload build_38/src/python/wheelhouse/*
$PYTHON36/bin/python -m twine upload build_39/src/python/wheelhouse/*
$PYTHON36/bin/python -m twine upload build_310/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_311/src/python/wheelhouse/*
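Before the upload step, wheel metadata can be validated with `twine check`, which catches problems that would make PyPI reject a file mid-release; a minimal sketch for the new build:

```bash
# Validate the metadata of every wheel staged for upload
$PYTHON36/bin/python -m twine check build_311/src/python/wheelhouse/*
```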
diff --git a/.github/workflows/pip-packaging-osx.yml b/.github/workflows/pip-packaging-osx.yml
index 3ae840c6..9ddbcfce 100644
--- a/.github/workflows/pip-packaging-osx.yml
+++ b/.github/workflows/pip-packaging-osx.yml
@@ -4,13 +4,18 @@ on:
release:
types: [published]
+env:
+ MACOSX_DEPLOYMENT_TARGET: 10.15
+ _PYTHON_HOST_PLATFORM: macosx-10.15-universal2
+ ARCHFLAGS: "-arch arm64 -arch x86_64"
+
jobs:
build:
runs-on: macos-latest
strategy:
max-parallel: 4
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
@@ -26,8 +31,13 @@ jobs:
brew install boost eigen gmp mpfr cgal || true
python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
+ ./scripts/build_osx_universal_gmpfr.sh
+ # Now the universal libs are in $PWD/deps-uni/lib
- name: Build python wheel
run: |
+ export GMP_LIB_DIR=$PWD/deps-uni/lib
+ export GMPXX_LIB_DIR=$PWD/deps-uni/lib
+ export MPFR_LIB_DIR=$PWD/deps-uni/lib
python --version
mkdir build
cd build
@@ -45,6 +55,7 @@ jobs:
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
run: |
mkdir wheelhouse
- /Users/runner/.local/bin/delocate-listdeps build/src/python/dist/*
- /Users/runner/.local/bin/delocate-wheel --require-archs x86_64 -w wheelhouse build/src/python/dist/*
- python -m twine upload wheelhouse/* \ No newline at end of file
+ export PATH="$PATH:`python -m site --user-base`/bin"
+ delocate-listdeps build/src/python/dist/*
+ delocate-wheel --require-archs universal2 -w wheelhouse build/src/python/dist/*
+ python -m twine upload wheelhouse/*
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 6f544499..df0db9a5 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v3
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index 1dab47ab..69c8e9fc 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
# Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 7)
+set (GUDHI_MINOR_VERSION 8)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
set (GUDHI_PATCH_VERSION 0a0)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index 0a3ef43d..d8472ad0 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -1090,7 +1090,7 @@ language={English}
@ARTICLE{Reininghaus_Huber_ALL_PSSK,
author = {J. Reininghaus and S. Huber and U. Bauer and R. Kwitt},
title = {A Stable Multi-Scale Kernel for Topological Machine Learning.},
- journal = {Proc. 2015 IEEE Conf. Comp. Vision & Pat. Rec. (CVPR '15)},
+ journal = {Proc. 2015 IEEE Conf. Comp. Vision \& Pat. Rec. (CVPR '15)},
year = {2015}
}
diff --git a/biblio/how_to_cite_gudhi.bib.in b/biblio/how_to_cite_gudhi.bib.in
index 579dbf41..02c09dea 100644
--- a/biblio/how_to_cite_gudhi.bib.in
+++ b/biblio/how_to_cite_gudhi.bib.in
@@ -1,168 +1,262 @@
@book{gudhi:urm
-, title = "{GUDHI} User and Reference Manual"
-, author = "{The GUDHI Project}"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, year = @GUDHI_VERSION_YEAR@
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/"
+, title = {GUDHI User and Reference Manual}
+, author = {The GUDHI Project}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, year = {@GUDHI_VERSION_YEAR@}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/}
}
-@incollection{gudhi:FilteredComplexes
-, author = "Cl\'ement Maria"
-, title = "Filtered Complexes"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__simplex__tree.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CubicalComplex
+, author = {Pawel Dlotko}
+, title = {Cubical complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cubical__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:PersistentCohomology
-, author = "Cl\'ement Maria"
-, title = "Persistent Cohomology"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__persistent__cohomology.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:FilteredComplexes
+, author = {Cl{\'{e}}ment Maria}
+, title = {Filtered Complexes}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__simplex__tree.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:Contraction
-, author = "David Salinas"
-, title = "Contraction"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__contr.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:ToplexMap
+, author = {Fran{\c{c}}ois Godi}
+, title = {Toplex map}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__toplex__map.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:SkeletonBlocker
-, author = "David Salinas"
-, title = "Skeleton-Blocker"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__skbl.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {David Salinas}
+, title = {Skeleton-Blocker}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__skbl.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Contraction
+, author = {David Salinas}
+, title = {Contraction}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__contr.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:AlphaComplex
-, author = "Vincent Rouvreau"
-, title = "Alpha complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__alpha__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Vincent Rouvreau}
+, title = {Alpha complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__alpha__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:CubicalComplex
-, author = "Pawel Dlotko"
-, title = "Cubical complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cubical__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CechComplex
+, author = {Vincent Rouvreau and Hind Montassif}
+, title = {{\v{C}}ech complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cech__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:RipsComplex
+, author = {Cl{\'{e}}ment Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse}
+, title = {Rips complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__rips__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Collapse
+, author = {Siddharth Pritam and Marc Glisse}
+, title = {Edge collapse}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__edge__collapse.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:WitnessComplex
-, author = "Siargey Kachanovich"
-, title = "Witness complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__witness__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Siargey Kachanovich}
+, title = {Witness complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__witness__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:SubSampling
-, author = "Cl\'ement Jamin and Siargey Kachanovich"
-, title = "Subsampling"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__subsampling.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CoverComplex
+, author = {Mathieu Carri{\`{e}}re}
+, title = {Cover complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:SpatialSearching
-, author = "Cl\'ement Jamin"
-, title = "Spatial searching"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__spatial__searching.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:CoxeterTriangulation
+, author = {Siargey Kachanovich}
+, title = {Coxeter triangulation}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:TangentialComplex
-, author = "Cl\'ement Jamin"
-, title = "Tangential complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__tangential__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Cl{\'{e}}ment Jamin}
+, title = {Tangential complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__tangential__complex.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:RipsComplex
-, author = "Cl\'ement Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse"
-, title = "Rips complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__rips__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:PersistentCohomology
+, author = {Cl{\'{e}}ment Maria}
+, title = {Persistent Cohomology}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__persistent__cohomology.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
@incollection{gudhi:BottleneckDistance
-, author = "Fran{{\c{c}}ois Godi"
-, title = "Bottleneck distance"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__bottleneck__distance.html"
-, year = @GUDHI_VERSION_YEAR@
+, author = {Fran{\c{c}}ois Godi}
+, title = {Bottleneck distance}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__bottleneck__distance.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:cython
-, author = "Vincent Rouvreau"
-, title = "Cython interface"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/python/@GUDHI_VERSION@/"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:PersistenceRepresentations
+, author = {Pawel Dlotko}
+, title = {Persistence representations}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group___persistence__representations.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:CoverComplex
-, author = "Mathieu Carri\`ere"
-, title = "Cover complex"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:SubSampling
+, author = {Cl{\'{e}}ment Jamin and Siargey Kachanovich}
+, title = {Subsampling}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__subsampling.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:PersistenceRepresentations
-, author = "Pawel Dlotko"
-, title = "Persistence representations"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group___persistence__representations.html"
-, year = @GUDHI_VERSION_YEAR@
+@incollection{gudhi:SpatialSearching
+, author = {Cl{\'{e}}ment Jamin}
+, title = {Spatial searching}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__spatial__searching.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
-@incollection{gudhi:Collapse
-, author = "Siddharth Pritam and Marc Glisse"
-, title = "Edge collapse"
-, publisher = "{GUDHI Editorial Board}"
-, edition = "{@GUDHI_VERSION@}"
-, booktitle = "{GUDHI} User and Reference Manual"
-, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__edge__collapse.html"
-, year = @GUDHI_VERSION_YEAR@
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Python specific gudhi modules
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+@incollection{gudhi:WeightedRipsComplex
+, author = {Rapha{\"{e}}l Tinarrage and Yuichi Ike and Masatoshi Takenouchi}
+, title = {Weighted Rips Complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/rips_complex_user.html#weighted-rips-complex}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:DTMRipsComplex
+, author = {Yuichi Ike}
+, title = {DTM Rips Complex}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/rips_complex_user.html#dtm-rips-complex}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:WassersteinDistance
+, author = {Th{\'{e}}o Lacombe and Marc Glisse}
+, title = {Wasserstein distance}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/wasserstein_distance_user.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:PersistenceRepresentationsScikitlearnInterface
+, author = {Mathieu Carri{\`{e}}re and Gard Spreemann and Wojciech Reise}
+, title = {Persistence representations scikit-learn like interface}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/representations.html}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:Atol
+, author = {Martin Royer}
+, title = {Measure Vectorization for Automatic Topologically-Oriented Learning}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/representations.html#gudhi.representations.vector_methods.Atol}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:DistanceToMeasure
+, author = {Marc Glisse}
+, title = {Distance to measure}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/point_cloud.html#module-gudhi.point_cloud.knn}
+, year = {@GUDHI_VERSION_YEAR@}
+}
+
+@incollection{gudhi:PersistenceBasedClustering
+, author = {Marc Glisse}
+, title = {persistence-based clustering}
+, publisher = {GUDHI Editorial Board}
+, edition = {@GUDHI_VERSION@}
+, booktitle = {GUDHI User and Reference Manual}
+, url = {https://gudhi.inria.fr/python/@GUDHI_VERSION@/clustering.html}
+, year = {@GUDHI_VERSION_YEAR@}
}
diff --git a/biblio/test/test_biblio.tex b/biblio/test/test_biblio.tex
new file mode 100644
index 00000000..97dee9ed
--- /dev/null
+++ b/biblio/test/test_biblio.tex
@@ -0,0 +1,7 @@
+\documentclass{article}
+\usepackage{hyperref}
+\bibliographystyle{plainurl}
+\begin{document}
+\nocite{*}
+\bibliography{../bibliography}
+\end{document} \ No newline at end of file
diff --git a/biblio/test/test_gudhi_citation.tex b/biblio/test/test_gudhi_citation.tex
new file mode 100644
index 00000000..5fb2d33d
--- /dev/null
+++ b/biblio/test/test_gudhi_citation.tex
@@ -0,0 +1,7 @@
+\documentclass{article}
+\usepackage{hyperref}
+\bibliographystyle{plainurl}
+\begin{document}
+\nocite{*}
+\bibliography{../how_to_cite_gudhi}
+\end{document} \ No newline at end of file
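These two documents are exactly what the new `bibliography` CircleCI job compiles; the same check runs locally after the CMake configuration step shown in that job, assuming a TeX distribution that provides `latexmk` and the `plainurl` bibliography style:

```bash
cd build/biblio/test
latexmk -pdf -interaction=nonstopmode test_biblio.tex
latexmk -pdf -interaction=nonstopmode test_gudhi_citation.tex
```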
diff --git a/ext/hera b/ext/hera
-Subproject b528c4067a8aac346eb307d3c23b82d5953cfe2
+Subproject 8bfdd4bd32f005c18b5c75c502b987de552d6e4
diff --git a/scripts/build_osx_universal_gmpfr.sh b/scripts/build_osx_universal_gmpfr.sh
new file mode 100755
index 00000000..3dafa3ce
--- /dev/null
+++ b/scripts/build_osx_universal_gmpfr.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -e
+
+# In the working directory, creates deps-uni/lib/*
+# Assumes that the user has enough rights to run brew fetch
+
+# Downloading
+mkdir deps-amd64
+cd deps-amd64
+tar xf "`brew fetch --bottle-tag=big_sur gmp | sed -ne 's/^Downloaded to: //p'`"
+tar xf "`brew fetch --bottle-tag=big_sur mpfr | sed -ne 's/^Downloaded to: //p'`"
+cd ..
+mkdir deps-arm64
+cd deps-arm64
+tar xf "`brew fetch --bottle-tag=arm64_big_sur gmp | sed -ne 's/^Downloaded to: //p'`"
+tar xf "`brew fetch --bottle-tag=arm64_big_sur mpfr | sed -ne 's/^Downloaded to: //p'`"
+cd ..
+
+# Merging
+mkdir -p deps-uni/lib
+GMP1=deps-amd64/gmp/*/lib/libgmp.*.dylib
+GMP=`basename $GMP1`
+GMPXX1=deps-amd64/gmp/*/lib/libgmpxx.*.dylib
+GMPXX=`basename $GMPXX1`
+MPFR1=deps-amd64/mpfr/*/lib/libmpfr.*.dylib
+MPFR=`basename $MPFR1`
+lipo -create $GMP1 deps-arm64/gmp/*/lib/$GMP -output deps-uni/lib/$GMP
+lipo -create $GMPXX1 deps-arm64/gmp/*/lib/$GMPXX -output deps-uni/lib/$GMPXX
+lipo -create $MPFR1 deps-arm64/mpfr/*/lib/$MPFR -output deps-uni/lib/$MPFR
+
+# Necessary even for libs created by lipo
+install_name_tool -id $PWD/deps-uni/lib/$GMP deps-uni/lib/$GMP
+install_name_tool -id $PWD/deps-uni/lib/$GMPXX deps-uni/lib/$GMPXX
+install_name_tool -id $PWD/deps-uni/lib/$MPFR deps-uni/lib/$MPFR
+# Also fix dependencies
+BADGMP=`otool -L deps-uni/lib/$MPFR|sed -ne 's/[[:space:]]*\(.*libgmp\..*dylib\).*/\1/p'`
+install_name_tool -change $BADGMP $PWD/deps-uni/lib/$GMP deps-uni/lib/$MPFR
+BADGMP=`otool -L deps-uni/lib/$GMPXX|sed -ne 's/[[:space:]]*\(.*libgmp\..*dylib\).*/\1/p'`
+install_name_tool -change $BADGMP $PWD/deps-uni/lib/$GMP deps-uni/lib/$GMPXX
+
+ln -s $GMP deps-uni/lib/libgmp.dylib
+ln -s $GMPXX deps-uni/lib/libgmpxx.dylib
+ln -s $MPFR deps-uni/lib/libmpfr.dylib
+
+# Debug
+ls -l deps-uni/lib
+otool -L deps-uni/lib/*.*.dylib
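Beyond the debug block above, each merged library can be checked explicitly for its two architecture slices; a sketch:

```bash
# 'file' reports 'Mach-O universal binary with 2 architectures' for each merged dylib
file deps-uni/lib/*.*.dylib
```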
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 595fb64b..73093c07 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -17,7 +17,7 @@ namespace cech_complex {
/** \defgroup cech_complex Čech complex
*
- * \author Vincent Rouvreau
+ * \author Vincent Rouvreau, Hind Montassif
*
* @{
*
diff --git a/src/Cech_complex/include/gudhi/Cech_complex.h b/src/Cech_complex/include/gudhi/Cech_complex.h
index 625f7c9c..dbdf5e93 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex.h
@@ -1,11 +1,12 @@
/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
* See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Vincent Rouvreau
+ * Author(s): Vincent Rouvreau, Hind Montassif
*
* Copyright (C) 2018 Inria
*
* Modification(s):
* - YYYY/MM Author: Description of the modification
+ * - 2022/02 Hind Montassif : Replace MiniBall with Sphere_circumradius
*/
#ifndef CECH_COMPLEX_H_
diff --git a/src/Cech_complex/utilities/cechcomplex.md b/src/Cech_complex/utilities/cechcomplex.md
index 0e82674d..54c4e88d 100644
--- a/src/Cech_complex/utilities/cechcomplex.md
+++ b/src/Cech_complex/utilities/cechcomplex.md
@@ -36,14 +36,14 @@ where
* `-h [ --help ]` Produce help message
* `-o [ --output-file ]` Name of file in which the persistence diagram is written. Default print in standard output.
-* `-r [ --max-edge-length ]` (default = inf) Maximal length of an edge for the Čech complex construction.
+* `-r [ --max-radius ]` (default = inf) Maximal radius for the Čech complex construction.
* `-d [ --cpx-dimension ]` (default = 1) Maximal dimension of the Čech complex we want to compute.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature to be recorded. Enter a negative value to see zero length intervals.
* `-e [ --exact ]` for the exact computation version.
* `-f [ --fast ]` for the fast computation version.
-Beware: this program may use a lot of RAM and take a lot of time if `max-edge-length` is set to a large value.
+Beware: this program may use a lot of RAM and take a lot of time if `max-radius` is set to a large value.
**Example 1 with Z/2Z coefficients**
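With the renamed option, a typical invocation now bounds the radius rather than the edge length; a sketch, assuming the `cech_persistence` utility this page documents and a placeholder input file:

```bash
# points.off is an illustrative point cloud in OFF format; -r is the new --max-radius
./cech_persistence -r 0.5 -d 2 -p 2 -m 0.05 points.off
```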
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index 1ec190d9..d5664a49 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -700,7 +700,6 @@ LAYOUT_FILE =
# search path. See also \cite for info how to create references.
CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \
- @CMAKE_SOURCE_DIR@/biblio/how_to_cite_cgal.bib \
@CMAKE_SOURCE_DIR@/biblio/how_to_cite_gudhi.bib
#---------------------------------------------------------------------------
diff --git a/src/Nerve_GIC/example/CMakeLists.txt b/src/Nerve_GIC/example/CMakeLists.txt
index 4b0f4677..9faf1f3b 100644
--- a/src/Nerve_GIC/example/CMakeLists.txt
+++ b/src/Nerve_GIC/example/CMakeLists.txt
@@ -1,25 +1,21 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( CoordGIC CoordGIC.cpp )
+add_executable ( FuncGIC FuncGIC.cpp )
- add_executable ( CoordGIC CoordGIC.cpp )
- add_executable ( FuncGIC FuncGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(CoordGIC ${TBB_LIBRARIES})
+ target_link_libraries(FuncGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(CoordGIC ${TBB_LIBRARIES})
- target_link_libraries(FuncGIC ${TBB_LIBRARIES})
- endif()
+# Copy files so as not to pollute the sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
- add_test(NAME Nerve_GIC_example_CoordGIC COMMAND $<TARGET_FILE:CoordGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/tore3D_1307.off" "0")
-
- add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
- "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_test(NAME Nerve_GIC_example_FuncGIC COMMAND $<TARGET_FILE:FuncGIC>
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat.off"
+ "${CMAKE_CURRENT_BINARY_DIR}/lucky_cat_PCA1")
diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index 1b1f9323..047fba61 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -17,6 +17,14 @@
#include <mutex>
#endif
+#if __has_include(<CGAL/version.h>)
+# define GUDHI_GIC_USE_CGAL 1
+# include <gudhi/Bottleneck.h>
+#elif __has_include(<hera/bottleneck.h>)
+# define GUDHI_GIC_USE_HERA 1
+# include <hera/bottleneck.h>
+#endif
+
#include <gudhi/Debug_utils.h>
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/reader_utils.h>
@@ -25,7 +33,6 @@
#include <gudhi/Points_off_io.h>
#include <gudhi/distance_functions.h>
#include <gudhi/Persistent_cohomology.h>
-#include <gudhi/Bottleneck.h>
#include <boost/config.hpp>
#include <boost/graph/graph_traits.hpp>
@@ -35,8 +42,6 @@
#include <boost/graph/subgraph.hpp>
#include <boost/graph/graph_utility.hpp>
-#include <CGAL/version.h> // for CGAL_VERSION_NR
-
#include <iostream>
#include <vector>
#include <map>
@@ -1228,7 +1233,14 @@ class Cover_complex {
Cboot.set_cover_from_function();
Cboot.find_simplices();
Cboot.compute_PD();
+#ifdef GUDHI_GIC_USE_CGAL
double db = Gudhi::persistence_diagram::bottleneck_distance(this->PD, Cboot.PD);
+#elif defined GUDHI_GIC_USE_HERA
+ double db = hera::bottleneckDistExact(this->PD, Cboot.PD);
+#else
+ double db;
+ throw std::logic_error("This function requires CGAL or Hera for the bottleneck distance.");
+#endif
if (verbose) std::clog << db << std::endl;
distribution.push_back(db);
}
diff --git a/src/Nerve_GIC/test/CMakeLists.txt b/src/Nerve_GIC/test/CMakeLists.txt
index 567bf43f..e012a178 100644
--- a/src/Nerve_GIC/test/CMakeLists.txt
+++ b/src/Nerve_GIC/test/CMakeLists.txt
@@ -1,15 +1,12 @@
project(Graph_induced_complex_tests)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_boost_test)
+include(GUDHI_boost_test)
- add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
- if (TBB_FOUND)
- target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
- endif()
+add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
+endif()
- file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- gudhi_add_boost_test(Nerve_GIC_test_unit)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+gudhi_add_boost_test(Nerve_GIC_test_unit)
diff --git a/src/Nerve_GIC/utilities/CMakeLists.txt b/src/Nerve_GIC/utilities/CMakeLists.txt
index 65a08d9a..4521a992 100644
--- a/src/Nerve_GIC/utilities/CMakeLists.txt
+++ b/src/Nerve_GIC/utilities/CMakeLists.txt
@@ -1,27 +1,23 @@
project(Nerve_GIC_examples)
-if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+add_executable ( Nerve Nerve.cpp )
+add_executable ( VoronoiGIC VoronoiGIC.cpp )
- add_executable ( Nerve Nerve.cpp )
- add_executable ( VoronoiGIC VoronoiGIC.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Nerve ${TBB_LIBRARIES})
+ target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
+endif()
- if (TBB_FOUND)
- target_link_libraries(Nerve ${TBB_LIBRARIES})
- target_link_libraries(VoronoiGIC ${TBB_LIBRARIES})
- endif()
+file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+# Copy files so as not to pollute the sources when testing
+file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY KeplerMapperVisuFromTxtFile.py km.py DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- # Copy files for not to pollute sources when testing
- file(COPY "${CMAKE_SOURCE_DIR}/data/points/human.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
+ "human.off" "2" "10" "0.3")
- add_test(NAME Nerve_GIC_utilities_nerve COMMAND $<TARGET_FILE:Nerve>
- "human.off" "2" "10" "0.3")
+add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
+ "human.off" "100")
- add_test(NAME Nerve_GIC_utilities_VoronoiGIC COMMAND $<TARGET_FILE:VoronoiGIC>
- "human.off" "100")
-
- install(TARGETS Nerve DESTINATION bin)
- install(TARGETS VoronoiGIC DESTINATION bin)
- install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
-
-endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+install(TARGETS Nerve DESTINATION bin)
+install(TARGETS VoronoiGIC DESTINATION bin)
+install(FILES KeplerMapperVisuFromTxtFile.py km.py km.py.COPYRIGHT DESTINATION bin)
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index ef9f8428..4177a0b8 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -24,6 +24,7 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/range/adaptor/reversed.hpp>
+#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/size.hpp>
#include <boost/container/static_vector.hpp>
@@ -1122,16 +1123,12 @@ class Simplex_tree {
dimension_ = 1;
}
- root_.members_.reserve(num_vertices(skel_graph));
+ root_.members_.reserve(num_vertices(skel_graph)); // probably useless in most cases
+ auto verts = vertices(skel_graph) | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, get(vertex_filtration_t(), skel_graph, v))); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+  // This automatically sorts the vertices; the graph concept doesn't guarantee the order in which we iterate.
- typename boost::graph_traits<OneSkeletonGraph>::vertex_iterator v_it,
- v_it_end;
- for (std::tie(v_it, v_it_end) = vertices(skel_graph); v_it != v_it_end;
- ++v_it) {
- root_.members_.emplace_hint(
- root_.members_.end(), *v_it,
- Node(&root_, get(vertex_filtration_t(), skel_graph, *v_it)));
- }
std::pair<typename boost::graph_traits<OneSkeletonGraph>::edge_iterator,
typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = edges(skel_graph);
// boost_edges.first is the equivalent to boost_edges.begin()
@@ -1140,7 +1137,7 @@ class Simplex_tree {
auto edge = *(boost_edges.first);
auto u = source(edge, skel_graph);
auto v = target(edge, skel_graph);
- if (u == v) throw "Self-loops are not simplicial";
+ if (u == v) throw std::invalid_argument("Self-loops are not simplicial");
// We cannot skip edges with the wrong orientation and expect them to
// come a second time with the right orientation, that does not always
// happen in practice. emplace() should be a NOP when an element with the
@@ -1159,6 +1156,21 @@ class Simplex_tree {
}
}
+ /** \brief Inserts several vertices.
+ * @param[in] vertices A range of Vertex_handle
+ * @param[in] filt filtration value of the new vertices (the same for all)
+ *
+ * This may be faster than inserting the vertices one by one, especially in a random order.
+ * The complex does not need to be empty before calling this function. However, if a vertex is
+ * already present, its filtration value is not modified, unlike with other insertion functions. */
+ template <class VertexRange>
+ void insert_batch_vertices(VertexRange const& vertices, Filtration_value filt = 0) {
+ auto verts = vertices | boost::adaptors::transformed([&](auto v){
+ return Dit_value_t(v, Node(&root_, filt)); });
+ root_.members_.insert(boost::begin(verts), boost::end(verts));
+ if (dimension_ < 0 && !root_.members_.empty()) dimension_ = 0;
+ }
+
/** \brief Expands the Simplex_tree containing only its one skeleton
* until dimension max_dim.
*
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 79bb5a93..ebcc406c 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -1038,3 +1038,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_boundaries_and_opposite_vertex_iterat
BOOST_CHECK(opposite_vertices.size() == 0);
}
}
+
+BOOST_AUTO_TEST_CASE(batch_vertices) {
+ typedef Simplex_tree<> typeST;
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST BATCH VERTEX INSERTION" << std::endl;
+ typeST st;
+ st.insert_simplex_and_subfaces({3}, 1.5);
+ std::vector verts { 2, 3, 5, 6 };
+ st.insert_batch_vertices(verts);
+ BOOST_CHECK(st.num_vertices() == 4);
+ BOOST_CHECK(st.num_simplices() == 4);
+ BOOST_CHECK(st.filtration(st.find({2})) == 0.);
+ BOOST_CHECK(st.filtration(st.find({3})) == 1.5);
+}
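Once built, the new check can be exercised in isolation through CTest, mirroring the `ctest --output-on-failure` call this patch adds to CI; the name filter below assumes the usual `Simplex_tree` test naming:

```bash
cd build
ctest -R Simplex_tree --output-on-failure
```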
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index 56a24af0..ab203ca5 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -56,6 +56,7 @@
#include <string>
#include <cstddef> // for std::size_t
#include <optional>
+#include <numeric> // for std::iota
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_for.h>
@@ -345,10 +346,11 @@ class Tangential_complex {
m_stars.resize(m_points.size());
m_squared_star_spheres_radii_incl_margin.resize(m_points.size(), FT(-1));
#ifdef GUDHI_TC_PERTURB_POSITION
- if (m_points.empty())
+ if (m_points.empty()) {
m_translations.clear();
- else
+ } else {
m_translations.resize(m_points.size(), m_k.construct_vector_d_object()(m_ambient_dim));
+ }
#if defined(GUDHI_USE_TBB)
delete[] m_p_perturb_mutexes;
m_p_perturb_mutexes = new Mutex_for_perturb[m_points.size()];
@@ -623,6 +625,11 @@ class Tangential_complex {
int max_dim = -1;
+ // Ordered vertices to be inserted first by the create_complex method to avoid quadratic complexity.
+ std::vector<typename Simplex_tree_::Vertex_handle> vertices(m_points.size());
+ std::iota(vertices.begin(), vertices.end(), 0);
+ tree.insert_batch_vertices(vertices);
+
// For each triangulation
for (std::size_t idx = 0; idx < m_points.size(); ++idx) {
// For each cell of the star
diff --git a/src/cmake/modules/GUDHI_submodules.cmake b/src/cmake/modules/GUDHI_submodules.cmake
index 78b045bd..9ede852d 100644
--- a/src/cmake/modules/GUDHI_submodules.cmake
+++ b/src/cmake/modules/GUDHI_submodules.cmake
@@ -1,5 +1,5 @@
# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
-set(HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include)
-set(HERA_WASSERSTEIN_INCLUDE_DIR ${HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's wasserstein.h")
-set(HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/bottleneck/include)
-set(HERA_BOTTLENECK_INCLUDE_DIR ${HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's bottleneck.h") \ No newline at end of file
+set(HERA_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/include)
+set(HERA_INCLUDE_DIR ${HERA_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find hera/{wasserstein.h,bottleneck.h}")
+# Since everything is cleanly under include/hera/, there is no harm in always including it
+include_directories(${HERA_INCLUDE_DIR})
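To build against a preinstalled Hera rather than the bundled submodule, the cache variable can point at the external headers; a sketch with a hypothetical install prefix:

```bash
# /opt/hera is illustrative; it must contain hera/wasserstein.h and hera/bottleneck.h
cmake -DHERA_INCLUDE_DIR=/opt/hera/include ..
```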
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 4487ad86..b9bf1414 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -18,14 +18,17 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
+file(COPY "${CMAKE_SOURCE_DIR}/biblio/test" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio")
# append cgal citation inside bibliography - sphinx cannot deal with more than one bib file
file(READ "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" CGAL_CITATION_CONTENT)
file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib" "${CGAL_CITATION_CONTENT}")
-# Copy biblio directory for user version
+# Copy biblio files for user version
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/bibliography.bib ${GUDHI_USER_VERSION_DIR}/biblio/bibliography.bib)
+add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy ${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib ${GUDHI_USER_VERSION_DIR}/biblio/how_to_cite_gudhi.bib)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/README.md ${GUDHI_USER_VERSION_DIR}/README.md)
@@ -60,10 +63,9 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI)
-if(HERA_WASSERSTEIN_INCLUDE_DIR STREQUAL HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR OR
- HERA_BOTTLENECK_INCLUDE_DIR STREQUAL HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR)
+if(HERA_INCLUDE_DIR STREQUAL HERA_INTERNAL_INCLUDE_DIR)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_SOURCE_DIR}/ext/hera ${GUDHI_USER_VERSION_DIR}/ext/hera)
+ copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/include ${GUDHI_USER_VERSION_DIR}/ext/hera/include)
endif()
set(GUDHI_DIRECTORIES "doc;example;concept;utilities")
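The copies above feed the `user_version` target; running it assembles the standalone source tree that the release instructions earlier in this patch then tar up. A sketch:

```bash
cd build
make user_version
# biblio/ now holds bibliography.bib (with the CGAL entry appended) and how_to_cite_gudhi.bib
ls gudhi.*/biblio
```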
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index ce903405..9b7c2853 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -178,7 +178,7 @@
The set of all simplices is filtered by the radius of their minimal enclosing ball.
</td>
<td width="15%">
- <b>Author:</b> Vincent Rouvreau<br>
+ <b>Author:</b> Vincent Rouvreau, Hind Montassif<br>
<b>Introduced in:</b> GUDHI 2.2.0<br>
<b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
<b>Requires:</b> \ref cgal
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 32ec13bd..39e2acd4 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -44,7 +44,7 @@ function( add_gudhi_debug_info DEBUG_INFO )
endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
- if(PYBIND11_FOUND AND CYTHON_FOUND)
+ if(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
# PyBind11 modules
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
@@ -163,10 +163,10 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'clustering/_tomato', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/_points', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
- set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
@@ -432,38 +432,38 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/bottleneck_basic_example.py")
add_gudhi_py_test(test_bottleneck_distance)
+ endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- # Cover complex
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- add_test(NAME cover_complex_nerve_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
- -f human.off -c 2 -r 10 -g 0.3)
+ # Cover complex
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ add_test(NAME cover_complex_nerve_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
+ -f human.off -c 2 -r 10 -g 0.3)
- add_test(NAME cover_complex_coordinate_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
- -f human.off -c 0 -v)
+ add_test(NAME cover_complex_coordinate_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
+ -f human.off -c 0 -v)
- add_test(NAME cover_complex_functional_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
- -o lucky_cat.off
- -f lucky_cat_PCA1 -v)
+ add_test(NAME cover_complex_functional_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
+ -o lucky_cat.off
+ -f lucky_cat_PCA1 -v)
- add_test(NAME cover_complex_voronoi_gic_example_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
- ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
- -f human.off -n 700 -v)
+ add_test(NAME cover_complex_voronoi_gic_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
+ -f human.off -n 700 -v)
- add_gudhi_py_test(test_cover_complex)
- endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_gudhi_py_test(test_cover_complex)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Alpha
@@ -623,10 +623,10 @@ if(PYTHONINTERP_FOUND)
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
- else(PYBIND11_FOUND AND CYTHON_FOUND)
- message("++ Python module will not be compiled because cython and/or pybind11 was/were not found")
+ else(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
+ message("++ Python module will not be compiled because numpy and/or cython and/or pybind11 was/were not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(PYBIND11_FOUND AND CYTHON_FOUND)
+ endif(NUMPY_FOUND AND PYBIND11_FOUND AND CYTHON_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
index 4298aea9..9515f044 100644
--- a/src/python/doc/representations_sum.inc
+++ b/src/python/doc/representations_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer |
- | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
- | | | :Since: GUDHI 3.1.0 |
- | | | |
- | | | :License: MIT |
- | | | |
- | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | * :doc:`representations` |
- +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer, Gard Spreemann, Wojciech Reise |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
+ | | | :Since: GUDHI 3.1.0 |
+ | | | |
+ | | | :License: MIT |
+ | | | |
+ | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/gudhi/hera/bottleneck.cc b/src/python/gudhi/hera/bottleneck.cc
index 0cb562ce..ec461f7c 100644
--- a/src/python/gudhi/hera/bottleneck.cc
+++ b/src/python/gudhi/hera/bottleneck.cc
@@ -16,7 +16,7 @@
using py::ssize_t;
#endif
-#include <bottleneck.h> // Hera
+#include <hera/bottleneck.h> // Hera
double bottleneck_distance(Dgm d1, Dgm d2, double delta)
{
diff --git a/src/python/gudhi/hera/wasserstein.cc b/src/python/gudhi/hera/wasserstein.cc
index fa0cf8aa..3516352e 100644
--- a/src/python/gudhi/hera/wasserstein.cc
+++ b/src/python/gudhi/hera/wasserstein.cc
@@ -8,10 +8,16 @@
* - YYYY/MM Author: Description of the modification
*/
-#include <wasserstein.h> // Hera
-
#include <pybind11_diagram_utils.h>
+#ifdef _MSC_VER
+// https://github.com/grey-narn/hera/issues/3
+// ssize_t is a non-standard type (it is POSIX, not standard C++)
+using py::ssize_t;
+#endif
+
+#include <hera/wasserstein.h> // Hera
+
double wasserstein_distance(
Dgm d1, Dgm d2,
double wasserstein_power, double internal_p,
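For context, these relocated headers back the gudhi.hera Python module. A minimal usage sketch, assuming the keyword arguments exposed by the current gudhi.hera bindings:

    import numpy as np
    from gudhi.hera import bottleneck_distance, wasserstein_distance

    d1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    d2 = np.array([[0.0, 1.1], [0.6, 1.9]])

    # Approximate bottleneck distance with relative error at most delta.
    b = bottleneck_distance(d1, d2, delta=0.01)
    # 1-Wasserstein distance with the sup-norm as internal ground metric.
    w = wasserstein_distance(d1, d2, order=1.0, internal_p=np.inf)
    print(b, w)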
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index a169aee8..745fe1e5 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -13,8 +13,13 @@ import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
-from sklearn.neighbors import DistanceMetric
from sklearn.metrics import pairwise
+try:
+ # New location since 1.0
+ from sklearn.metrics import DistanceMetric
+except ImportError:
+ # Will be removed in 1.3
+ from sklearn.neighbors import DistanceMetric
from .preprocessing import DiagramScaler, BirthPersistenceTransform
@@ -101,7 +106,7 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
"""
return self.fit_transform([diag])[0,:]
-def _automatic_sample_range(sample_range, X, y):
+def _automatic_sample_range(sample_range, X):
"""
Computes and returns the sample range from the persistence diagrams if one of the sample_range values is numpy.nan.
@@ -114,7 +119,7 @@ def _automatic_sample_range(sample_range, X, y):
nan_in_range = np.isnan(sample_range)
if nan_in_range.any():
try:
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X)
[mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
[Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
return np.where(nan_in_range, np.array([mx, My]), sample_range)
@@ -124,7 +129,7 @@ def _automatic_sample_range(sample_range, X, y):
return sample_range
-def _trim_on_edges(x, are_endpoints_nan):
+def _trim_endpoints(x, are_endpoints_nan):
if are_endpoints_nan[0]:
x = x[1:]
if are_endpoints_nan[1]:
@@ -132,11 +137,26 @@ def _trim_on_edges(x, are_endpoints_nan):
return x
+def _grid_from_sample_range(self, X):
+ sample_range = np.array(self.sample_range_init)
+ self.nan_in_range = np.isnan(sample_range)
+ self.new_resolution = self.resolution
+ if not self.keep_endpoints:
+ self.new_resolution += self.nan_in_range.sum()
+ self.sample_range = _automatic_sample_range(sample_range, X)
+ self.grid_ = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
+ if not self.keep_endpoints:
+ self.grid_ = _trim_endpoints(self.grid_, self.nan_in_range)
+
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the landscapes are computed.
"""
- def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Landscape class.
@@ -144,10 +164,10 @@ class Landscape(BaseEstimator, TransformerMixin):
num_landscapes (int): number of piecewise-linear functions to output (default 5).
resolution (int): number of samples for all piecewise-linear functions (default 100).
sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
- self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.num_landscapes, self.resolution, self.sample_range_init = num_landscapes, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -157,9 +177,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.im_range = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- self.im_range = _trim_on_edges(self.im_range, self.nan_in_range)
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -174,7 +192,7 @@ class Landscape(BaseEstimator, TransformerMixin):
"""
Xfit = []
- x_values = self.im_range
+ x_values = self.grid_
for diag in X:
midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
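To illustrate the new keep_endpoints flag on Landscape, a short hedged sketch (not part of the patch; grid_ is the fitted attribute documented above):

    import numpy as np
    from gudhi.representations import Landscape

    diags = [np.array([[2.0, 6.0], [3.0, 5.0]])]
    # Default: the automatically computed grid drops the exact extremities.
    ld = Landscape(num_landscapes=2, resolution=10).fit(diags)
    assert ld.grid_[0] > 2.0 and ld.grid_[-1] < 6.0
    # keep_endpoints=True keeps them, where every landscape value is 0.
    ldk = Landscape(num_landscapes=2, resolution=10, keep_endpoints=True).fit(diags)
    assert ldk.grid_[0] == 2.0 and ldk.grid_[-1] == 6.0
    vectors = ldk.transform(diags)  # shape (1, num_landscapes * resolution)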
@@ -208,8 +226,11 @@ class Landscape(BaseEstimator, TransformerMixin):
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the silhouette is computed.
"""
- def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Silhouette class.
@@ -217,10 +238,10 @@ class Silhouette(BaseEstimator, TransformerMixin):
weight (function): weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie on lists or numpy arrays of the form [p_x,p_y].
resolution (int): number of samples for the weighted average (default 100).
sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
- self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.weight, self.resolution, self.sample_range_init = weight, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -230,9 +251,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.im_range = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- self.im_range = _trim_on_edges(self.im_range, self.nan_in_range)
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -246,7 +265,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
"""
Xfit = []
- x_values = self.im_range
+ x_values = self.grid_
for diag in X:
midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
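Silhouette gains the same flag; a hedged sketch with a persistence-based weight:

    import numpy as np
    from gudhi.representations import Silhouette

    diags = [np.array([[0.0, 4.0], [1.0, 2.0]])]
    # Weight each diagram point by its persistence so that long bars dominate the average.
    sil = Silhouette(weight=lambda p: p[1] - p[0], resolution=20, keep_endpoints=True)
    curves = sil.fit_transform(diags)  # shape (1, resolution)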
@@ -275,36 +294,39 @@ class Silhouette(BaseEstimator, TransformerMixin):
class BettiCurve(BaseEstimator, TransformerMixin):
"""
Compute Betti curves from persistence diagrams. There are several modes of operation: with a given resolution (with or without a sample_range), with a predefined grid, and with none of the previous. With a predefined grid, the class computes the Betti numbers at those grid points. Without a predefined grid, if the resolution is set to None, it can be fit to a list of persistence diagrams and produce a grid that consists of (at least) the filtration values at which at least one of those persistence diagrams changes Betti numbers, and then compute the Betti numbers at those grid points. In the latter mode, the exact Betti curve is computed for the entire real line. Otherwise, if the resolution is given, the Betti curve is obtained by sampling evenly using either the given sample_range or based on the persistence diagrams.
- """
- def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None):
- """
- Constructor for the BettiCurve class.
+ Examples
+ --------
+ If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
- Parameters:
- resolution (int): number of sample for the piecewise-constant function (default 100).
- sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
- predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
+ >>> result = bc(pd) # doctest: +SKIP
- Attributes:
- grid_ (1d array): The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not, the grid is fitted to capture all filtration values at which the Betti numbers change.
+ and
- Examples
- --------
- If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
+ >>> from scipy.interpolate import interp1d # doctest: +SKIP
+ >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
+ >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
+ >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
+ >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
- >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
- >>> result = bc(pd) # doctest: +SKIP
+ are the same.
- and
+ Attributes
+ ----------
+ grid_ : 1d array
+ The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not and resolution is None, the grid is fitted to capture all filtration values at which the Betti numbers change.
+ """
- >>> from scipy.interpolate import interp1d # doctest: +SKIP
- >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
- >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
- >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
- >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None, *, keep_endpoints=False):
+ """
+ Constructor for the BettiCurve class.
- are the same.
+ Parameters:
+ resolution (int): number of samples for the piecewise-constant function (default 100), or None for the exact curve.
+ sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ keep_endpoints (bool): when computing `sample_range` (fixed `resolution`, no `predefined_grid`), use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
if (predefined_grid is not None) and (not isinstance(predefined_grid, np.ndarray)):
@@ -312,7 +334,8 @@ class BettiCurve(BaseEstimator, TransformerMixin):
self.predefined_grid = predefined_grid
self.resolution = resolution
- self.sample_range = sample_range
+ self.sample_range_init = sample_range
+ self.keep_endpoints = keep_endpoints
def is_fitted(self):
return hasattr(self, "grid_")
@@ -331,8 +354,7 @@ class BettiCurve(BaseEstimator, TransformerMixin):
events = np.unique(np.concatenate([pd.flatten() for pd in X] + [[-np.inf]], axis=0))
self.grid_ = np.array(events)
else:
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
- self.grid_ = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ _grid_from_sample_range(self, X)
else:
self.grid_ = self.predefined_grid # Get the predefined grid from user
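The exact mode described in the reorganized docstring can be sketched as follows (the printed values were worked out by hand for this toy diagram, so treat them as illustrative):

    import numpy as np
    from gudhi.representations import BettiCurve

    diags = [np.array([[2.0, 3.0], [2.5, 4.0]])]
    bc = BettiCurve(resolution=None, predefined_grid=None).fit(diags)
    # grid_ starts at -inf and lists every filtration value where a Betti number changes.
    print(bc.grid_)             # [-inf  2.   2.5  3.   4. ]
    print(bc.transform(diags))  # Betti numbers at the grid points: [[0 1 2 1 0]]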
@@ -431,8 +453,11 @@ class BettiCurve(BaseEstimator, TransformerMixin):
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired by Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+
+ Attributes:
+ grid_ (1d array): In vector mode, the grid on which the entropy summary function is computed.
"""
- def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Entropy class.
@@ -441,8 +466,10 @@ class Entropy(BaseEstimator, TransformerMixin):
normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
- self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+ self.mode, self.normalized, self.resolution, self.sample_range_init = mode, normalized, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -452,7 +479,9 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
+ if self.mode == "vector":
+ _grid_from_sample_range(self, X)
+ self.step_ = self.grid_[1] - self.grid_[0]
return self
def transform(self, X):
@@ -466,8 +495,6 @@ class Entropy(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
"""
num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
@@ -482,8 +509,8 @@ class Entropy(BaseEstimator, TransformerMixin):
ent = np.zeros(self.resolution)
for j in range(num_pts_in_diag):
[px,py] = orig_diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ min_idx = np.clip(np.ceil((px - self.sample_range[0]) / self.step_).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range[0]) / self.step_).astype(int), 0, self.resolution)
ent[min_idx:max_idx]-=p[j]*np.log(p[j])
if self.normalized:
ent = ent / np.linalg.norm(ent, ord=1)
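With this change, step_ is computed once in fit from the fitted grid; a hedged sketch of the vector mode:

    import numpy as np
    from gudhi.representations import Entropy

    diags = [np.array([[0.0, 1.0], [0.0, 2.0]])]
    ent = Entropy(mode="vector", resolution=8, normalized=True, keep_endpoints=True).fit(diags)
    summary = ent.transform(diags)  # entropy summary function sampled on ent.grid_
    # With normalized=True the summary has unit L1 norm (up to floating point).
    assert np.isclose(np.linalg.norm(summary[0], ord=1), 1.0)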
@@ -683,17 +710,17 @@ class Atol(BaseEstimator, TransformerMixin):
>>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
>>> c = np.array([[3, 2, -1], [1, 2, -1]])
>>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
- >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP
- >>> # array([[ 2. , 0.66666667, 3.33333333],
- >>> # [ 2.6 , 2.8 , -0.4 ]])
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers
+ array([[ 2.6 , 2.8 , -0.4 ],
+ [ 2. , 0.66666667, 3.33333333]])
>>> atol_vectoriser(a)
- >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP
+ array([0.42375966, 1.18168665])
>>> atol_vectoriser(c)
- >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP
- >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP
- >>> # array([[1.18168665, 0.42375966],
- >>> # [0.29861028, 1.06330156],
- >>> # [0.02062512, 1.25157463]])
+ array([1.25157463, 0.02062512])
+ >>> atol_vectoriser.transform(X=[a, b, c])
+ array([[0.42375966, 1.18168665],
+ [1.06330156, 0.29861028],
+ [1.25157463, 0.02062512]])
"""
# Note the example above must be up to date with the one in tests called test_atol_doc
def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
@@ -744,6 +771,8 @@ class Atol(BaseEstimator, TransformerMixin):
measures_concat = np.concatenate(X)
self.quantiser.fit(X=measures_concat, sample_weight=sample_weight)
self.centers = self.quantiser.cluster_centers_
+ # Hack: sort the centers so that their order does not depend on the sklearn version
+ self.centers = self.centers[np.lexsort(self.centers.T)]
if self.quantiser.n_clusters == 1:
dist_centers = pairwise.pairwise_distances(measures_concat)
np.fill_diagonal(dist_centers, 0)
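The np.lexsort call sorts the rows of centers using the last column as the primary key (earlier columns break ties), which is what makes the doctest output above stable across sklearn versions; a tiny demonstration:

    import numpy as np

    centers = np.array([[2.6, 2.8, -0.4],
                        [2.0, 0.66666667, 3.33333333]])
    # np.lexsort takes keys with the LAST one primary; centers.T makes the last column primary.
    order = np.lexsort(centers.T)
    print(centers[order])  # the row ending in -0.4 sorts first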
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index f86f1232..5309c6fa 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -56,7 +56,8 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
int upper_bound_dimension() nogil
bool find_simplex(vector[int] simplex) nogil
bool insert(vector[int] simplex, double filtration) nogil
- void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) nogil
+ void insert_matrix(double* filtrations, int n, int stride0, int stride1, double max_filtration) nogil except +
+ void insert_batch_vertices(vector[int] v, double f) nogil except +
vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil
vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil
void expansion(int max_dim) nogil except +
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 24b970c4..4cf176f5 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -12,6 +12,7 @@ from libc.stdint cimport intptr_t, int32_t, int64_t
import numpy as np
cimport gudhi.simplex_tree
cimport cython
+from numpy.math cimport INFINITY
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -239,7 +240,7 @@ cdef class SimplexTree:
@staticmethod
@cython.boundscheck(False)
- def create_from_array(filtrations, double max_filtration=np.inf):
+ def create_from_array(filtrations, double max_filtration=INFINITY):
"""Creates a new, empty complex and inserts vertices and edges. The vertices are numbered from 0 to n-1, and
the filtration values are encoded in the array, with the diagonal representing the vertices. It is the
caller's responsibility to ensure that this defines a filtration, which can be achieved with either::
@@ -281,6 +282,8 @@ cdef class SimplexTree:
.. seealso:: :func:`insert_batch`
"""
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ self.get_ptr().insert_batch_vertices(np.unique(np.stack((edges.row, edges.col))), INFINITY)
# TODO: optimize this?
for edge in zip(edges.row, edges.col, edges.data):
self.get_ptr().insert((edge[0], edge[1]), edge[2])
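A hedged sketch of the method this hunk touches (insert_edges_from_coo_matrix), which now pre-inserts all vertices in sorted order before the edge loop:

    import numpy as np
    from scipy.sparse import coo_matrix
    from gudhi import SimplexTree

    # Lower-triangular COO matrix: entry (i, j) is the filtration value of edge {i, j}.
    edges = coo_matrix((np.array([0.5, 0.8]), (np.array([1, 2]), np.array([0, 1]))))
    st = SimplexTree()
    st.insert_edges_from_coo_matrix(edges)
    assert st.num_vertices() == 3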
@@ -303,8 +306,7 @@ cdef class SimplexTree:
:param filtrations: the filtration values.
:type filtrations: numpy.array of shape (n,)
"""
- # This may be slow if we end up inserting vertices in a bad order (flat_map).
- # We could first insert the vertices from np.unique(vertex_array), or leave it to the caller.
+ cdef vector[int] vertices = np.unique(vertex_array)
cdef Py_ssize_t k = vertex_array.shape[0]
cdef Py_ssize_t n = vertex_array.shape[1]
assert filtrations.shape[0] == n, 'inconsistent sizes for vertex_array and filtrations'
@@ -312,6 +314,9 @@ cdef class SimplexTree:
cdef Py_ssize_t j
cdef vector[int] v
with nogil:
+ # Without this, it could be slow if we end up inserting vertices in a bad order (flat_map).
+ # NaN would be a natural placeholder filtration here, but it currently does the wrong thing, hence INFINITY
+ self.get_ptr().insert_batch_vertices(vertices, INFINITY)
for i in range(n):
for j in range(k):
v.push_back(vertex_array[j, i])
@@ -466,7 +471,7 @@ cdef class SimplexTree:
"""
return self.get_ptr().prune_above_filtration(filtration)
- def expansion(self, max_dim):
+ def expansion(self, max_dimension):
"""Expands the simplex tree containing only its one skeleton
until dimension max_dimension.
@@ -480,10 +485,10 @@ cdef class SimplexTree:
The simplex tree must contain no simplex of dimension bigger than
1 when calling the method.
- :param max_dim: The maximal dimension.
- :type max_dim: int
+ :param max_dimension: The maximal dimension.
+ :type max_dimension: int
"""
- cdef int maxdim = max_dim
+ cdef int maxdim = max_dimension
with nogil:
self.get_ptr().expansion(maxdim)
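Combining the two changes to this file, a hedged sketch: batch-insert a 1-skeleton, then expand it with the renamed max_dimension argument:

    import numpy as np
    from gudhi import SimplexTree

    st = SimplexTree()
    # Two edges {0, 1} and {1, 2}: vertex_array has shape (k, n) = (2, 2).
    st.insert_batch(np.array([[0, 1], [1, 2]]), np.array([0.1, 0.2]))
    st.insert([0, 2], 0.3)         # close a triangle in the 1-skeleton
    st.expansion(max_dimension=2)  # flag-complex expansion up to dimension 2
    assert st.dimension() == 2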
diff --git a/src/python/include/Alpha_complex_factory.h b/src/python/include/Alpha_complex_factory.h
index 3d20aa8f..41eb72c1 100644
--- a/src/python/include/Alpha_complex_factory.h
+++ b/src/python/include/Alpha_complex_factory.h
@@ -106,7 +106,7 @@ class Exact_alpha_complex_dD final : public Abstract_alpha_complex {
return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, exact_version_, default_filtration_value);
}
- virtual std::size_t num_vertices() const {
+ virtual std::size_t num_vertices() const override {
return alpha_complex_.num_vertices();
}
@@ -141,7 +141,7 @@ class Inexact_alpha_complex_dD final : public Abstract_alpha_complex {
return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, false, default_filtration_value);
}
- virtual std::size_t num_vertices() const {
+ virtual std::size_t num_vertices() const override {
return alpha_complex_.num_vertices();
}
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index 2c67c2c5..6eb0db42 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -48,10 +48,6 @@ ext_modules = cythonize(ext_modules, compiler_directives={'language_level': '3'}
for module in pybind11_modules:
my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
- if module == 'hera/wasserstein':
- my_include_dirs = ['@HERA_WASSERSTEIN_INCLUDE_DIR@'] + my_include_dirs
- elif module == 'hera/bottleneck':
- my_include_dirs = ['@HERA_BOTTLENECK_INCLUDE_DIR@'] + my_include_dirs
ext_modules.append(Extension(
'gudhi.' + module.replace('/', '.'),
sources = [source_dir + module + '.cc'],
diff --git a/src/python/test/test_persistence_graphical_tools.py b/src/python/test/test_persistence_graphical_tools.py
index c19836b7..0e2ac3f8 100644
--- a/src/python/test/test_persistence_graphical_tools.py
+++ b/src/python/test/test_persistence_graphical_tools.py
@@ -12,6 +12,7 @@ import gudhi as gd
import numpy as np
import matplotlib as plt
import pytest
+import warnings
def test_array_handler():
@@ -71,13 +72,13 @@ def test_limit_to_max_intervals():
(0, (0.0, 0.106382)),
]
# check no warnings if max_intervals equals to the diagrams number
- with pytest.warns(None) as record:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error")
truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
diags, 10, key=lambda life_time: life_time[1][1] - life_time[1][0]
)
# check diagrams are not sorted
assert truncated_diags == diags
- assert len(record) == 0
# check warning if max_intervals lower than the diagrams number
with pytest.warns(UserWarning) as record:
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index 58caab21..ae0362f8 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -161,7 +161,7 @@ def test_entropy_miscalculation():
return -np.dot(l, np.log(l))
sce = Entropy(mode="scalar")
assert [[pe(diag_ex)]] == sce.fit_transform([diag_ex])
- sce = Entropy(mode="vector", resolution=4, normalized=False)
+ sce = Entropy(mode="vector", resolution=4, normalized=False, keep_endpoints=True)
pef = [-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
-1/2*np.log(1/2),
@@ -170,7 +170,7 @@ def test_entropy_miscalculation():
sce = Entropy(mode="vector", resolution=4, normalized=True)
pefN = (sce.fit_transform([diag_ex]))[0]
area = np.linalg.norm(pefN, ord=1)
- assert area==1
+ assert area==pytest.approx(1)
def test_kernel_empty_diagrams():
empty_diag = np.empty(shape = [0, 2])
@@ -251,3 +251,15 @@ def test_landscape_nan_range():
lds_dgm = lds(dgm)
assert (lds.sample_range[0] == 2) & (lds.sample_range[1] == 6)
assert lds.new_resolution == 10
+
+def test_endpoints():
+ diags = [ np.array([[2., 3.]]) ]
+ for vec in [ Landscape(), Silhouette(), BettiCurve(), Entropy(mode="vector") ]:
+ vec.fit(diags)
+ assert vec.grid_[0] > 2 and vec.grid_[-1] < 3
+ for vec in [ Landscape(keep_endpoints=True), Silhouette(keep_endpoints=True), BettiCurve(keep_endpoints=True), Entropy(mode="vector", keep_endpoints=True)]:
+ vec.fit(diags)
+ assert vec.grid_[0] == 2 and vec.grid_[-1] == 3
+ vec = BettiCurve(resolution=None)
+ vec.fit(diags)
+ assert np.equal(vec.grid_, [-np.inf, 2., 3.]).all()
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 3a004d77..a76b6ce7 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -90,10 +90,11 @@ def test_get_essential_parts():
def test_warn_infty():
- assert _warn_infty(matching=False)==np.inf
- c, m = _warn_infty(matching=True)
- assert (c == np.inf)
- assert (m is None)
+ with pytest.warns(UserWarning):
+ assert _warn_infty(matching=False)==np.inf
+ c, m = _warn_infty(matching=True)
+ assert (c == np.inf)
+ assert (m is None)
def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):