-rw-r--r--  .appveyor.yml | 4
-rw-r--r--  .circleci/config.yml | 137
-rw-r--r--  .github/for_maintainers/new_gudhi_version_creation.md | 27
-rw-r--r--  .github/for_maintainers/tests_strategy.md | 90
-rw-r--r--  .github/how_to_use_github_to_contribute_to_gudhi.md | 3
-rw-r--r--  .github/next_release.md | 14
-rw-r--r--  .github/test-requirements.txt | 2
-rw-r--r--  .github/workflows/pip-build-linux.yml | 11
-rw-r--r--  .github/workflows/pip-build-osx.yml | 4
-rw-r--r--  .github/workflows/pip-build-windows.yml | 4
-rw-r--r--  .github/workflows/pip-packaging-linux.yml | 18
-rw-r--r--  .github/workflows/pip-packaging-osx.yml | 4
-rw-r--r--  .github/workflows/pip-packaging-windows.yml | 4
-rw-r--r--  .gitmodules | 3
-rw-r--r--  CMakeGUDHIVersion.txt | 4
-rw-r--r--  Dockerfile_for_circleci_image | 69
-rw-r--r--  Dockerfile_for_pip | 50
-rw-r--r--  Dockerfile_gudhi_installation | 80
-rw-r--r--  azure-pipelines.yml | 6
m---------  ext/gudhi-deploy | 0
-rw-r--r--  src/Alpha_complex/test/CMakeLists.txt | 14
-rw-r--r--  src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp | 60
-rw-r--r--  src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp | 102
-rw-r--r--  src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp | 77
-rw-r--r--  src/Rips_complex/include/gudhi/Sparse_rips_complex.h | 14
-rw-r--r--  src/Spatial_searching/include/gudhi/Kd_tree_search.h | 98
-rw-r--r--  src/Subsampling/example/CMakeLists.txt | 4
-rw-r--r--  src/Subsampling/example/example_choose_n_farthest_points.cpp | 2
-rw-r--r--  src/Subsampling/example/example_custom_distance.cpp | 44
-rw-r--r--  src/Subsampling/example/example_custom_kernel.cpp | 63
-rw-r--r--  src/Subsampling/include/gudhi/choose_n_farthest_points.h | 36
-rw-r--r--  src/Subsampling/include/gudhi/pick_n_random_points.h | 14
-rw-r--r--  src/Subsampling/include/gudhi/sparsify_point_set.h | 33
-rw-r--r--  src/Subsampling/test/test_choose_n_farthest_points.cpp | 20
-rw-r--r--  src/Tangential_complex/include/gudhi/Tangential_complex.h | 8
-rw-r--r--  src/Witness_complex/doc/Witness_complex_doc.h | 6
-rw-r--r--  src/Witness_complex/example/example_strong_witness_complex_off.cpp | 3
-rw-r--r--  src/Witness_complex/example/example_witness_complex_off.cpp | 3
-rw-r--r--  src/Witness_complex/example/example_witness_complex_sphere.cpp | 2
-rw-r--r--  src/Witness_complex/utilities/strong_witness_persistence.cpp | 3
-rw-r--r--  src/Witness_complex/utilities/weak_witness_persistence.cpp | 3
-rw-r--r--  src/common/doc/examples.h | 2
-rw-r--r--  src/common/doc/installation.h | 4
-rw-r--r--  src/python/CMakeLists.txt | 6
-rwxr-xr-x  src/python/doc/conf.py | 2
-rwxr-xr-x  src/python/example/diagram_vectorizations_distances_kernels.py | 19
-rw-r--r--  src/python/gudhi/__init__.py.in | 4
-rw-r--r--  src/python/gudhi/representations/vector_methods.py | 19
-rw-r--r--  src/python/gudhi/rips_complex.pyx | 8
-rw-r--r--  src/python/gudhi/simplex_tree.pxd | 2
-rw-r--r--  src/python/gudhi/simplex_tree.pyx | 3
-rw-r--r--  src/python/gudhi/subsampling.pyx | 2
-rw-r--r--  src/python/include/Alpha_complex_factory.h | 9
-rw-r--r--  src/python/include/Simplex_tree_interface.h | 6
-rw-r--r--  src/python/include/Subsampling_interface.h | 10
-rwxr-xr-x  src/python/test/test_representations.py | 26
-rwxr-xr-x  src/python/test/test_simplex_tree.py | 17
-rwxr-xr-x  src/python/test/test_subsampling.py | 16
58 files changed, 758 insertions, 540 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index a257debc..9ff8f157 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -48,9 +48,9 @@ install:
- python --version
- pip --version
- python -m pip install --user --upgrade pip
- - python -m pip install --user -r .github/build-requirements.txt
+ - python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
# No PyKeOps on windows, let's workaround this one.
- - for /F "tokens=*" %%A in (.github/test-requirements.txt) do python -m pip install --user %%A
+ - for /F "tokens=*" %%A in (ext/gudhi-deploy/test-requirements.txt) do python -m pip install --user %%A
build_script:
- mkdir build
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 285a66a5..7fa9ae05 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,7 +1,11 @@
version: 2.0
jobs:
+
+### With all third parties
+
examples:
docker:
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_circleci_image
- image: gudhi/ci_for_gudhi:latest
steps:
- checkout
@@ -95,10 +99,143 @@ jobs:
path: /tmp/doxygen
destination: doxygen
+
+### With all third parties, except CGAL and Eigen
+
+ examples_without_cgal_eigen:
+ docker:
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_circleci_image_without_cgal
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test examples without cgal and eigen
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ tests_without_cgal_eigen:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test unitary tests without cgal and eigen
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ utils_without_cgal_eigen:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test utilities without cgal and eigen
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ python_without_cgal_eigen:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test python module without cgal and eigen
+ command: |
+ git submodule init
+ git submodule update
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+ cd src/python
+ python3 setup.py build_ext --inplace
+ ctest --output-on-failure
+
+
+### With all third parties, except CGAL
+
+ examples_without_cgal:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test examples without cgal
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ tests_without_cgal:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test unitary tests without cgal
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ utils_without_cgal:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test utilities without cgal
+ command: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=OFF ..
+ make all
+ ctest --output-on-failure
+
+ python_without_cgal:
+ docker:
+ - image: gudhi/ci_for_gudhi_wo_cgal:latest
+ steps:
+ - checkout
+ - run:
+ name: Build and test python module without cgal
+ command: |
+ git submodule init
+ git submodule update
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DEIGEN3_INCLUDE_DIR=/eigen-3.3.9 -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+ cd src/python
+ python3 setup.py build_ext --inplace
+ ctest --output-on-failure
+
workflows:
version: 2
build:
jobs:
+ - examples_without_cgal_eigen
+ - tests_without_cgal_eigen
+ - utils_without_cgal_eigen
+ - python_without_cgal_eigen
+ - examples_without_cgal
+ - tests_without_cgal
+ - utils_without_cgal
+ - python_without_cgal
- examples
- tests
- utils
diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md
index 4de81b8a..aadfae7d 100644
--- a/.github/for_maintainers/new_gudhi_version_creation.md
+++ b/.github/for_maintainers/new_gudhi_version_creation.md
@@ -90,7 +90,8 @@ ln -s @GUDHI_VERSION@ latest
## Pip package
-The pip package construction shall be started on release creation, you just have to check [gudhi github actions](https://github.com/GUDHI/gudhi-devel/actions) results.
+The pip package construction starts automatically on release creation; you just have to check the
+[gudhi github actions](https://github.com/GUDHI/gudhi-devel/actions) results.
The version number must be conform to [pep440](https://www.python.org/dev/peps/pep-0440/#pre-releases)
## Conda package
@@ -105,28 +106,20 @@ If you need to update conda tools (conda-build, conda-smithy, ...), add a commen
## Docker image
-You have to modify the `Dockerfile_gudhi_installation` at the root of this repository in order to use the last release, cf. lines:
+You have to modify the
+[Dockerfile_gudhi_installation](https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_gudhi_installation)
+in the gudhi-deploy repository in order to use the latest release, cf. lines:
```
...
-RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-@GUDHI_VERSION@/gudhi.@GUDHI_VERSION@.tar.gz" \
-&& tar xf gudhi.@GUDHI_VERSION@.tar.gz \
-&& cd gudhi.@GUDHI_VERSION@ \
+ARG GUDHI_VERSION="3.X.X"
...
```
-Build and push images to docker hub:
-```
-docker build -f Dockerfile_gudhi_installation -t gudhi/latest_gudhi_version:@GUDHI_VERSION@ .
-docker run --rm -it gudhi/latest_gudhi_version:@GUDHI_VERSION@
-```
-
-***[Check there are no error with utils and python version]***
+After pushing the changes, the docker image build is automatically performed for the
+[latest_gudhi_version](https://hub.docker.com/repository/docker/gudhi/latest_gudhi_version)
+docker image on docker hub.
-```
-docker tag gudhi/latest_gudhi_version:@GUDHI_VERSION@ gudhi/latest_gudhi_version:latest
-docker push gudhi/latest_gudhi_version:latest
-docker push gudhi/latest_gudhi_version:@GUDHI_VERSION@
-```
+***[Check there are no errors]***
## Mail sending
Send version mail to the following lists :
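For reference, a quick way to sanity-check the automatically built image is to pull and run it, mirroring the manual commands this diff removes (the `latest` tag is the one the automated build pushes):

```bash
# Pull the automatically built image and run it interactively to check the output
docker pull gudhi/latest_gudhi_version:latest
docker run --rm -it gudhi/latest_gudhi_version:latest
```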
diff --git a/.github/for_maintainers/tests_strategy.md b/.github/for_maintainers/tests_strategy.md
new file mode 100644
index 00000000..9c181740
--- /dev/null
+++ b/.github/for_maintainers/tests_strategy.md
@@ -0,0 +1,90 @@
+# Tests strategy
+
+This document sums up the test strategy put in place for gudhi continuous integration.
+
+The aim is to help maintainers anticipate third-party modifications and updates.
+
+## Builds
+
+### Linux
+
+As all the third parties are already installed (thanks to docker), the compilation has been separated into categories to be parallelized:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* doxygen (C++ documentation that is available in the artefacts)
+* python (including documentation and code coverage that are available in the artefacts)
+
+(cf. `.circleci/config.yml`)
+
+These build categories are run with and without CGAL, and with and without Eigen, to make sure users are not blocked if a third party is missing.
+
+Builds with CGAL and Eigen are performed inside the docker image `gudhi/ci_for_gudhi`, based on the `Dockerfile_for_circleci_image` file.
+Builds without CGAL (with or without Eigen) are performed inside the docker image `gudhi/ci_for_gudhi_wo_cgal`, based on the `Dockerfile_for_circleci_image_without_cgal` file.
+
+#### Update docker images
+
+C++ third parties are installed with apt on the latest Ubuntu LTS.
+
+Docker images need to be rebuilt and pushed each time `.github/build-requirements` or `.github/test-requirements` is modified, when a new third party is added, when a new CGAL version improves gudhi performance, ...
+
+```bash
+docker build -f Dockerfile_for_circleci_image -t gudhi/ci_for_gudhi:latest .
+docker build -f Dockerfile_for_circleci_image_without_cgal -t gudhi/ci_for_gudhi_wo_cgal:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi
+docker push gudhi/ci_for_gudhi:latest
+docker push gudhi/ci_for_gudhi_wo_cgal:latest
+```
+
+### Windows
+
+The compilation has been separated into categories to be parallelized, but it is not clear why the builds are not run in parallel:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* python
+
+Doxygen (C++) is not tested.
+(cf. `.appveyor.yml`)
+
+C++ third parties are installed with [vcpkg](https://github.com/microsoft/vcpkg/).
+In case of an installation issue, check the [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
+
+### OSX
+
+The compilation has been separated into categories to be parallelized:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* python
+* Doxygen (C++)
+
+(cf. `azure-pipelines.yml`)
+
+C++ third parties are installed with [brew](https://formulae.brew.sh/formula/).
+In case of an installation issue, check the formula issues.
+
+## Pip packaging
+
+Pip packaging is done in 2 parts:
+
+* on push and pull requests, the wheels are built (pip package dry-run)
+* on releases, the wheels are built and sent to pypi.org (package)
+
+Only the Linux pip package is based on a docker image (`gudhi/pip_for_gudhi`, built from the `Dockerfile_for_pip` file) to make it faster; a local dry-run is sketched after this file's diff.
+
+### Update docker image
+
+C++ third parties are installed with yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
+
+The docker image needs to be rebuilt and pushed each time `.github/build-requirements` is modified, when a new third party is added, when a new CGAL version improves gudhi performance, ...
+As `.github/test-requirements` is not installed, there is no need to rebuild the image when this file is modified.
+
+```bash
+docker build -f Dockerfile_for_pip -t gudhi/pip_for_gudhi:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi
+docker push gudhi/pip_for_gudhi:latest
+```
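The Linux wheel build described above can be dry-run locally with the same image the workflows use. A minimal sketch, assuming the `gudhi/pip_for_gudhi` image defines `$PYTHON39` like the other `$PYTHON3x` variables used in the workflows below, and that submodules are checked out:

```bash
# Reproduce the pip-build-linux.yml steps inside the gudhi/pip_for_gudhi container
docker run --rm -v "$PWD":/gudhi -w /gudhi gudhi/pip_for_gudhi bash -c '
  mkdir -p build_39 && cd build_39 &&
  cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON39/bin/python .. &&
  cd src/python &&
  $PYTHON39/bin/python setup.py bdist_wheel &&
  auditwheel repair dist/*.whl'
```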
diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md
index 747ca39b..738c1ce9 100644
--- a/.github/how_to_use_github_to_contribute_to_gudhi.md
+++ b/.github/how_to_use_github_to_contribute_to_gudhi.md
@@ -33,6 +33,9 @@ Hera, used for Wasserstein distance, is available on an external git repository.
git submodule update --init
```
+[gudhi-deploy](https://github.com/GUDHI/gudhi-deploy) provides the Continuous Integration Python
+requirements and is also downloaded by the above command.
+
## Configuring a remote for a fork
```bash
git remote add upstream https://github.com/GUDHI/gudhi-devel.git
diff --git a/.github/next_release.md b/.github/next_release.md
index 190f8408..26143b0e 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,19 +1,19 @@
-We are pleased to announce the release 3.4.0 of the GUDHI library.
+We are pleased to announce the release 3.5.0 of the GUDHI library.
-As a major new feature, the GUDHI library now offers dD weighted alpha complex, ...
+As a major new feature, the GUDHI library now offers ...
-We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.4.0.tar.gz).
+We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend downloading the GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.3.0:
+Below is a list of changes made since GUDHI 3.4.0:
-- [Alpha complex](https://gudhi.inria.fr/doc/latest/group__alpha__complex.html)
- - the C++ weighted version for alpha complex is now available in dimension D.
+- [Module](link)
+ - ...
- [Module](link)
- ...
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.3.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.4.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.4.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.5.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt
index 688a2a11..d0803574 100644
--- a/.github/test-requirements.txt
+++ b/.github/test-requirements.txt
@@ -1,7 +1,7 @@
pytest
pytest-cov
sphinx
-sphinxcontrib-bibtex
+sphinxcontrib-bibtex==1.0.0
sphinx-paramlinks
matplotlib
scipy
diff --git a/.github/workflows/pip-build-linux.yml b/.github/workflows/pip-build-linux.yml
index 14fdc8d2..726ed0d5 100644
--- a/.github/workflows/pip-build-linux.yml
+++ b/.github/workflows/pip-build-linux.yml
@@ -6,16 +6,17 @@ jobs:
build:
name: build pip wheels
runs-on: ubuntu-latest
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_pip
container: gudhi/pip_for_gudhi
steps:
- uses: actions/checkout@v1
with:
submodules: true
- - name: Build wheels for Python 3.8
+ - name: Build wheels for Python 3.9
run: |
- mkdir build_38
- cd build_38
- cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON38/bin/python ..
+ mkdir build_39
+ cd build_39
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON39/bin/python ..
cd src/python
- $PYTHON38/bin/python setup.py bdist_wheel
+ $PYTHON39/bin/python setup.py bdist_wheel
        auditwheel repair dist/*.whl
\ No newline at end of file
diff --git a/.github/workflows/pip-build-osx.yml b/.github/workflows/pip-build-osx.yml
index 15b8880a..732e26af 100644
--- a/.github/workflows/pip-build-osx.yml
+++ b/.github/workflows/pip-build-osx.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.8']
+ python-version: ['3.9']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -22,7 +22,7 @@ jobs:
run: |
brew update || true
brew install boost eigen gmp mpfr cgal || true
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
- name: Build python wheel
run: |
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index 995d52dd..d07e8c46 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.8']
+ python-version: ['3.9']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -24,7 +24,7 @@ jobs:
vcpkg upgrade --no-dry-run
type c:/vcpkg/ports/cgal/portfile.cmake
vcpkg install eigen3 cgal --triplet x64-windows
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip list
- name: Build python wheel
run: |
diff --git a/.github/workflows/pip-packaging-linux.yml b/.github/workflows/pip-packaging-linux.yml
index bd524af9..95e8f034 100644
--- a/.github/workflows/pip-packaging-linux.yml
+++ b/.github/workflows/pip-packaging-linux.yml
@@ -8,6 +8,7 @@ jobs:
build:
name: build pip wheels
runs-on: ubuntu-latest
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_pip
container: gudhi/pip_for_gudhi
steps:
- uses: actions/checkout@v1
@@ -45,12 +46,21 @@ jobs:
cd src/python
$PYTHON38/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
+ - name: Build wheels for Python 3.9
+ run: |
+ mkdir build_39
+ cd build_39
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON39/bin/python ..
+ cd src/python
+ $PYTHON39/bin/python setup.py bdist_wheel
+ auditwheel repair dist/*.whl
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
run: |
- $PYTHON38/bin/python -m twine upload build_35/src/python/wheelhouse/*
- $PYTHON38/bin/python -m twine upload build_36/src/python/wheelhouse/*
- $PYTHON38/bin/python -m twine upload build_37/src/python/wheelhouse/*
-          $PYTHON38/bin/python -m twine upload build_38/src/python/wheelhouse/*
\ No newline at end of file
+ $PYTHON39/bin/python -m twine upload build_35/src/python/wheelhouse/*
+ $PYTHON39/bin/python -m twine upload build_36/src/python/wheelhouse/*
+ $PYTHON39/bin/python -m twine upload build_37/src/python/wheelhouse/*
+ $PYTHON39/bin/python -m twine upload build_38/src/python/wheelhouse/*
+          $PYTHON39/bin/python -m twine upload build_39/src/python/wheelhouse/*
\ No newline at end of file
diff --git a/.github/workflows/pip-packaging-osx.yml b/.github/workflows/pip-packaging-osx.yml
index c94369ac..56309b88 100644
--- a/.github/workflows/pip-packaging-osx.yml
+++ b/.github/workflows/pip-packaging-osx.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.5', '3.6', '3.7', '3.8']
+ python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -24,7 +24,7 @@ jobs:
run: |
brew update || true
brew install boost eigen gmp mpfr cgal || true
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
- name: Build python wheel
run: |
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 8f4ab6e7..a428eaba 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.5', '3.6', '3.7', '3.8']
+ python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -26,7 +26,7 @@ jobs:
vcpkg upgrade --no-dry-run
type c:/vcpkg/ports/cgal/portfile.cmake
vcpkg install eigen3 cgal --triplet x64-windows
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine
python -m pip list
- name: Build python wheel
diff --git a/.gitmodules b/.gitmodules
index f70c570d..2aa8ad96 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "ext/hera"]
path = ext/hera
url = https://github.com/grey-narn/hera.git
+[submodule "ext/gudhi-deploy"]
+ path = ext/gudhi-deploy
+ url = https://github.com/GUDHI/gudhi-deploy
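With this entry in place, contributors get the new submodule with the same command already used for hera:

```bash
# Fetch both ext/hera and the new ext/gudhi-deploy submodules
git submodule update --init
```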
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index 5f1eaacf..db9b243b 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,8 +1,8 @@
# Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 4)
+set (GUDHI_MINOR_VERSION 5)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
-set (GUDHI_PATCH_VERSION 0a0)
+set (GUDHI_PATCH_VERSION 0rc1)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
message(STATUS "GUDHI version : ${GUDHI_VERSION}")
diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
deleted file mode 100644
index ec1b8ff8..00000000
--- a/Dockerfile_for_circleci_image
+++ /dev/null
@@ -1,69 +0,0 @@
-FROM ubuntu:20.04
-
-# Update and upgrade distribution
-RUN apt-get update && \
- apt-get upgrade -y
-
-# Tools necessary for installing and configuring Ubuntu
-RUN apt-get install -y \
- apt-utils \
- locales \
- tzdata
-
-# Timezone
-RUN echo "Europe/Paris" | tee /etc/timezone && \
- ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
- dpkg-reconfigure -f noninteractive tzdata
-
-# Locale with UTF-8 support
-RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
- locale-gen && \
- update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-# Update again
-RUN apt-get update
-
-# Required for Gudhi compilation
-RUN apt-get install -y make \
- git \
- g++ \
- cmake \
- graphviz \
- perl \
- texlive-full \
- biber \
- doxygen \
- libboost-all-dev \
- libeigen3-dev \
- libgmp3-dev \
- libmpfr-dev \
- libtbb-dev \
- locales \
- python3 \
- python3-pip \
- python3-tk \
- python3-grpcio \
- libfreetype6-dev \
- pkg-config \
- curl
-
-RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz" \
- && tar xf CGAL-5.1.tar.xz \
- && mkdir build \
- && cd build \
- && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.1
-
-ADD .github/build-requirements.txt /
-ADD .github/test-requirements.txt /
-
-RUN pip3 install -r build-requirements.txt
-RUN pip3 --no-cache-dir install -r test-requirements.txt
-
-# apt clean up
-RUN apt autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/Dockerfile_for_pip b/Dockerfile_for_pip
deleted file mode 100644
index 98668a04..00000000
--- a/Dockerfile_for_pip
+++ /dev/null
@@ -1,50 +0,0 @@
-FROM quay.io/pypa/manylinux2014_x86_64
-
-RUN yum -y update && yum -y install \
- wget \
- zlib-devel \
- eigen3-devel \
- mpfr-devel \
- gmp-devel \
- devtoolset-8 \
- && yum clean all
-
-RUN mkdir -p /opt/cmake \
- && wget https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Linux-x86_64.sh \
- && sh cmake-3.16.2-Linux-x86_64.sh --skip-license --prefix=/opt/cmake \
- && rm -f cmake-3.16.2-Linux-x86_64.sh
-
-# yum install boost-devel installs boost 1.53 and copy is the only way to install headers only boost
-RUN wget https://dl.bintray.com/boostorg/release/1.73.0/source/boost_1_73_0.tar.gz \
- && tar xf boost_1_73_0.tar.gz \
- && cd boost_1_73_0 \
- && ./bootstrap.sh \
- && ls \
- && cp -r boost /usr/local/include/ \
- && cd .. \
- && rm -rf boost
-
-RUN wget https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz \
- && tar xf CGAL-5.1.tar.xz \
- && mkdir build \
- && cd build \
- && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.1
-
-ADD .github/build-requirements.txt /
-
-RUN /opt/python/cp35-cp35m/bin/pip install -r build-requirements.txt \
- && /opt/python/cp36-cp36m/bin/pip install -r build-requirements.txt\
- && /opt/python/cp37-cp37m/bin/pip install -r build-requirements.txt\
- && /opt/python/cp38-cp38/bin/pip install -r build-requirements.txt\
- && /opt/python/cp38-cp38/bin/pip install twine
-
-ENV PYTHON35="/opt/python/cp35-cp35m/"
-ENV PYTHON36="/opt/python/cp36-cp36m/"
-ENV PYTHON37="/opt/python/cp37-cp37m/"
-ENV PYTHON38="/opt/python/cp38-cp38/"
-
-ENV PATH="/opt/cmake/bin:${PATH}"
-ENV PATH="/opt/rh/devtoolset-8/root/usr/bin:${PATH}"
diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation
deleted file mode 100644
index ebd21f8d..00000000
--- a/Dockerfile_gudhi_installation
+++ /dev/null
@@ -1,80 +0,0 @@
-FROM ubuntu:20.04
-
-# Update and upgrade distribution
-RUN apt-get update && \
- apt-get upgrade -y
-
-# Tools necessary for installing and configuring Ubuntu
-RUN apt-get install -y \
- apt-utils \
- locales \
- tzdata
-
-# Timezone
-RUN echo "Europe/Paris" | tee /etc/timezone && \
- ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
- dpkg-reconfigure -f noninteractive tzdata
-
-# Locale with UTF-8 support
-RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
- locale-gen && \
- update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-# Update again
-RUN apt-get update
-
-# Required for Gudhi compilation
-RUN apt-get install -y make \
- g++ \
- cmake \
- graphviz \
- perl \
- texlive-bibtex-extra \
- biber \
- libboost-all-dev \
- libeigen3-dev \
- libgmp3-dev \
- libmpfr-dev \
- libtbb-dev \
- libcgal-dev \
- locales \
- python3 \
- python3-pip \
- python3-pytest \
- python3-tk \
- python3-pybind11 \
- libfreetype6-dev \
- pkg-config \
- curl
-
-RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz" \
- && tar xf CGAL-5.1.tar.xz \
- && mkdir build \
- && cd build \
- && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.1
-
-RUN pip3 install \
- numpy \
- matplotlib \
- scipy \
- Cython \
- POT \
- scikit-learn
-
-# apt clean up
-RUN apt autoremove && rm -rf /var/lib/apt/lists/*
-
-RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.3.0/gudhi.3.3.0.tar.gz" \
-&& tar xf gudhi.3.3.0.tar.gz \
-&& cd gudhi.3.3.0 \
-&& mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_PYTHON=OFF -DPython_ADDITIONAL_VERSIONS=3 .. \
-&& make all test install \
-&& cmake -DWITH_GUDHI_PYTHON=ON . \
-&& cd python \
-&& python3 setup.py install
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 8e88cab5..6c194f2a 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -19,15 +19,15 @@ jobs:
- bash: |
source activate gudhi_build_env
+ git submodule update --init
sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion)
- python -m pip install --user -r .github/build-requirements.txt
- python -m pip install --user -r .github/test-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/test-requirements.txt
brew update || true
brew install graphviz doxygen boost eigen gmp mpfr tbb cgal || true
displayName: 'Install build dependencies'
- bash: |
source activate gudhi_build_env
- git submodule update --init
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
diff --git a/ext/gudhi-deploy b/ext/gudhi-deploy
new file mode 160000
+Subproject commit f71c3749c4d57afcb9747fe99a8cfbb8aa960b6
diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt
index db5d840f..0595ca92 100644
--- a/src/Alpha_complex/test/CMakeLists.txt
+++ b/src/Alpha_complex/test/CMakeLists.txt
@@ -59,4 +59,18 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
endif()
gudhi_add_boost_test(Weighted_alpha_complex_test_unit)
+ add_executable ( Weighted_alpha_complex_non_visible_points_test_unit Weighted_alpha_complex_non_visible_points_unit_test.cpp )
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Weighted_alpha_complex_non_visible_points_test_unit)
+
+ add_executable ( Zero_weighted_alpha_complex_test_unit Zero_weighted_alpha_complex_unit_test.cpp )
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Zero_weighted_alpha_complex_test_unit)
+
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
\ No newline at end of file
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
new file mode 100644
index 00000000..dd83c1da
--- /dev/null
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
@@ -0,0 +1,60 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "weighted_alpha_complex_non_visible_points"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+
+
+using list_of_1d_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<1>>,
+ CGAL::Epick_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epick_d< CGAL::Dimension_tag<1>>
+ >;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) {
+  // Check that, for two close weighted 1-d points where a large weight on the first point hides the second
+  // point (which has a small weight), the hidden point gets the same high filtration value as the edge formed by the 2 points
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::vector<double> p1 {0.};
+ points.emplace_back(p1.begin(), p1.end());
+  // close enough points
+ std::vector<double> p2 {0.1};
+ points.emplace_back(p2.begin(), p2.end());
+ std::vector<typename Kernel::FT> weights {100., 0.01};
+
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex(points, weights);
+ Gudhi::Simplex_tree<> stree;
+ BOOST_CHECK(alpha_complex.create_complex(stree));
+
+ std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : stree.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(stree.filtration(stree.find({0})) == -100.);
+ BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1})));
+ BOOST_CHECK(stree.filtration(stree.find({1})) > 100000);
+}
\ No newline at end of file
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
index d267276c..875704ee 100644
--- a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
@@ -13,10 +13,8 @@
#include <boost/test/unit_test.hpp>
#include <boost/mpl/list.hpp>
-#include <CGAL/Epick_d.h>
#include <CGAL/Epeck_d.h>
-#include <cmath> // float comparison
#include <vector>
#include <random>
#include <array>
@@ -25,69 +23,6 @@
#include <gudhi/Alpha_complex.h>
#include <gudhi/Alpha_complex_3d.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/Unitary_tests_utils.h>
-
-using list_of_exact_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epeck_d< CGAL::Dimension_tag<4> >
- > ;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) {
- // Check that in exact mode for static dimension 4 the code for dD unweighted and for dD weighted with all weights
- // 0 give exactly the same simplex tree (simplices and filtration values).
-
- // Random points construction
- using Point_d = typename Kernel::Point_d;
- std::vector<Point_d> points;
- std::uniform_real_distribution<double> rd_pts(-10., 10.);
- std::random_device rand_dev;
- std::mt19937 rand_engine(rand_dev());
- for (int idx = 0; idx < 20; idx++) {
- std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
- points.emplace_back(point.begin(), point.end());
- }
-
- // Alpha complex from points
- Gudhi::alpha_complex::Alpha_complex<Kernel, false> alpha_complex_from_points(points);
- Gudhi::Simplex_tree<> simplex;
- Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
- BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true));
- std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : simplex.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl;
- }
-
- // Alpha complex from zero weighted points
- std::vector<typename Kernel::FT> weights(20, 0.);
- Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_zero_weighted_points(points, weights);
- Gudhi::Simplex_tree<> zw_simplex;
- BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true));
-
- std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : zw_simplex.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl;
- }
-
- BOOST_CHECK(zw_simplex == simplex);
-}
-
-template <typename Point_d>
-bool cgal_3d_point_sort (Point_d a,Point_d b) {
- if (a[0] != b[0])
- return a[0] < b[0];
- if (a[1] != b[1])
- return a[1] < b[1];
- return a[2] < b[2];
-}
BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) {
// check that for random weighted 3d points in safe mode the 3D and dD codes give the same result with some tolerance
@@ -189,41 +124,4 @@ BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) {
}
++dD_itr;
}
-}
-
-using list_of_1d_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epeck_d< CGAL::Dimension_tag<1>>,
- CGAL::Epick_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epick_d< CGAL::Dimension_tag<1>>
- >;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) {
- // check that for 2 closed weighted 1-d points, one with a high weight to hide the second one with a small weight,
- // that the point with a small weight has the same high filtration value than the edge formed by the 2 points
- using Point_d = typename Kernel::Point_d;
- std::vector<Point_d> points;
- std::vector<double> p1 {0.};
- points.emplace_back(p1.begin(), p1.end());
- // closed enough points
- std::vector<double> p2 {0.1};
- points.emplace_back(p2.begin(), p2.end());
- std::vector<typename Kernel::FT> weights {100., 0.01};
-
- Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex(points, weights);
- Gudhi::Simplex_tree<> stree;
- BOOST_CHECK(alpha_complex.create_complex(stree));
-
- std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : stree.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl;
- }
-
- BOOST_CHECK(stree.filtration(stree.find({0})) == -100.);
- BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1})));
- BOOST_CHECK(stree.filtration(stree.find({1})) > 100000);
}
\ No newline at end of file
diff --git a/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
new file mode 100644
index 00000000..b7df07c7
--- /dev/null
+++ b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
@@ -0,0 +1,77 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "zero_weighted_alpha_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+#include <random>
+#include <cmath> // for std::fabs
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Unitary_tests_utils.h>
+
+using list_of_exact_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<4> >
+ > ;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) {
+  // Check that, in exact mode for static dimension 4, the dD unweighted code and the dD weighted code with all
+  // weights 0 give exactly the same simplex tree (simplices and filtration values).
+
+ // Random points construction
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::uniform_real_distribution<double> rd_pts(-10., 10.);
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ for (int idx = 0; idx < 20; idx++) {
+ std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
+ points.emplace_back(point.begin(), point.end());
+ }
+
+ // Alpha complex from points
+ Gudhi::alpha_complex::Alpha_complex<Kernel, false> alpha_complex_from_points(points);
+ Gudhi::Simplex_tree<> simplex;
+ Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
+ BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true));
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ // Alpha complex from zero weighted points
+ std::vector<typename Kernel::FT> weights(20, 0.);
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_zero_weighted_points(points, weights);
+ Gudhi::Simplex_tree<> zw_simplex;
+ BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true));
+
+ std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : zw_simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(zw_simplex == simplex);
+}
\ No newline at end of file
diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
index 1b250818..a5501004 100644
--- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
+++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
@@ -67,8 +67,7 @@ class Sparse_rips_complex {
: epsilon_(epsilon) {
GUDHI_CHECK(epsilon > 0, "epsilon must be positive");
auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); };
- Ker<decltype(dist_fun)> kernel(dist_fun);
- subsampling::choose_n_farthest_points(kernel, boost::irange<Vertex_handle>(0, boost::size(points)), -1, -1,
+ subsampling::choose_n_farthest_points(dist_fun, boost::irange<Vertex_handle>(0, boost::size(points)), -1, -1,
std::back_inserter(sorted_points), std::back_inserter(params));
compute_sparse_graph(dist_fun, epsilon, mini, maxi);
}
@@ -128,17 +127,6 @@ class Sparse_rips_complex {
}
private:
- // choose_n_farthest_points wants the distance function in this form...
- template <class Distance>
- struct Ker {
- typedef std::size_t Point_d; // index into point range
- Ker(Distance& d) : dist(d) {}
- // Despite the name, this is not squared...
- typedef Distance Squared_distance_d;
- Squared_distance_d& squared_distance_d_object() const { return dist; }
- Distance& dist;
- };
-
// PointRange must be random access.
template <typename Distance>
void compute_sparse_graph(Distance& dist, double epsilon, Filtration_value mini, Filtration_value maxi) {
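The removed `Ker` wrapper is no longer needed because `choose_n_farthest_points` now accepts any callable directly. A minimal self-contained sketch of the new calling convention used above (toy 1-d points are an assumption; `-1, -1` keeps all points and starts from a random landmark, as in the `Sparse_rips_complex` call):

```cpp
#include <gudhi/choose_n_farthest_points.h>
#include <boost/range/irange.hpp>
#include <cmath>
#include <iterator>
#include <vector>

int main() {
  std::vector<double> points = {0., 1., 3., 7.};
  // Any callable returning a distance between two indices works; no kernel wrapper required.
  auto dist_fun = [&](std::size_t i, std::size_t j) { return std::abs(points[i] - points[j]); };
  std::vector<std::size_t> sorted_points;
  std::vector<double> params;
  // -1 for final_size keeps all (distinct) points; -1 for starting_point picks a random landmark.
  Gudhi::subsampling::choose_n_farthest_points(dist_fun, boost::irange<std::size_t>(0, points.size()),
                                               -1, -1, std::back_inserter(sorted_points),
                                               std::back_inserter(params));
}
```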
diff --git a/src/Spatial_searching/include/gudhi/Kd_tree_search.h b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
index 87969dd9..a50a8537 100644
--- a/src/Spatial_searching/include/gudhi/Kd_tree_search.h
+++ b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
@@ -12,11 +12,12 @@
#ifndef KD_TREE_SEARCH_H_
#define KD_TREE_SEARCH_H_
+#include <gudhi/Debug_utils.h>
+
#include <CGAL/Orthogonal_k_neighbor_search.h>
#include <CGAL/Orthogonal_incremental_neighbor_search.h>
#include <CGAL/Search_traits.h>
#include <CGAL/Search_traits_adapter.h>
-#include <CGAL/Fuzzy_sphere.h>
#include <CGAL/property_map.h>
#include <CGAL/version.h> // for CGAL_VERSION_NR
@@ -40,7 +41,6 @@
namespace Gudhi {
namespace spatial_searching {
-
/**
* \class Kd_tree_search Kd_tree_search.h gudhi/Kd_tree_search.h
* \brief Spatial tree data structure to perform (approximate) nearest and furthest neighbor search.
@@ -83,7 +83,8 @@ class Kd_tree_search {
typedef CGAL::Search_traits<
FT, Point,
typename Traits::Cartesian_const_iterator_d,
- typename Traits::Construct_cartesian_const_iterator_d> Traits_base;
+ typename Traits::Construct_cartesian_const_iterator_d,
+ typename Traits::Dimension> Traits_base;
typedef CGAL::Search_traits_adapter<
std::ptrdiff_t,
@@ -110,7 +111,76 @@ class Kd_tree_search {
/// of a point P and `second` is the squared distance between P and the query point.
typedef Incremental_neighbor_search INS_range;
- typedef CGAL::Fuzzy_sphere<STraits> Fuzzy_sphere;
+ // Because CGAL::Fuzzy_sphere takes the radius and not its square
+ struct Sphere_for_kdtree_search
+ {
+ typedef typename Traits::Point_d Point_d;
+ typedef typename Traits::FT FT;
+ typedef typename Traits::Dimension D;
+ typedef D Dimension;
+
+ private:
+ STraits traits;
+ Point_d c;
+ FT sqradmin, sqradmax;
+ bool use_max;
+
+ public:
+ // `prefer_max` means that we prefer outputting more points at squared distance between r2min and r2max,
+ // while `!prefer_max` means we prefer fewer.
+ Sphere_for_kdtree_search(Point_d const& c_, FT const& r2min, FT const& r2max, bool prefer_max=true, STraits const& traits_ = {})
+ : traits(traits_), c(c_), sqradmin(r2min), sqradmax(r2max), use_max(prefer_max)
+ { GUDHI_CHECK(r2min >= 0 && r2max >= r2min, "0 <= r2min <= r2max"); }
+
+ bool contains(std::ptrdiff_t i) const {
+ const Point_d& p = get(traits.point_property_map(), i);
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ return contains_point_given_as_coordinates(ccci(p), ccci(p, 0));
+ }
+
+ template <typename Coord_iterator>
+ bool contains_point_given_as_coordinates(Coord_iterator pi, Coord_iterator CGAL_UNUSED) const {
+ FT distance = 0;
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ FT const& limit = use_max ? sqradmax : sqradmin;
+ while (ci != ce) {
+ distance += CGAL::square(*pi++ - *ci++);
+ // I think Clément advised to check the distance at every step instead of
+ // just at the end, especially when the dimension becomes large. Distance
+ // isn't part of the concept anyway.
+ if (distance > limit) return false;
+ }
+ return true;
+ }
+
+ bool inner_range_intersects(CGAL::Kd_tree_rectangle<FT, D> const& rect) const {
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ FT distance = 0;
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ for (int i = 0; ci != ce; ++i, ++ci) {
+ distance += CGAL::square(CGAL::max<FT>(CGAL::max<FT>(*ci - rect.max_coord(i), rect.min_coord(i) - *ci), 0 ));
+ if (distance > sqradmin) return false;
+ }
+ return true;
+ }
+
+
+ bool outer_range_contains(CGAL::Kd_tree_rectangle<FT, D> const& rect) const {
+ auto ccci = traits.construct_cartesian_const_iterator_d_object();
+ FT distance = 0;
+ auto ci = ccci(c);
+ auto ce = ccci(c, 0);
+ for (int i = 0; ci != ce; ++i, ++ci) {
+ distance += CGAL::square(CGAL::max<FT>(*ci - rect.min_coord(i), rect.max_coord(i) - *ci));
+ if (distance > sqradmax) return false;
+ }
+ return true;
+ }
+ };
+
/// \brief Constructor
/// @param[in] points Const reference to the point range. This range
/// is not copied, so it should not be destroyed or modified afterwards.
@@ -266,10 +336,26 @@ class Kd_tree_search {
/// @param[in] eps Approximation factor.
template <typename OutputIterator>
void all_near_neighbors(Point const& p,
- FT radius,
+ FT const& radius,
OutputIterator it,
FT eps = FT(0)) const {
- m_tree.search(it, Fuzzy_sphere(p, radius, eps, m_tree.traits()));
+ all_near_neighbors2(p, CGAL::square(radius - eps), CGAL::square(radius + eps), it);
+ }
+
+  /// \brief Search for all the neighbors in a ball. This is similar to `all_near_neighbors` but directly takes
+  /// the square of the minimum distance below which points must be considered neighbors and the square of the
+  /// maximum distance above which they cannot be.
+ /// @param[in] p The query point.
+  /// @param[in] sq_radius_min The square of the minimum search radius.
+  /// @param[in] sq_radius_max The square of the maximum search radius.
+  /// @param[out] it Output iterator: receives every point at squared distance at most `sq_radius_min` from `p`,
+  ///             and possibly further points at squared distance up to `sq_radius_max`. Used as `*it++ = each_point`.
+ template <typename OutputIterator>
+ void all_near_neighbors2(Point const& p,
+ FT const& sq_radius_min,
+ FT const& sq_radius_max,
+ OutputIterator it) const {
+ m_tree.search(it, Sphere_for_kdtree_search(p, sq_radius_min, sq_radius_max, true, m_tree.traits()));
}
int tree_depth() const {
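For illustration, a hedged usage sketch of the new `all_near_neighbors2` query; the 2-d kernel, the sample points, and the concrete radii are assumptions, not part of this diff:

```cpp
#include <gudhi/Kd_tree_search.h>
#include <CGAL/Epick_d.h>
#include <iterator>
#include <vector>

int main() {
  using K = CGAL::Epick_d<CGAL::Dimension_tag<2>>;
  using Point = K::Point_d;
  using Points_ds = Gudhi::spatial_searching::Kd_tree_search<K, std::vector<Point>>;

  std::vector<Point> points = {Point(0., 0.), Point(1., 0.), Point(0., 2.)};
  Points_ds tree(points);

  std::vector<std::ptrdiff_t> neighbors;
  // Squared min and max radii are passed directly, instead of a radius and an
  // approximation factor eps as in all_near_neighbors.
  tree.all_near_neighbors2(Point(0., 0.), 0., 1.5, std::back_inserter(neighbors));
}
```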
diff --git a/src/Subsampling/example/CMakeLists.txt b/src/Subsampling/example/CMakeLists.txt
index dfac055c..f4a23d22 100644
--- a/src/Subsampling/example/CMakeLists.txt
+++ b/src/Subsampling/example/CMakeLists.txt
@@ -3,7 +3,6 @@ project(Subsampling_examples)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable(Subsampling_example_pick_n_random_points example_pick_n_random_points.cpp)
add_executable(Subsampling_example_choose_n_farthest_points example_choose_n_farthest_points.cpp)
- add_executable(Subsampling_example_custom_kernel example_custom_kernel.cpp)
add_executable(Subsampling_example_sparsify_point_set example_sparsify_point_set.cpp)
target_link_libraries(Subsampling_example_sparsify_point_set ${CGAL_LIBRARY})
@@ -13,5 +12,6 @@ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
COMMAND $<TARGET_FILE:Subsampling_example_choose_n_farthest_points>)
add_test(NAME Subsampling_example_sparsify_point_set
COMMAND $<TARGET_FILE:Subsampling_example_sparsify_point_set>)
-
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+
+add_executable(Subsampling_example_custom_distance example_custom_distance.cpp)
diff --git a/src/Subsampling/example/example_choose_n_farthest_points.cpp b/src/Subsampling/example/example_choose_n_farthest_points.cpp
index 27cf5d4e..e8b3ce2d 100644
--- a/src/Subsampling/example/example_choose_n_farthest_points.cpp
+++ b/src/Subsampling/example/example_choose_n_farthest_points.cpp
@@ -20,7 +20,7 @@ int main(void) {
K k;
std::vector<Point_d> results;
- Gudhi::subsampling::choose_n_farthest_points(k, points, 100,
+ Gudhi::subsampling::choose_n_farthest_points(k.squared_distance_d_object(), points, 100,
Gudhi::subsampling::random_starting_point,
std::back_inserter(results));
std::clog << "Before sparsification: " << points.size() << " points.\n";
diff --git a/src/Subsampling/example/example_custom_distance.cpp b/src/Subsampling/example/example_custom_distance.cpp
new file mode 100644
index 00000000..3325b12d
--- /dev/null
+++ b/src/Subsampling/example/example_custom_distance.cpp
@@ -0,0 +1,44 @@
+#include <gudhi/choose_n_farthest_points.h>
+
+#include <iostream>
+#include <vector>
+#include <iterator>
+
+
+typedef unsigned Point;
+
+/* The class Distance contains a distance function defined on the set of points {0, 1, 2, 3}
+ * and computes a distance according to the matrix:
+ * 0 1 2 4
+ * 1 0 4 2
+ * 2 4 0 1
+ * 4 2 1 0
+ */
+class Distance {
+ private:
+ std::vector<std::vector<double>> matrix_;
+
+ public:
+ Distance() {
+ matrix_.push_back({0, 1, 2, 4});
+ matrix_.push_back({1, 0, 4, 2});
+ matrix_.push_back({2, 4, 0, 1});
+ matrix_.push_back({4, 2, 1, 0});
+ }
+
+ double operator()(Point p1, Point p2) const {
+ return matrix_[p1][p2];
+ }
+};
+
+int main(void) {
+ std::vector<Point> points = {0, 1, 2, 3};
+ std::vector<Point> results;
+
+ Gudhi::subsampling::choose_n_farthest_points(Distance(), points, 2,
+ Gudhi::subsampling::random_starting_point,
+ std::back_inserter(results));
+ std::clog << "Before sparsification: " << points.size() << " points.\n";
+ std::clog << "After sparsification: " << results.size() << " points.\n";
+ std::clog << "Result table: {" << results[0] << "," << results[1] << "}\n";
+}
diff --git a/src/Subsampling/example/example_custom_kernel.cpp b/src/Subsampling/example/example_custom_kernel.cpp
deleted file mode 100644
index 535bf42a..00000000
--- a/src/Subsampling/example/example_custom_kernel.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-#include <gudhi/choose_n_farthest_points.h>
-
-#include <iostream>
-#include <vector>
-#include <iterator>
-
-
-/* The class Kernel contains a distance function defined on the set of points {0, 1, 2, 3}
- * and computes a distance according to the matrix:
- * 0 1 2 4
- * 1 0 4 2
- * 2 4 0 1
- * 4 2 1 0
- */
-class Kernel {
- public:
- typedef double FT;
- typedef unsigned Point_d;
-
- // Class Squared_distance_d
- class Squared_distance_d {
- private:
- std::vector<std::vector<FT>> matrix_;
-
- public:
- Squared_distance_d() {
- matrix_.push_back(std::vector<FT>({0, 1, 2, 4}));
- matrix_.push_back(std::vector<FT>({1, 0, 4, 2}));
- matrix_.push_back(std::vector<FT>({2, 4, 0, 1}));
- matrix_.push_back(std::vector<FT>({4, 2, 1, 0}));
- }
-
- FT operator()(Point_d p1, Point_d p2) {
- return matrix_[p1][p2];
- }
- };
-
- // Constructor
- Kernel() {}
-
- // Object of type Squared_distance_d
- Squared_distance_d squared_distance_d_object() const {
- return Squared_distance_d();
- }
-};
-
-int main(void) {
- typedef Kernel K;
- typedef typename K::Point_d Point_d;
-
- K k;
- std::vector<Point_d> points = {0, 1, 2, 3};
- std::vector<Point_d> results;
-
- Gudhi::subsampling::choose_n_farthest_points(k, points, 2,
- Gudhi::subsampling::random_starting_point,
- std::back_inserter(results));
- std::clog << "Before sparsification: " << points.size() << " points.\n";
- std::clog << "After sparsification: " << results.size() << " points.\n";
- std::clog << "Result table: {" << results[0] << "," << results[1] << "}\n";
-
- return 0;
-}
diff --git a/src/Subsampling/include/gudhi/choose_n_farthest_points.h b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
index b70af8a0..e6347d96 100644
--- a/src/Subsampling/include/gudhi/choose_n_farthest_points.h
+++ b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
@@ -38,33 +38,35 @@ enum : std::size_t {
* \ingroup subsampling
* \brief Subsample by a greedy strategy of iteratively adding the farthest point from the
* current chosen point set to the subsampling.
- * The iteration starts with the landmark `starting point` or, if `starting point==random_starting_point`, with a random landmark.
- * \tparam Kernel must provide a type Kernel::Squared_distance_d which is a model of the
- * concept <a target="_blank"
- * href="http://doc.cgal.org/latest/Kernel_d/classKernel__d_1_1Squared__distance__d.html">Kernel_d::Squared_distance_d</a> (despite the name, taken from CGAL, this can be any kind of metric or proximity measure).
- * It must also contain a public member `squared_distance_d_object()` that returns an object of this type.
- * \tparam Point_range Range whose value type is Kernel::Point_d. It must provide random-access
- * via `operator[]` and the points should be stored contiguously in memory.
- * \tparam PointOutputIterator Output iterator whose value type is Kernel::Point_d.
- * \tparam DistanceOutputIterator Output iterator for distances.
- * \details It chooses `final_size` points from a random access range
+ * \details
+ * The iteration starts with the landmark `starting_point` or, if `starting_point == random_starting_point`,
+ * with a random landmark.
+ * It chooses `final_size` points from a random access range
* `input_pts` (or the number of distinct points if `final_size` is larger)
* and outputs them in the output iterator `output_it`. It also
* outputs the distance from each of those points to the set of previous
* points in `dist_it`.
- * @param[in] k A kernel object.
- * @param[in] input_pts Const reference to the input points.
+ * \tparam Distance must provide an operator() that takes 2 points (value type of the range)
+ * and returns their distance (or some more general proximity measure) as a `double`.
+ * \tparam Point_range Random access range of points.
+ * \tparam PointOutputIterator Output iterator whose value type is the point type.
+ * \tparam DistanceOutputIterator Output iterator for distances.
+ * @param[in] dist A distance function.
+ * @param[in] input_pts The input points.
* @param[in] final_size The size of the subsample to compute.
* @param[in] starting_point The seed in the farthest point algorithm.
* @param[out] output_it The output iterator for points.
* @param[out] dist_it The optional output iterator for distances.
+ *
+ * \warning Older versions of this function took a CGAL kernel as their first argument. Users need to replace `k`
+ * with `k.squared_distance_d_object()` as the first argument of every call to `choose_n_farthest_points`.
*
*/
-template < typename Kernel,
+template < typename Distance,
typename Point_range,
typename PointOutputIterator,
typename DistanceOutputIterator = Null_output_iterator>
-void choose_n_farthest_points(Kernel const &k,
+void choose_n_farthest_points(Distance dist,
Point_range const &input_pts,
std::size_t final_size,
std::size_t starting_point,
@@ -86,9 +88,9 @@ void choose_n_farthest_points(Kernel const &k,
starting_point = dis(gen);
}
- typename Kernel::Squared_distance_d sqdist = k.squared_distance_d_object();
-
std::size_t current_number_of_landmarks = 0; // counter for landmarks
+ static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
+ // FIXME: don't hard-code the type as double. For Epeck_d, we also want to handle types that do not have an infinity.
const double infty = std::numeric_limits<double>::infinity(); // infinity (see next entry)
std::vector< double > dist_to_L(nb_points, infty); // vector of current distances to L from input_pts
@@ -100,7 +102,7 @@ void choose_n_farthest_points(Kernel const &k,
*dist_it++ = dist_to_L[curr_max_w];
std::size_t i = 0;
for (auto&& p : input_pts) {
- double curr_dist = sqdist(p, input_pts[curr_max_w]);
+ double curr_dist = dist(p, input_pts[curr_max_w]);
if (curr_dist < dist_to_L[i])
dist_to_L[i] = curr_dist;
++i;
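
For reference, a minimal sketch (not part of the patch) of the new calling convention: any
callable that takes two points and returns a double can now serve as the distance, e.g. a
plain lambda. The point type and data below are illustrative only.

#include <gudhi/choose_n_farthest_points.h>

#include <array>
#include <iterator>
#include <vector>

int main() {
  using Point = std::array<double, 2>;
  std::vector<Point> points = {{0., 0.}, {1., 0.}, {0., 1.}, {5., 5.}};
  // Any callable taking two points and returning a double is accepted as Distance.
  auto sqdist = [](Point const& p, Point const& q) {
    double dx = p[0] - q[0], dy = p[1] - q[1];
    return dx * dx + dy * dy;
  };
  std::vector<Point> landmarks;
  Gudhi::subsampling::choose_n_farthest_points(sqdist, points, 2,
                                               Gudhi::subsampling::random_starting_point,
                                               std::back_inserter(landmarks));
  return 0;
}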
diff --git a/src/Subsampling/include/gudhi/pick_n_random_points.h b/src/Subsampling/include/gudhi/pick_n_random_points.h
index a67b2b84..e4246c29 100644
--- a/src/Subsampling/include/gudhi/pick_n_random_points.h
+++ b/src/Subsampling/include/gudhi/pick_n_random_points.h
@@ -11,7 +11,9 @@
#ifndef PICK_N_RANDOM_POINTS_H_
#define PICK_N_RANDOM_POINTS_H_
-#include <gudhi/Clock.h>
+#ifdef GUDHI_SUBSAMPLING_PROFILING
+# include <gudhi/Clock.h>
+#endif
#include <boost/range/size.hpp>
@@ -44,6 +46,12 @@ void pick_n_random_points(Point_container const &points,
Gudhi::Clock t;
#endif
+ std::random_device rd;
+ std::mt19937 g(rd());
+
+#if __cplusplus >= 201703L
+ std::sample(std::begin(points), std::end(points), output_it, final_size, g);
+#else
std::size_t nbP = boost::size(points);
if (final_size > nbP)
final_size = nbP;
@@ -51,14 +59,12 @@ void pick_n_random_points(Point_container const &points,
std::vector<int> landmarks(nbP);
std::iota(landmarks.begin(), landmarks.end(), 0);
- std::random_device rd;
- std::mt19937 g(rd());
-
std::shuffle(landmarks.begin(), landmarks.end(), g);
landmarks.resize(final_size);
for (int l : landmarks)
*output_it++ = points[l];
+#endif
#ifdef GUDHI_SUBSAMPLING_PROFILING
t.end();
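
For context, the new C++17 branch delegates the whole selection to std::sample, which also
preserves the relative order of the chosen elements, whereas the shuffle-based fallback emits
them in random order. A self-contained sketch of that branch, with illustrative data:

#include <algorithm>
#include <iterator>
#include <random>
#include <vector>

int main() {
  std::vector<int> points = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  std::vector<int> subset;
  std::random_device rd;
  std::mt19937 g(rd());
  // Picks 4 distinct elements; asking for more than points.size() copies them
  // all, matching the clamping done explicitly in the pre-C++17 branch.
  std::sample(points.begin(), points.end(), std::back_inserter(subset), 4, g);
  return 0;
}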
diff --git a/src/Subsampling/include/gudhi/sparsify_point_set.h b/src/Subsampling/include/gudhi/sparsify_point_set.h
index b30cec80..4571b8f3 100644
--- a/src/Subsampling/include/gudhi/sparsify_point_set.h
+++ b/src/Subsampling/include/gudhi/sparsify_point_set.h
@@ -11,6 +11,13 @@
#ifndef SPARSIFY_POINT_SET_H_
#define SPARSIFY_POINT_SET_H_
+#include <boost/version.hpp>
+#if BOOST_VERSION < 106600
+# include <boost/function_output_iterator.hpp>
+#else
+# include <boost/iterator/function_output_iterator.hpp>
+#endif
+
#include <gudhi/Kd_tree_search.h>
#ifdef GUDHI_SUBSAMPLING_PROFILING
#include <gudhi/Clock.h>
@@ -27,7 +34,7 @@ namespace subsampling {
* \ingroup subsampling
* \brief Outputs a subset of the input points so that the
* squared distance between any two points
- * is greater than or equal to `min_squared_dist`.
+ * is greater than `min_squared_dist`.
*
* \tparam Kernel must be a model of the <a target="_blank"
* href="http://doc.cgal.org/latest/Spatial_searching/classSearchTraits.html">SearchTraits</a>
@@ -63,29 +70,15 @@ sparsify_point_set(
// Parse the input points, and add them if they are not too close to
// the other points
std::size_t pt_idx = 0;
- for (typename Point_range::const_iterator it_pt = input_pts.begin();
- it_pt != input_pts.end();
- ++it_pt, ++pt_idx) {
- if (dropped_points[pt_idx])
+ for (auto const& pt : input_pts) {
+ if (dropped_points[pt_idx++])
continue;
- *output_it++ = *it_pt;
-
- auto ins_range = points_ds.incremental_nearest_neighbors(*it_pt);
+ *output_it++ = pt;
     // If another point Q is closer than min_squared_dist, mark Q to be dropped
- for (auto const& neighbor : ins_range) {
- std::size_t neighbor_point_idx = neighbor.first;
- // If the neighbor is too close, we drop the neighbor
- if (neighbor.second < min_squared_dist) {
- // N.B.: If neighbor_point_idx < pt_idx,
- // dropped_points[neighbor_point_idx] is already true but adding a
- // test doesn't make things faster, so why bother?
- dropped_points[neighbor_point_idx] = true;
- } else {
- break;
- }
- }
+ auto drop = [&dropped_points] (std::ptrdiff_t neighbor_point_idx) { dropped_points[neighbor_point_idx] = true; };
+ points_ds.all_near_neighbors2(pt, min_squared_dist, min_squared_dist, boost::make_function_output_iterator(std::ref(drop)));
}
#ifdef GUDHI_SUBSAMPLING_PROFILING
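
The rewritten loop relies on a function output iterator: every index the range search writes
to the iterator is forwarded to the drop lambda instead of being stored. A stand-alone sketch
of that mechanism (the data is illustrative, not from the patch):

#include <boost/iterator/function_output_iterator.hpp>

#include <cstddef>
#include <functional>
#include <vector>

int main() {
  std::vector<bool> dropped_points(8, false);
  auto drop = [&dropped_points](std::ptrdiff_t idx) { dropped_points[idx] = true; };
  // Writing through the iterator calls drop(idx) instead of storing anything.
  auto out = boost::make_function_output_iterator(std::ref(drop));
  *out++ = 3;
  *out++ = 5;
  // dropped_points[3] and dropped_points[5] are now true.
  return 0;
}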
diff --git a/src/Subsampling/test/test_choose_n_farthest_points.cpp b/src/Subsampling/test/test_choose_n_farthest_points.cpp
index b318d58e..94793295 100644
--- a/src/Subsampling/test/test_choose_n_farthest_points.cpp
+++ b/src/Subsampling/test/test_choose_n_farthest_points.cpp
@@ -44,7 +44,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point, Kernel, list_of_tested
landmarks.clear();
Kernel k;
- Gudhi::subsampling::choose_n_farthest_points(k, points, 100, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ auto d = k.squared_distance_d_object();
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 100, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
BOOST_CHECK(landmarks.size() == 100);
for (auto landmark : landmarks)
@@ -61,32 +62,33 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
std::vector< FT > distances;
landmarks.clear();
Kernel k;
+ auto d = k.squared_distance_d_object();
// Choose -1 farthest points in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
// Choose 0 farthest points in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
   // Choose 1 farthest point in an empty point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0);
landmarks.clear(); distances.clear();
std::vector<FT> point({0.0, 0.0, 0.0, 0.0});
points.emplace_back(point.begin(), point.end());
// Choose -1 farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 1 && distances.size() == 1);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
landmarks.clear(); distances.clear();
// Choose 0 farthest points in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 0, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 0 && distances.size() == 0);
landmarks.clear(); distances.clear();
   // Choose 1 farthest point in a one point cloud
- Gudhi::subsampling::choose_n_farthest_points(k, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, 1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 1 && distances.size() == 1);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
landmarks.clear(); distances.clear();
@@ -94,7 +96,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
std::vector<FT> point2({1.0, 0.0, 0.0, 0.0});
points.emplace_back(point2.begin(), point2.end());
// Choose all farthest points among 2 points
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 2 && distances.size() == 2);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
BOOST_CHECK(distances[1] == 1);
@@ -102,7 +104,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
// Ignore duplicated points
points.emplace_back(point.begin(), point.end());
- Gudhi::subsampling::choose_n_farthest_points(k, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
+ Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
BOOST_CHECK(landmarks.size() == 2 && distances.size() == 2);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
BOOST_CHECK(distances[1] == 1);
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index f007bdd5..f3491f91 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -954,7 +954,11 @@ class Tangential_complex {
// Triangulation's traits functor & objects
typename Tr_traits::Compute_weight_d point_weight = local_tr_traits.compute_weight_d_object();
+#if CGAL_VERSION_NR < 1050200000
typename Tr_traits::Power_center_d power_center = local_tr_traits.power_center_d_object();
+#else
+ typename Tr_traits::Construct_power_sphere_d power_center = local_tr_traits.construct_power_sphere_d_object();
+#endif
//***************************************************
// Build a minimal triangulation in the tangent space
@@ -1100,7 +1104,11 @@ class Tangential_complex {
std::size_t closest_pt_index = updated_pts_ds.k_nearest_neighbors(center_point, 1, false).begin()->first;
typename K::Construct_weighted_point_d k_constr_wp = m_k.construct_weighted_point_d_object();
+#if CGAL_VERSION_NR < 1050200000
typename K::Power_distance_d k_power_dist = m_k.power_distance_d_object();
+#else
+ typename K::Compute_power_product_d k_power_dist = m_k.compute_power_product_d_object();
+#endif
// Construct a weighted point equivalent to the star sphere
Weighted_point star_sphere = k_constr_wp(compute_perturbed_point(i), m_squared_star_spheres_radii_incl_margin[i]);
diff --git a/src/Witness_complex/doc/Witness_complex_doc.h b/src/Witness_complex/doc/Witness_complex_doc.h
index 62203054..202f4539 100644
--- a/src/Witness_complex/doc/Witness_complex_doc.h
+++ b/src/Witness_complex/doc/Witness_complex_doc.h
@@ -92,11 +92,11 @@ int main(int argc, char * const argv[]) {
// Choose landmarks (one can choose either of the two methods below)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, nbL,
+ Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
// Compute witness complex
- Witness_complex witness_complex(landmarks,
- point_vector);
+ Witness_complex witness_complex(landmarks, point_vector);
witness_complex.create_complex(simplex_tree, alpha2, lim_dim);
}
diff --git a/src/Witness_complex/example/example_strong_witness_complex_off.cpp b/src/Witness_complex/example/example_strong_witness_complex_off.cpp
index 583a04ab..2bb135bf 100644
--- a/src/Witness_complex/example/example_strong_witness_complex_off.cpp
+++ b/src/Witness_complex/example/example_strong_witness_complex_off.cpp
@@ -43,7 +43,8 @@ int main(int argc, char* const argv[]) {
   // Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector,
+ nbL, Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
diff --git a/src/Witness_complex/example/example_witness_complex_off.cpp b/src/Witness_complex/example/example_witness_complex_off.cpp
index 3635da78..e1384c73 100644
--- a/src/Witness_complex/example/example_witness_complex_off.cpp
+++ b/src/Witness_complex/example/example_witness_complex_off.cpp
@@ -47,7 +47,8 @@ int main(int argc, char * const argv[]) {
   // Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, nbL, Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, nbL,
+ Gudhi::subsampling::random_starting_point, std::back_inserter(landmarks));
// Compute witness complex
start = clock();
diff --git a/src/Witness_complex/example/example_witness_complex_sphere.cpp b/src/Witness_complex/example/example_witness_complex_sphere.cpp
index 78d5db4f..12a56de4 100644
--- a/src/Witness_complex/example/example_witness_complex_sphere.cpp
+++ b/src/Witness_complex/example/example_witness_complex_sphere.cpp
@@ -53,7 +53,7 @@ int main(int argc, char* const argv[]) {
// Choose landmarks
start = clock();
// Gudhi::subsampling::pick_n_random_points(point_vector, number_of_landmarks, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), point_vector, number_of_landmarks,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), point_vector, number_of_landmarks,
Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
diff --git a/src/Witness_complex/utilities/strong_witness_persistence.cpp b/src/Witness_complex/utilities/strong_witness_persistence.cpp
index 1f61c77c..614de0d4 100644
--- a/src/Witness_complex/utilities/strong_witness_persistence.cpp
+++ b/src/Witness_complex/utilities/strong_witness_persistence.cpp
@@ -61,7 +61,8 @@ int main(int argc, char* argv[]) {
   // Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), witnesses, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), witnesses, nbL,
+ Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
diff --git a/src/Witness_complex/utilities/weak_witness_persistence.cpp b/src/Witness_complex/utilities/weak_witness_persistence.cpp
index 93050af5..5ea31d6b 100644
--- a/src/Witness_complex/utilities/weak_witness_persistence.cpp
+++ b/src/Witness_complex/utilities/weak_witness_persistence.cpp
@@ -61,7 +61,8 @@ int main(int argc, char* argv[]) {
   // Choose landmarks (uncomment one of the following two lines)
// Gudhi::subsampling::pick_n_random_points(point_vector, nbL, std::back_inserter(landmarks));
- Gudhi::subsampling::choose_n_farthest_points(K(), witnesses, nbL, Gudhi::subsampling::random_starting_point,
+ Gudhi::subsampling::choose_n_farthest_points(K().squared_distance_d_object(), witnesses, nbL,
+ Gudhi::subsampling::random_starting_point,
std::back_inserter(landmarks));
// Compute witness complex
diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h
index c19b3444..474f8699 100644
--- a/src/common/doc/examples.h
+++ b/src/common/doc/examples.h
@@ -42,7 +42,7 @@
* @example Persistence_representations/persistence_landscape.cpp
* @example Tangential_complex/example_basic.cpp
* @example Tangential_complex/example_with_perturb.cpp
- * @example Subsampling/example_custom_kernel.cpp
+ * @example Subsampling/example_custom_distance.cpp
* @example Subsampling/example_choose_n_farthest_points.cpp
* @example Subsampling/example_sparsify_point_set.cpp
* @example Subsampling/example_pick_n_random_points.cpp
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index 9df1c2f0..c2e63a24 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -113,8 +113,6 @@ make doxygen
* Spatial_searching/example_spatial_searching.cpp</a>
* \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
* Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_custom_kernel_8cpp-example.html">
- * Subsampling/example_custom_kernel.cpp</a>
* \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
* Subsampling/example_pick_n_random_points.cpp</a>
* \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
@@ -153,8 +151,6 @@ make doxygen
* Spatial_searching/example_spatial_searching.cpp</a>
* \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
* Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_custom_kernel_8cpp-example.html">
- * Subsampling/example_custom_kernel.cpp</a>
* \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
* Subsampling/example_pick_n_random_points.cpp</a>
* \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 56b6876c..5c1402a6 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -133,6 +133,10 @@ if(PYTHONINTERP_FOUND)
add_gudhi_debug_info("Eigen3 version ${EIGEN3_VERSION}")
# No problem, even if no CGAL found
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DGUDHI_USE_EIGEN3', ")
+ set(GUDHI_USE_EIGEN3 "True")
+ else (EIGEN3_FOUND)
+ set(GUDHI_USE_EIGEN3 "False")
endif (EIGEN3_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ")
@@ -504,7 +508,7 @@ if(PYTHONINTERP_FOUND)
endif()
# Representations
- if(SKLEARN_FOUND AND MATPLOTLIB_FOUND)
+ if(SKLEARN_FOUND AND MATPLOTLIB_FOUND AND OT_FOUND AND NOT CGAL_VERSION VERSION_LESS 4.11.0)
add_gudhi_py_test(test_representations)
endif()
diff --git a/src/python/doc/conf.py b/src/python/doc/conf.py
index 3cc5d1d6..b06baf9c 100755
--- a/src/python/doc/conf.py
+++ b/src/python/doc/conf.py
@@ -44,6 +44,8 @@ extensions = [
'sphinx_paramlinks',
]
+bibtex_bibfiles = ['../../biblio/bibliography.bib']
+
todo_include_todos = True
# plot option : do not show hyperlinks (Source code, png, hires.png, pdf)
plot_html_show_source_link = False
diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py
index c4a71a7a..2801576e 100755
--- a/src/python/example/diagram_vectorizations_distances_kernels.py
+++ b/src/python/example/diagram_vectorizations_distances_kernels.py
@@ -5,11 +5,11 @@ import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.preprocessing import MinMaxScaler
-from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
+from gudhi.representations import (DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
- SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel, WassersteinDistance
+ SlicedWassersteinKernel, PersistenceFisherKernel, WassersteinDistance)
D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
@@ -93,14 +93,21 @@ print("SW distance is " + str(sW(D1, D2)))
SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.)
print("SW kernel is " + str(SW(D1, D2)))
-W = WassersteinDistance(order=2, internal_p=2, mode="pot")
-print("Wasserstein distance (POT) is " + str(W(D1, D2)))
+try:
+ W = WassersteinDistance(order=2, internal_p=2, mode="pot")
+ print("Wasserstein distance (POT) is " + str(W(D1, D2)))
+except ImportError:
+ print("WassersteinDistance (POT) is not available, you may be missing pot.")
W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001)
print("Wasserstein distance (hera) is " + str(W(D1, D2)))
-W = BottleneckDistance(epsilon=.001)
-print("Bottleneck distance is " + str(W(D1, D2)))
+try:
+ from gudhi.representations import BottleneckDistance
+ W = BottleneckDistance(epsilon=.001)
+ print("Bottleneck distance is " + str(W(D1, D2)))
+except ImportError:
+ print("BottleneckDistance is not available, you may be missing CGAL.")
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)
print("PF kernel is " + str(PF(D1, D2)))
diff --git a/src/python/gudhi/__init__.py.in b/src/python/gudhi/__init__.py.in
index 79e12fbc..3043201a 100644
--- a/src/python/gudhi/__init__.py.in
+++ b/src/python/gudhi/__init__.py.in
@@ -23,6 +23,10 @@ __all__ = [@GUDHI_PYTHON_MODULES@ @GUDHI_PYTHON_MODULES_EXTRA@]
__available_modules = ''
__missing_modules = ''
+# For unit testing purposes
+# could use "if 'collapse_edges' in gudhi.__all__" once collapse_edges has a Python module
+__GUDHI_USE_EIGEN3 = @GUDHI_USE_EIGEN3@
+
# Try to import * from gudhi.__module_name for default modules.
# Extra modules require an explicit import by the user (mostly because of
# unusual dependencies, but also to avoid cluttering namespace gudhi and
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index cdcb1fde..84bc99a2 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -605,18 +605,19 @@ class Atol(BaseEstimator, TransformerMixin):
>>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
>>> c = np.array([[3, 2, -1], [1, 2, -1]])
>>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
- >>> atol_vectoriser.fit(X=[a, b, c]).centers
- array([[ 2. , 0.66666667, 3.33333333],
- [ 2.6 , 2.8 , -0.4 ]])
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP
+ >>> # array([[ 2. , 0.66666667, 3.33333333],
+ >>> # [ 2.6 , 2.8 , -0.4 ]])
>>> atol_vectoriser(a)
- array([1.18168665, 0.42375966])
+ >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP
>>> atol_vectoriser(c)
- array([0.02062512, 1.25157463])
- >>> atol_vectoriser.transform(X=[a, b, c])
- array([[1.18168665, 0.42375966],
- [0.29861028, 1.06330156],
- [0.02062512, 1.25157463]])
+ >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP
+ >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP
+ >>> # array([[1.18168665, 0.42375966],
+ >>> # [0.29861028, 1.06330156],
+ >>> # [0.02062512, 1.25157463]])
"""
+    # Note: the example above must be kept up to date with the test_atol_doc test in test_representations.py
def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
"""
Constructor for the Atol measure vectorisation class.
diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx
index 72e82c79..c3470292 100644
--- a/src/python/gudhi/rips_complex.pyx
+++ b/src/python/gudhi/rips_complex.pyx
@@ -49,13 +49,13 @@ cdef class RipsComplex:
:type max_edge_length: float
:param points: A list of points in d-Dimension.
- :type points: list of list of double
+ :type points: list of list of float
Or
:param distance_matrix: A distance matrix (full square or lower
triangular).
- :type points: list of list of double
+    :type distance_matrix: list of list of float
And in both cases
@@ -89,10 +89,10 @@ cdef class RipsComplex:
def create_simplex_tree(self, max_dimension=1):
"""
- :param max_dimension: graph expansion for rips until this given maximal
+        :param max_dimension: expansion of the Rips graph until this given maximal
dimension.
:type max_dimension: int
- :returns: A simplex tree created from the Delaunay Triangulation.
+ :returns: A simplex tree encoding the Vietoris–Rips filtration.
:rtype: SimplexTree
"""
stree = SimplexTree()
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 3c4cbed3..000323af 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -63,7 +63,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
bool make_filtration_non_decreasing() nogil
void compute_extended_filtration() nogil
vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
- Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil
+ Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil except +
void reset_filtration(double filtration, int dimension) nogil
# Iterators over Simplex tree
pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index cdd2e87b..d7991417 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -627,6 +627,9 @@ cdef class SimplexTree:
:param nb_iterations: The number of edge collapse iterations to perform. Default is 1.
:type nb_iterations: int
+
+    :note: the collapse_edges method requires `Eigen <installation.html#eigen>`_ >= 3.1.0, and an exception is
+        thrown if Eigen is not available.
"""
# Backup old pointer
cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
diff --git a/src/python/gudhi/subsampling.pyx b/src/python/gudhi/subsampling.pyx
index b11d07e5..46f32335 100644
--- a/src/python/gudhi/subsampling.pyx
+++ b/src/python/gudhi/subsampling.pyx
@@ -105,7 +105,7 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
"""Outputs a subset of the input points so that the squared distance
- between any two points is greater than or equal to min_squared_dist.
+ between any two points is greater than min_squared_dist.
:param points: The input point set.
:type points: Iterable[Iterable[float]]
diff --git a/src/python/include/Alpha_complex_factory.h b/src/python/include/Alpha_complex_factory.h
index d699ad9b..3405fdd6 100644
--- a/src/python/include/Alpha_complex_factory.h
+++ b/src/python/include/Alpha_complex_factory.h
@@ -48,11 +48,14 @@ static CgalPointType pt_cython_to_cgal(std::vector<double> const& vec) {
class Abstract_alpha_complex {
public:
virtual std::vector<double> get_point(int vh) = 0;
+
virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
bool default_filtration_value) = 0;
+
+ virtual ~Abstract_alpha_complex() = default;
};
-class Exact_Alphacomplex_dD : public Abstract_alpha_complex {
+class Exact_Alphacomplex_dD final : public Abstract_alpha_complex {
private:
using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
using Point = typename Kernel::Point_d;
@@ -78,7 +81,7 @@ class Exact_Alphacomplex_dD : public Abstract_alpha_complex {
Alpha_complex<Kernel> alpha_complex_;
};
-class Inexact_Alphacomplex_dD : public Abstract_alpha_complex {
+class Inexact_Alphacomplex_dD final : public Abstract_alpha_complex {
private:
using Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
using Point = typename Kernel::Point_d;
@@ -104,7 +107,7 @@ class Inexact_Alphacomplex_dD : public Abstract_alpha_complex {
};
template <complexity Complexity>
-class Alphacomplex_3D : public Abstract_alpha_complex {
+class Alphacomplex_3D final : public Abstract_alpha_complex {
private:
using Point = typename Alpha_complex_3d<Complexity, false, false>::Bare_point_3;
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index baff3850..629f6083 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -15,7 +15,9 @@
#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Points_off_io.h>
+#ifdef GUDHI_USE_EIGEN3
#include <gudhi/Flag_complex_edge_collapser.h>
+#endif
#include <iostream>
#include <vector>
@@ -162,6 +164,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
}
Simplex_tree_interface* collapse_edges(int nb_collapse_iteration) {
+#ifdef GUDHI_USE_EIGEN3
using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
std::vector<Filtered_edge> edges;
for (Simplex_handle sh : Base::skeleton_simplex_range(1)) {
@@ -187,6 +190,9 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
collapsed_stree_ptr->insert({std::get<0>(remaining_edge), std::get<1>(remaining_edge)}, std::get<2>(remaining_edge));
}
return collapsed_stree_ptr;
+#else
+ throw std::runtime_error("Unable to collapse edges as it requires Eigen3 >= 3.1.0.");
+#endif
}
// Iterator over the simplex tree
diff --git a/src/python/include/Subsampling_interface.h b/src/python/include/Subsampling_interface.h
index cdda851f..6aee7231 100644
--- a/src/python/include/Subsampling_interface.h
+++ b/src/python/include/Subsampling_interface.h
@@ -11,6 +11,7 @@
#ifndef INCLUDE_SUBSAMPLING_INTERFACE_H_
#define INCLUDE_SUBSAMPLING_INTERFACE_H_
+#include <gudhi/distance_functions.h>
#include <gudhi/choose_n_farthest_points.h>
#include <gudhi/pick_n_random_points.h>
#include <gudhi/sparsify_point_set.h>
@@ -27,14 +28,13 @@ namespace subsampling {
using Subsampling_dynamic_kernel = CGAL::Epick_d< CGAL::Dynamic_dimension_tag >;
using Subsampling_point_d = Subsampling_dynamic_kernel::Point_d;
-using Subsampling_ft = Subsampling_dynamic_kernel::FT;
// ------ choose_n_farthest_points ------
std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector<std::vector<double>>& points,
unsigned nb_points) {
std::vector<std::vector<double>> landmarks;
- Subsampling_dynamic_kernel k;
- choose_n_farthest_points(k, points, nb_points, random_starting_point, std::back_inserter(landmarks));
+ choose_n_farthest_points(Euclidean_distance(), points, nb_points,
+ random_starting_point, std::back_inserter(landmarks));
return landmarks;
}
@@ -42,8 +42,8 @@ std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector
std::vector<std::vector<double>> subsampling_n_farthest_points(const std::vector<std::vector<double>>& points,
unsigned nb_points, unsigned starting_point) {
std::vector<std::vector<double>> landmarks;
- Subsampling_dynamic_kernel k;
- choose_n_farthest_points(k, points, nb_points, starting_point, std::back_inserter(landmarks));
+ choose_n_farthest_points(Euclidean_distance(), points, nb_points,
+ starting_point, std::back_inserter(landmarks));
return landmarks;
}
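
The same call is available outside the Python interface; a minimal sketch using the ready-made
Gudhi::Euclidean_distance functor from <gudhi/distance_functions.h>, with illustrative point data:

#include <gudhi/choose_n_farthest_points.h>
#include <gudhi/distance_functions.h>

#include <iterator>
#include <vector>

int main() {
  std::vector<std::vector<double>> points = {{0., 0.}, {1., 0.}, {0., 1.}, {5., 5.}};
  std::vector<std::vector<double>> landmarks;
  // Euclidean_distance computes the (non-squared) Euclidean distance, which is
  // fine here since farthest-point selection only needs a proximity measure.
  Gudhi::subsampling::choose_n_farthest_points(Gudhi::Euclidean_distance(), points, 3,
                                               Gudhi::subsampling::random_starting_point,
                                               std::back_inserter(landmarks));
  return 0;
}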
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index 43c914f3..cda1a15b 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -46,6 +46,32 @@ def test_multiple():
assert d1 == pytest.approx(d2, rel=0.02)
+# Test sorted values, as the point order can be inverted and a sorted test is not documentation-friendly
+# Note the test below must be up to date with the Atol class documentation
+def test_atol_doc():
+ a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ c = np.array([[3, 2, -1], [1, 2, -1]])
+
+ atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
+ # Atol will do
+ # X = np.concatenate([a,b,c])
+ # kmeans = KMeans(n_clusters=2, random_state=202006).fit(X)
+    # kmeans.labels_ will be: array([1, 0, 1, 0, 0, 1, 0, 0])
+    first_cluster = np.asarray([a[0], a[2], b[2]])
+    second_cluster = np.asarray([a[1], b[0], b[1], c[0], c[1]])
+
+    # Check that the centers of first_cluster and second_cluster are among the Atol centers
+    centers = atol_vectoriser.fit(X=[a, b, c]).centers
+    assert np.isclose(centers, first_cluster.mean(axis=0)).all(1).any()
+    assert np.isclose(centers, second_cluster.mean(axis=0)).all(1).any()
+
+ vectorization = atol_vectoriser.transform(X=[a, b, c])
+ assert np.allclose(vectorization[0], atol_vectoriser(a))
+ assert np.allclose(vectorization[1], atol_vectoriser(b))
+ assert np.allclose(vectorization[2], atol_vectoriser(c))
+
+
def test_dummy_atol():
a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 3b23fa0b..a3eacaa9 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -8,7 +8,7 @@
- YYYY/MM Author: Description of the modification
"""
-from gudhi import SimplexTree
+from gudhi import SimplexTree, __GUDHI_USE_EIGEN3
import pytest
__author__ = "Vincent Rouvreau"
@@ -353,11 +353,16 @@ def test_collapse_edges():
assert st.num_simplices() == 10
- st.collapse_edges()
- assert st.num_simplices() == 9
- assert st.find([1, 3]) == False
- for simplex in st.get_skeleton(0):
- assert simplex[1] == 1.
+ if __GUDHI_USE_EIGEN3:
+ st.collapse_edges()
+ assert st.num_simplices() == 9
+ assert st.find([1, 3]) == False
+ for simplex in st.get_skeleton(0):
+ assert simplex[1] == 1.
+ else:
+ # If no Eigen3, collapse_edges throws an exception
+ with pytest.raises(RuntimeError):
+ st.collapse_edges()
def test_reset_filtration():
st = SimplexTree()
diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py
index 31f64e32..4019852e 100755
--- a/src/python/test/test_subsampling.py
+++ b/src/python/test/test_subsampling.py
@@ -141,12 +141,16 @@ def test_simple_sparsify_points():
# assert gudhi.sparsify_point_set(points = [], min_squared_dist = 0.0) == []
# assert gudhi.sparsify_point_set(points = [], min_squared_dist = 10.0) == []
assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=0.0) == point_set
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.0) == point_set
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.0) == [
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=0.999) == point_set
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.001) == [
[0, 1],
[1, 0],
]
- assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.01) == [[0, 1]]
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=1.999) == [
+ [0, 1],
+ [1, 0],
+ ]
+ assert gudhi.sparsify_point_set(points=point_set, min_squared_dist=2.001) == [[0, 1]]
assert (
len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=0.0))
@@ -157,11 +161,11 @@ def test_simple_sparsify_points():
== 5
)
assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=40.0))
+ len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=40.1))
== 4
)
assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=90.0))
+ len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=89.9))
== 3
)
assert (
@@ -169,7 +173,7 @@ def test_simple_sparsify_points():
== 2
)
assert (
- len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=325.0))
+ len(gudhi.sparsify_point_set(off_file="subsample.off", min_squared_dist=324.9))
== 2
)
assert (