author    Marc Glisse <marc.glisse@inria.fr>  2022-10-16 18:17:36 +0200
committer Marc Glisse <marc.glisse@inria.fr>  2022-10-16 18:17:36 +0200
commit    b99c9621fb7e1433eb67cc973825e2ee49936571 (patch)
tree      9db6f6f86d3ae549a4f8d7ba5f604d33381a43b3
parent    7b7d71e3a8d1302dc81eb020114fe4c4d767ccb0 (diff)
parent    524718d63a8f633dbcc4fe7db3fe920ebd7e972c (diff)
Merge branch 'master' into insert
-rw-r--r--  .circleci/config.yml  16
-rw-r--r--  .github/for_maintainers/tests_strategy.md  33
-rw-r--r--  .github/how_to_compile_gudhi_in_a_conda_env.md  93
-rw-r--r--  .github/next_release.md  21
-rw-r--r--  .github/workflows/pip-build-windows.yml  8
-rw-r--r--  .github/workflows/pip-packaging-windows.yml  10
-rw-r--r--  .gitignore  4
-rw-r--r--  CMakeGUDHIVersion.txt  2
-rw-r--r--  CMakeLists.txt  12
-rw-r--r--  azure-pipelines.yml  30
-rw-r--r--  biblio/bibliography.bib  26
-rw-r--r--  biblio/how_to_cite_gudhi.bib.in  6
m---------  ext/gudhi-deploy  0
-rw-r--r--  src/Alpha_complex/doc/Intro_alpha_complex.h  8
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex.h  18
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex_3d.h  4
-rw-r--r--  src/Alpha_complex/utilities/alphacomplex.md  4
-rw-r--r--  src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h  2
-rw-r--r--  src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h  2
-rw-r--r--  src/Bottleneck_distance/include/gudhi/Neighbors_finder.h  2
-rw-r--r--  src/CMakeLists.txt  12
-rw-r--r--  src/Cech_complex/benchmark/CMakeLists.txt  18
-rw-r--r--  src/Cech_complex/benchmark/cech_complex_benchmark.cpp  169
-rw-r--r--  src/Cech_complex/concept/SimplicialComplexForCech.h  4
-rw-r--r--  src/Cech_complex/doc/Intro_cech_complex.h  16
-rw-r--r--  src/Cech_complex/example/CMakeLists.txt  16
-rw-r--r--  src/Cech_complex/example/cech_complex_example_from_points.cpp  31
-rw-r--r--  src/Cech_complex/example/cech_complex_step_by_step.cpp  154
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex.h  89
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex_blocker.h  90
-rw-r--r--  src/Cech_complex/include/gudhi/Miniball.COPYRIGHT  4
-rw-r--r--  src/Cech_complex/include/gudhi/Miniball.README  26
-rw-r--r--  src/Cech_complex/include/gudhi/Miniball.hpp  523
-rw-r--r--  src/Cech_complex/include/gudhi/Sphere_circumradius.h  78
-rw-r--r--  src/Cech_complex/test/CMakeLists.txt  19
-rw-r--r--  src/Cech_complex/test/test_cech_complex.cpp  80
-rw-r--r--  src/Cech_complex/utilities/CMakeLists.txt  24
-rw-r--r--  src/Cech_complex/utilities/cech_persistence.cpp  10
-rw-r--r--  src/Collapse/doc/intro_edge_collapse.h  72
-rw-r--r--  src/Collapse/example/edge_collapse_conserve_persistence.cpp  2
-rw-r--r--  src/Collapse/include/gudhi/Flag_complex_edge_collapser.h  579
-rw-r--r--  src/Collapse/test/collapse_unit_test.cpp  16
-rw-r--r--  src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp  2
-rw-r--r--  src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp  2
-rw-r--r--  src/Contraction/doc/so3.svg  2
-rw-r--r--  src/Contraction/example/Garland_heckbert/Error_quadric.h  2
-rw-r--r--  src/Contraction/example/Rips_contraction.cpp  2
-rw-r--r--  src/Contraction/include/gudhi/Edge_contraction.h  12
-rw-r--r--  src/Contraction/include/gudhi/Skeleton_blocker_contractor.h  27
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h  6
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h  9
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h  11
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h  5
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h  10
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h  2
-rw-r--r--  src/Doxyfile.in  188
-rw-r--r--  src/GudhUI/todo.txt  2
-rw-r--r--  src/GudhUI/utils/Critical_points.h  2
-rw-r--r--  src/GudhUI/utils/Edge_contractor.h  2
-rw-r--r--  src/GudhUI/utils/Furthest_point_epsilon_net.h  4
-rw-r--r--  src/GudhUI/utils/K_nearest_builder.h  2
-rw-r--r--  src/GudhUI/utils/Lloyd_builder.h  2
-rw-r--r--  src/GudhUI/utils/Vertex_collapsor.h  2
-rw-r--r--  src/GudhUI/view/Viewer_instructor.h  2
-rw-r--r--  src/Nerve_GIC/doc/Intro_graph_induced_complex.h  2
-rw-r--r--  src/Nerve_GIC/utilities/km.py.COPYRIGHT  2
-rw-r--r--  src/Persistence_representations/include/gudhi/Persistence_intervals.h  4
-rw-r--r--  src/Persistence_representations/test/persistence_heat_maps_test.cpp  2
-rw-r--r--  src/Persistence_representations/test/persistence_lanscapes_test.cpp  2
-rw-r--r--  src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp  2
-rw-r--r--  src/Persistent_cohomology/concept/FilteredComplex.h  2
-rw-r--r--  src/Persistent_cohomology/doc/Intro_persistent_cohomology.h  21
-rw-r--r--  src/Persistent_cohomology/example/custom_persistence_sort.cpp  2
-rw-r--r--  src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp  2
-rw-r--r--  src/Persistent_cohomology/example/rips_multifield_persistence.cpp  2
-rw-r--r--  src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h  4
-rw-r--r--  src/Rips_complex/doc/Intro_rips_complex.h  5
-rw-r--r--  src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp  2
-rw-r--r--  src/Simplex_tree/doc/Intro_simplex_tree.h  12
-rw-r--r--  src/Simplex_tree/example/graph_expansion_with_blocker.cpp  2
-rw-r--r--  src/Simplex_tree/example/simple_simplex_tree.cpp  2
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h  42
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h  129
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h  10
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h  13
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h  2
-rw-r--r--  src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp  10
-rw-r--r--  src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp  4
-rw-r--r--  src/Simplex_tree/test/simplex_tree_unit_test.cpp  51
-rw-r--r--  src/Skeleton_blocker/concept/SkeletonBlockerDS.h  2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h  2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h  2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h  14
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h  4
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h  4
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h  16
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h  2
-rwxr-xr-x [-rw-r--r--]  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h  4
-rw-r--r--  src/Tangential_complex/include/gudhi/Tangential_complex.h  2
-rw-r--r--  src/Witness_complex/include/gudhi/Active_witness/Active_witness.h  2
-rw-r--r--  src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h  2
-rw-r--r--  src/Witness_complex/include/gudhi/Strong_witness_complex.h  2
-rw-r--r--  src/Witness_complex/include/gudhi/Witness_complex.h  2
-rw-r--r--  src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h  2
-rw-r--r--  src/cmake/modules/FindTBB.cmake  6
-rw-r--r--  src/cmake/modules/GUDHI_doxygen_target.cmake  14
-rw-r--r--  src/cmake/modules/GUDHI_modules.cmake  4
-rw-r--r--  src/cmake/modules/GUDHI_options.cmake  20
-rw-r--r--  src/cmake/modules/GUDHI_submodules.cmake  5
-rw-r--r--  src/cmake/modules/GUDHI_third_party_libraries.cmake  17
-rw-r--r--  src/cmake/modules/GUDHI_user_version_target.cmake  2
-rw-r--r--  src/common/doc/examples.h  3
-rw-r--r--  src/common/doc/footer.html  13
-rw-r--r--  src/common/doc/header.html  12
-rw-r--r--  src/common/doc/installation.h  262
-rw-r--r--  src/common/doc/main_page.md  11
-rwxr-xr-x [-rw-r--r--]  src/common/doc/stylesheet.css  1371
-rw-r--r--  src/common/include/gudhi/distance_functions.h  49
-rw-r--r--  src/common/include/gudhi/reader_utils.h  2
-rw-r--r--  src/common/include/gudhi/writing_persistence_to_file.h  4
-rw-r--r--  src/python/CMakeLists.txt  53
-rw-r--r--  src/python/doc/alpha_complex_user.rst  7
-rw-r--r--  src/python/doc/cubical_complex_sklearn_itf_ref.rst  102
-rw-r--r--  src/python/doc/cubical_complex_sum.inc  30
-rw-r--r--  src/python/doc/cubical_complex_tflow_itf_ref.rst  40
-rw-r--r--  src/python/doc/cubical_complex_user.rst  11
-rw-r--r--  src/python/doc/datasets.inc (renamed from src/python/doc/datasets_generators.inc)  4
-rw-r--r--  src/python/doc/datasets.rst (renamed from src/python/doc/datasets_generators.rst)  36
-rw-r--r--  src/python/doc/differentiation_sum.inc  12
-rw-r--r--  src/python/doc/img/bunny.png  bin 0 -> 48040 bytes
-rw-r--r--  src/python/doc/img/sklearn.png  bin 0 -> 9368 bytes
-rw-r--r--  src/python/doc/img/spiral_2d.png  bin 0 -> 279276 bytes
-rw-r--r--  src/python/doc/img/tensorflow.png  bin 0 -> 3846 bytes
-rw-r--r--  src/python/doc/index.rst  6
-rw-r--r--  src/python/doc/installation.rst  8
-rw-r--r--  src/python/doc/ls_simplex_tree_tflow_itf_ref.rst  53
-rw-r--r--  src/python/doc/nerve_gic_complex_user.rst  2
-rw-r--r--  src/python/doc/persistence_graphical_tools_user.rst  2
-rw-r--r--  src/python/doc/persistent_cohomology_user.rst  29
-rw-r--r--  src/python/doc/rips_complex_sum.inc  7
-rw-r--r--  src/python/doc/rips_complex_tflow_itf_ref.rst  48
-rw-r--r--  src/python/doc/rips_complex_user.rst  8
-rw-r--r--  src/python/doc/simplex_tree_sum.inc  25
-rwxr-xr-x  src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py  2
-rw-r--r--  src/python/gudhi/__init__.py.in  4
-rw-r--r--  src/python/gudhi/alpha_complex.pyx  29
-rw-r--r--  src/python/gudhi/datasets/remote.py  223
-rw-r--r--  src/python/gudhi/hera/wasserstein.cc  2
-rw-r--r--  src/python/gudhi/persistence_graphical_tools.py  349
-rw-r--r--  src/python/gudhi/representations/preprocessing.py  57
-rw-r--r--  src/python/gudhi/representations/vector_methods.py  18
-rw-r--r--  src/python/gudhi/simplex_tree.pxd  4
-rw-r--r--  src/python/gudhi/simplex_tree.pyx  24
-rw-r--r--  src/python/gudhi/sklearn/__init__.py  0
-rw-r--r--  src/python/gudhi/sklearn/cubical_persistence.py  110
-rw-r--r--  src/python/gudhi/tensorflow/__init__.py  5
-rw-r--r--  src/python/gudhi/tensorflow/cubical_layer.py  82
-rw-r--r--  src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py  87
-rw-r--r--  src/python/gudhi/tensorflow/rips_layer.py  93
-rw-r--r--  src/python/gudhi/wasserstein/barycenter.py  6
-rw-r--r--  src/python/include/Alpha_complex_interface.h  10
-rw-r--r--  src/python/include/Persistent_cohomology_interface.h  40
-rw-r--r--  src/python/include/Simplex_tree_interface.h  38
-rwxr-xr-x  src/python/test/test_alpha_complex.py  27
-rw-r--r--  src/python/test/test_diff.py  78
-rwxr-xr-x  src/python/test/test_dtm.py  16
-rw-r--r--  src/python/test/test_persistence_graphical_tools.py  121
-rw-r--r--  src/python/test/test_remote_datasets.py  87
-rwxr-xr-x  src/python/test/test_representations.py  21
-rw-r--r--  src/python/test/test_representations_preprocessing.py  39
-rwxr-xr-x  src/python/test/test_simplex_tree.py  24
-rw-r--r--  src/python/test/test_subsampling.py  4
173 files changed, 3272 insertions, 3689 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index f6a875dd..e2df5c87 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -58,10 +58,10 @@ jobs:
git submodule update
mkdir build
cd build
- cmake -DUSER_VERSION_DIR=version ..
+ cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
make user_version
cd version
- cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 -DWITH_GUDHI_REMOTE_TEST=ON .
cd python
python3 setup.py build_ext --inplace
make sphinx
@@ -79,7 +79,7 @@ jobs:
doxygen:
docker:
- - image: gudhi/ci_for_gudhi:latest
+ - image: gudhi/doxygen_for_gudhi:latest
steps:
- checkout
- run:
@@ -89,15 +89,15 @@ jobs:
git submodule update
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version ..
+ cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
make user_version
cd version
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
- make doxygen 2>&1 | tee dox.log
- grep warning dox.log
- cp dox.log html/
+ cmake -DWITH_GUDHI_THIRD_PARTY=OFF ..
+ make doxygen
+ grep warning doxygen.log
+ cp doxygen.log html/
cp -R html /tmp/doxygen
- store_artifacts:
diff --git a/.github/for_maintainers/tests_strategy.md b/.github/for_maintainers/tests_strategy.md
index c25acf9b..d0ae76ef 100644
--- a/.github/for_maintainers/tests_strategy.md
+++ b/.github/for_maintainers/tests_strategy.md
@@ -4,17 +4,22 @@ This document tries to sum up the tests strategy that has been put in place for
The aim is to help maintainers to anticipate third parties modifications, updates.
+## CMake options
+
+[CMake GUDHI options](../../src/cmake/modules/GUDHI_options.cmake) allows to activate/deactivate what should be built and tested.
+Note the special option `WITH_GUDHI_THIRD_PARTY` that, when set to `OFF`, accelerates doxygen documentation generation or `user_version` for instance.
+
## Builds
### Linux
-As all the third parties are already installed (thanks to docker), the compilations has been seperated by categories to be parallelized:
+As all the third parties are already installed (thanks to docker), the compilations have been separated in categories to be parallelized:
* examples (C++)
* tests (C++)
* utils (C++)
* doxygen (C++ documentation that is available in the artefacts)
-* python (including documentation and code coverage that are available in the artefacts)
+* python (including documentation and code coverage that are available in the artefacts; here the WITH_GUDHI_REMOTE_TEST option is enabled which adds datasets fetching test)
(cf. `.circleci/config.yml`)
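
*(Editor's illustration, not part of the patch: a minimal sketch of the `WITH_GUDHI_THIRD_PARTY=OFF` configuration described above, assuming a gudhi-devel checkout with submodules initialised.)*

```bash
mkdir build && cd build
# Skip the search for CGAL, Eigen, Boost, ...: enough for the documentation
# and user_version targets, and much faster to configure.
cmake -DWITH_GUDHI_THIRD_PARTY=OFF -DUSER_VERSION_DIR=version ..
make user_version
```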
@@ -25,9 +30,9 @@ Without CGAL, and, with or without Eigen builds are performed inside the docker
#### Update docker images
-C++ third parties installation are done thanks to apt on Ubuntu latest LTS.
+C++ third parties installation is done thanks to apt on Ubuntu latest LTS.
-Docker images need to be rebuild and push each time `.github/build-requirements`, `.github/test-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+Docker images need to be rebuilt and pushed each time `.github/build-requirements`, `.github/test-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
```bash
docker build -f Dockerfile_for_circleci_image -t gudhi/ci_for_gudhi:latest .
@@ -39,35 +44,35 @@ docker push gudhi/ci_for_gudhi_wo_cgal:latest
### Windows
-The compilations are not parallelized, as installation time (about 30 minutes) is too much compare to
+The compilations are not parallelized, as installation time (about 30 minutes) is too much compared to
build and tests timings (about 30 minutes). Builds and tests include:
* examples (C++)
* tests (C++)
* utils (C++)
-* python
+* python (here the WITH_GUDHI_REMOTE_TEST option is enabled which adds datasets fetching test)
Doxygen (C++) is not generated.
(cf. `azure-pipelines.yml`)
-C++ third parties installation are done thanks to [vcpkg](https://github.com/microsoft/vcpkg/).
-In case of installation issue, check in [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
+C++ third parties installation is done thanks to [vcpkg](https://github.com/microsoft/vcpkg/).
+In case of an installation issue, check in [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
### OSx
The compilations are not parallelized, but they should, as installation time (about 4 minutes) is
-negligeable compare to build and tests timings (about 30 minutes). Builds and tests include:
+negligible compared to build and tests timings (about 30 minutes). Builds and tests include:
* examples (C++)
* tests (C++)
* utils (C++)
-* python
+* python (here the WITH_GUDHI_REMOTE_TEST option is enabled which adds datasets fetching test)
* Doxygen (C++)
(cf. `azure-pipelines.yml`)
-C++ third parties installation are done thanks to [brew](https://formulae.brew.sh/formula/).
-In case of installation issue, check in formula issues.
+C++ third parties installation is done thanks to [brew](https://formulae.brew.sh/formula/).
+In case of an installation issue, check in formula issues.
## Pip packaging
@@ -80,9 +85,9 @@ Only the Linux pip package is based on a docker image (`gudhi/pip_for_gudhi` bas
### Update docker image
-C++ third parties installation are done thanks to yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
+C++ third parties installation is done thanks to yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
-Docker image need to be rebuild and push each time `.github/build-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+Docker image needs to be rebuilt and pushed each time `.github/build-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
As `.github/test-requirements` is not installed, no need to rebuild image when this file is modified.
```bash
diff --git a/.github/how_to_compile_gudhi_in_a_conda_env.md b/.github/how_to_compile_gudhi_in_a_conda_env.md
new file mode 100644
index 00000000..4acfca2e
--- /dev/null
+++ b/.github/how_to_compile_gudhi_in_a_conda_env.md
@@ -0,0 +1,93 @@
+# Install a conda development environment to compile GUDHI
+
+## Install miniconda
+
+Download the [installer](https://docs.conda.io/en/latest/miniconda.html) required by your system and follow the [instructions](https://conda.io/projects/conda/en/latest/user-guide/install/index.html).
+
+## Create a dedicated environment
+
+```bash
+conda install -c conda-forge mamba # installation with mamba is faster
+conda create --name gudhi
+conda activate gudhi
+mamba install -c conda-forge python cmake doxygen eigen cgal-cpp
+```
+
+Some of the requirements are in the gudhi-devel repository (please refer to
+[how to use github to contribute to gudhi](how_to_use_github_to_contribute_to_gudhi.md)).
+Once the gudhi-devel repository is cloned on your machine (`git clone...`) - let's call it `/workdir/gudhi-devel` i.e. -
+and once the submodules are initialised (`git submodule update --init`):
+
+```bash
+pip install -r ext/gudhi-deploy/build-requirements.txt
+pip install -r ext/gudhi-deploy/test-requirements.txt # pytorch can be painful to install - not mandatory
+```
+
+## Compilation
+
+In order to compile all c++ utilities, examples, benchmarks, unitary tests, and python module:
+```bash
+cd /workdir/gudhi-devel
+rm -rf build; mkdir build # /!\ any existing build folder will be removed
+cd build
+# To build all even examples and benchmarks
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON ..
+```
+
+### Specific python compilation
+
+In order to compile only python module
+```bash
+cd /workdir/gudhi-devel
+rm -rf build; mkdir build # /!\ any existing build folder will be removed
+cd build
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX ..
+cd src/python
+# To build python module in parallel
+python setup.py build_ext -j 16 --inplace # 16 is the number of CPU that are used to compile the python module. Can be any other value.
+# to clean the build
+# python setup.py clean --all
+```
+
+In order to use freshly compiled gudhi python module:
+```bash
+PYTHONPATH=/workdir/gudhi-devel/build/src/python python # or ipython, jupyter, ...
+```
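
*(Editor's illustration: a quick check that the freshly compiled module is the one being imported, assuming the build above succeeded.)*

```bash
PYTHONPATH=/workdir/gudhi-devel/build/src/python python -c "import gudhi; print(gudhi.__version__)"
```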
+
+### Specific C++ documentation generation
+
+```bash
+cd /workdir/gudhi-devel
+rm -rf build; mkdir build # /!\ any existing build folder will be removed
+cd build
+# python OFF to prevent python modules search makes cmake faster
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version ..
+make user_version;
+cd version
+mkdir build
+cd build
+# python OFF to prevent python modules search makes cmake faster
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DWITH_GUDHI_PYTHON=OFF ..
+make doxygen 2>&1 | tee dox.log
+grep warning dox.log # Warnings can be lost with parallel doxygen
+firefox html/index.html # [optional] To display the c++ documentation. Anything else than firefox can be used.
+```
+
+### Specific python documentation generation
+
+```bash
+cd /workdir/gudhi-devel
+rm -rf build; mkdir build # /!\ any existing build folder will be removed
+cd build
+# python OFF to prevent python modules search makes cmake faster - it is the next cmake call in user version that matters
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version ..
+make user_version;
+cd version
+mkdir build
+cd build
+cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH=$CONDA_PREFIX ..
+cd python
+# To build python module in parallel
+python setup.py build_ext -j 16 --inplace # 16 is the number of CPU that are used to compile the python module. Can be any other value.
+firefox sphinx/index.html # [optional] To display the python documentation. Anything else than firefox can be used.
+``` \ No newline at end of file
diff --git a/.github/next_release.md b/.github/next_release.md
index f8085513..64bda353 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,30 +1,19 @@
-We are pleased to announce the release 3.X.X of the GUDHI library.
+We are pleased to announce the release 3.7.0 of the GUDHI library.
As a major new feature, the GUDHI library now offers ...
We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.5.0:
+Below is a list of changes made since GUDHI 3.6.0:
-- [Alpha complex](https://gudhi.inria.fr/python/latest/alpha_complex_user.html)
- - the python weighted version for alpha complex is now available in any dimension D.
- - `alpha_complex = gudhi.AlphaComplex(off_file='/data/points/tore3D_300.off')` is deprecated, please use [read_points_from_off_file](https://gudhi.inria.fr/python/latest/point_cloud.html#gudhi.read_points_from_off_file) instead.
-
-- [Representations](https://gudhi.inria.fr/python/latest/representations.html#gudhi.representations.vector_methods.BettiCurve)
- - A more flexible Betti curve class capable of computing exact curves
-
-- [Simplex tree](https://gudhi.inria.fr/python/latest/simplex_tree_ref.html)
- - `__deepcopy__`, `copy` and copy constructors
-
-- Installation
- - Boost &ge; 1.66.0 is now required (was &ge; 1.56.0).
- - Python >= 3.5 and cython >= 0.27 are now required.
+- [Module](link)
+ - ...
- [Module](link)
- ...
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.5.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.6.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.6.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.7.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index 954b59d5..30b0bd94 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -30,14 +30,14 @@ jobs:
run: |
mkdir build
cd ".\build\"
- cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ cmake -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
Get-Location
dir
cd ".\src\python\"
- cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
python setup.py bdist_wheel
- ls dist
+ ls ".\dist\"
cd ".\dist\"
Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
- name: Test python wheel
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 962ae68a..48a98036 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -33,14 +33,14 @@ jobs:
run: |
mkdir build
cd ".\build\"
- cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ cmake -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
Get-Location
dir
cd ".\src\python\"
- cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
python setup.py bdist_wheel
- ls dist
+ ls ".\dist\"
cd ".\dist\"
Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
- name: Test python wheel
diff --git a/.gitignore b/.gitignore
index 6aab7337..9f427fb2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
# Classical CMake build directory
-build/
+/*build*
# Generated by Cython
src/python/gudhi/*.cpp
@@ -16,5 +16,3 @@ data/points/human.off_sc.txt
# IDE specific
# CLion
.idea/
-cmake-build-debug/
-
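
*(Editor's illustration of the new ignore rule: a leading `/` anchors a pattern to the repository root and `*` does not cross directory separators, so one root-anchored glob now covers the build directory variants that were previously listed separately.)*

```
/*build*    # ignores /build/, /build-debug/, /cmake-build-debug/, ... at the root only
```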
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index b7f93799..1dab47ab 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
# Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 6)
+set (GUDHI_MINOR_VERSION 7)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
set (GUDHI_PATCH_VERSION 0a0)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ac877eea..6c233459 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,8 +13,12 @@ set(GUDHI_MISSING_MODULES "" CACHE INTERNAL "GUDHI_MISSING_MODULES")
# This variable is used by Cython CMakeLists.txt and by GUDHI_third_party_libraries to know its path
set(GUDHI_PYTHON_PATH "src/python")
-# For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
-include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+include(GUDHI_submodules)
+
+if (WITH_GUDHI_THIRD_PARTY)
+ # For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
+ include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+endif()
include(GUDHI_compilation_flags)
@@ -52,7 +56,9 @@ foreach(GUDHI_MODULE ${GUDHI_MODULES})
endforeach()
endforeach()
-add_subdirectory(src/GudhUI)
+if (WITH_GUDHI_THIRD_PARTY)
+ add_subdirectory(src/GudhUI)
+endif()
if (WITH_GUDHI_PYTHON)
# specific for cython module
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 21664244..f54e593f 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -5,7 +5,7 @@ jobs:
timeoutInMinutes: 0
cancelTimeoutInMinutes: 60
pool:
- vmImage: macOS-10.15
+ vmImage: macOS-latest
variables:
pythonVersion: '3.7'
cmakeBuildType: Release
@@ -25,14 +25,14 @@ jobs:
python -m pip install --user -r ext/gudhi-deploy/test-requirements.txt
python -m pip uninstall -y pykeops
brew update || true
- brew install graphviz doxygen boost eigen gmp mpfr tbb cgal || true
+ brew install ninja graphviz doxygen boost eigen gmp mpfr tbb cgal || true
displayName: 'Install build dependencies'
- bash: |
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON ..
- make
- make doxygen
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -GNinja -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON ..
+ ninja
+ ninja doxygen
ctest --output-on-failure
displayName: 'Build, test and documentation generation'
@@ -62,22 +62,30 @@ jobs:
# No PyKeOps on windows, let's workaround this one.
for /F "tokens=*" %%A in (ext\gudhi-deploy\test-requirements.txt) do python -m pip install %%A
vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
+ choco install -y ninja --force --force-dependencies
displayName: 'Install build dependencies'
- script: |
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
mkdir build
cd build
- cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release $(cmakeVcpkgFlags) $(cmakeFlags) ..
- MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64
+ cmake -DCMAKE_BUILD_TYPE=Release -G "Ninja" -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON $(cmakeVcpkgFlags) $(cmakeFlags) ..
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
+ ninja
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
ctest --output-on-failure -C Release -E diff_files
- cmake -DWITH_GUDHI_PYTHON=ON .
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
+ cmake -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON .
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
cd src\python
- copy "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- copy "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
copy "C:\vcpkg\installed\x64-windows\bin\tbb.dll" ".\gudhi\"
copy "C:\vcpkg\installed\x64-windows\bin\tbbmalloc.dll" ".\gudhi\"
python setup.py build_ext --inplace
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
SET PYTHONPATH=%CD%;%PYTHONPATH%
echo %PYTHONPATH%
ctest --output-on-failure -C Release
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
displayName: 'Build and test'
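
*(Editor's note: cmd.exe does not abort a script when a command fails, which is why each build command above is followed by an explicit error check. A minimal sketch of the pattern, with an illustrative command:)*

```bat
cmake --build .
REM Stop the pipeline step and propagate the failing exit code.
IF %errorlevel% NEQ 0 exit /b %errorlevel%
```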
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index e75e8db2..0a3ef43d 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -14,7 +14,7 @@ publisher = {JMLR.org},
title = {{Statistical analysis and parameter selection for Mapper}},
volume = {19},
year = {2018},
-url = {http://jmlr.org/papers/v19/17-291.html},
+url = {https://jmlr.org/papers/v19/17-291.html},
}
@inproceedings{Dey13,
@@ -151,10 +151,10 @@ language={English},
%% hal-00922572, version 2
-%% http://hal.inria.fr/hal-00922572
+%% https://hal.inria.fr/hal-00922572
@techreport{boissonnat:hal-00922572,
hal_id = {hal-00922572},
- url = {http://hal.inria.fr/hal-00922572},
+ url = {https://hal.inria.fr/hal-00922572},
title = {Computing Persistent Homology with Various Coefficient Fields in a Single Pass},
author = {Boissonnat, Jean-Daniel and Maria, Cl{\'e}ment},
abstract = {{In this article, we introduce the multi-field persistence diagram for the persistence homology of a filtered complex. It encodes compactly the superimposition of the persistence diagrams of the complex with several field coefficients, and provides a substantially more precise description of the topology of the filtered complex. Specifically, the multi-field persistence diagram encodes the Betti numbers of integral homology and the prime divisors of the torsion coefficients of the underlying shape. Moreover, it enjoys similar stability properties as the ones of standard persistence diagrams, with the appropriate notion of distance. These properties make the multi-field persistence diagram a useful tool in computational topology.}},
@@ -167,7 +167,7 @@ language={English},
number = {RR-8436},
year = {2013},
month = Dec,
- pdf = {http://hal.inria.fr/hal-00922572/PDF/RR-8436.pdf},
+ pdf = {https://hal.inria.fr/hal-00922572v5/document},
}
@@ -323,7 +323,7 @@ language={English},
%------------------------------------------------------------------
@article{rips2012,
hal_id = {hal-00785072},
- url = {http://hal.archives-ouvertes.fr/hal-00785072},
+ url = {https://hal.archives-ouvertes.fr/hal-00785072},
title = {{Vietoris-Rips Complexes also Provide Topologically Correct Reconstructions of Sampled Shapes}},
author = {Attali, Dominique and Lieutier, Andr{\'e} and Salinas, David},
keywords = {Shape reconstruction \sep Rips complexes \sep clique complexes \sep \v Cech complexes ; homotopy equivalence ; collapses ; high dimensions},
@@ -1115,7 +1115,7 @@ language={English}
author = {Nicholas J. Cavanna and Mahmoodreza Jahanseir and Donald R. Sheehy},
booktitle = {Proceedings of the Canadian Conference on Computational Geometry},
title = {A Geometric Perspective on Sparse Filtrations},
- url = {http://research.cs.queensu.ca/cccg2015/CCCG15-papers/01.pdf},
+ url = {https://research.cs.queensu.ca/cccg2015/CCCG15-papers/01.pdf},
year = {2015}
}
@@ -1151,7 +1151,7 @@ language={English}
editor = {Lars Arge and J{\'a}nos Pach},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
address = {Dagstuhl, Germany},
- URL = {http://drops.dagstuhl.de/opus/volltexte/2015/5098},
+ URL = {https://drops.dagstuhl.de/opus/volltexte/2015/5098/},
URN = {urn:nbn:de:0030-drops-50981},
doi = {10.4230/LIPIcs.SOCG.2015.642},
annote = {Keywords: Simplicial complex, Compact data structures, Automaton, NP-hard}
@@ -1164,7 +1164,7 @@ language={English}
journal = {CoRR},
volume = {abs/1607.08449},
year = {2016},
- url = {http://arxiv.org/abs/1607.08449},
+ url = {https://arxiv.org/abs/1607.08449},
archivePrefix = {arXiv},
eprint = {1607.08449},
timestamp = {Mon, 13 Aug 2018 16:46:26 +0200},
@@ -1341,12 +1341,18 @@ doi="10.1007/978-3-030-43408-3_2",
editor = {Sergio Cabello and Danny Z. Chen},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum f{\"u}r Informatik},
address = {Dagstuhl, Germany},
- URL = {https://drops.dagstuhl.de/opus/volltexte/2020/12177},
+ URL = {https://drops.dagstuhl.de/opus/volltexte/2020/12177/},
URN = {urn:nbn:de:0030-drops-121777},
doi = {10.4230/LIPIcs.SoCG.2020.19},
annote = {Keywords: Computational Topology, Topological Data Analysis, Edge Collapse, Simple Collapse, Persistent homology}
}
+@misc{edgecollapsearxiv,
+ author = {Marc Glisse and Siddharth Pritam},
+ title = {{Swap, Shift and Trim to Edge Collapse a Filtration}},
+ url = {https://arxiv.org/abs/2203.07022},
+}
+
@phdthesis{KachanovichThesis,
TITLE = {{Meshing submanifolds using Coxeter triangulations}},
AUTHOR = {Kachanovich, Siargey},
@@ -1360,4 +1366,4 @@ doi="10.1007/978-3-030-43408-3_2",
PDF = {https://hal.inria.fr/tel-02419148v2/file/2019AZUR4072.pdf},
HAL_ID = {tel-02419148},
HAL_VERSION = {v2},
-} \ No newline at end of file
+}
diff --git a/biblio/how_to_cite_gudhi.bib.in b/biblio/how_to_cite_gudhi.bib.in
index 54d10857..579dbf41 100644
--- a/biblio/how_to_cite_gudhi.bib.in
+++ b/biblio/how_to_cite_gudhi.bib.in
@@ -78,7 +78,7 @@
}
@incollection{gudhi:SubSampling
-, author = "Cl\'ement Jamin, Siargey Kachanovich"
+, author = "Cl\'ement Jamin and Siargey Kachanovich"
, title = "Subsampling"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
@@ -108,7 +108,7 @@
}
@incollection{gudhi:RipsComplex
-, author = "Cl\'ement Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse"
+, author = "Cl\'ement Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse"
, title = "Rips complex"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
@@ -158,7 +158,7 @@
}
@incollection{gudhi:Collapse
-, author = "Siddharth Pritam"
+, author = "Siddharth Pritam and Marc Glisse"
, title = "Edge collapse"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
diff --git a/ext/gudhi-deploy b/ext/gudhi-deploy
-Subproject 290ade1086bedbc96a35df886cadecabbf4072e
+Subproject e9e9a4878731853d2d3149a5eac30df338a8197
diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index 5ab23720..41e5e16d 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -107,6 +107,7 @@ Table of Contents
* \subsection filtrationcomputation Filtration value computation algorithm
* <br>
* \f$
+ * \begin{array}{l}
* \textbf{for } \text{i : dimension } \rightarrow 0 \textbf{ do}\\
* \quad \textbf{for all } \sigma \text{ of dimension i}\\
* \quad\quad \textbf{if } \text{filtration(} \sigma ) \text{ is NaN} \textbf{ then}\\
@@ -127,6 +128,7 @@ Table of Contents
* \textbf{end for}\\
* \text{make_filtration_non_decreasing()}\\
* \text{prune_above_filtration()}\\
+ * \end{array}
* \f$
*
* \subsubsection dimension2 Dimension 2
@@ -164,11 +166,11 @@ Table of Contents
* <b>Requires:</b> \ref eigen &ge; 3.1.0 and \ref cgal &ge; 5.1.0.
*
* A weighted version for Alpha complex is available (cf. Alpha_complex). It is like a usual Alpha complex, but based
- * on a <a href="https://doc.cgal.org/latest/Triangulation/index.html#title20">CGAL regular triangulation</a> instead
+ * on a <a href="https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT">CGAL regular triangulation</a> instead
* of Delaunay.
*
* This example builds the CGAL weighted alpha shapes from a small molecule, and initializes the alpha complex with
- * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13">CGAL 3d
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
* weighted alpha shapes</a>.
*
* Then, it is asked to display information about the alpha complex.
@@ -212,7 +214,7 @@ Table of Contents
* Gudhi::alpha_complex::complexity::EXACT.
*
* This example builds the CGAL 3d weighted alpha shapes from a small molecule, and initializes the alpha complex with
- * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13">CGAL 3d
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
* weighted alpha shapes</a>.
*
* Then, it is asked to display information about the alpha complex.
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index 028ec9bb..aec8c1b1 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -69,7 +69,7 @@ template<typename D> struct Is_Epeck_D<CGAL::Epeck_d<D>> { static const bool val
* \ingroup alpha_complex
*
* \details
- * The data structure is constructing a CGAL Delaunay triangulation (for more informations on CGAL Delaunay
+ * The data structure is constructing a CGAL Delaunay triangulation (for more information on CGAL Delaunay
* triangulation, please refer to the corresponding chapter in page http://doc.cgal.org/latest/Triangulation/) from a
* range of points or from an OFF file (cf. Points_off_reader).
*
@@ -461,10 +461,10 @@ class Alpha_complex {
void propagate_alpha_filtration(SimplicialComplexForAlpha& complex, Simplex_handle f_simplex) {
// From SimplicialComplexForAlpha type required to assign filtration values.
using Filtration_value = typename SimplicialComplexForAlpha::Filtration_value;
- using Vertex_handle = typename SimplicialComplexForAlpha::Vertex_handle;
// ### Foreach Tau face of Sigma
- for (auto f_boundary : complex.boundary_simplex_range(f_simplex)) {
+ for (auto face_opposite_vertex : complex.boundary_opposite_vertex_simplex_range(f_simplex)) {
+ auto f_boundary = face_opposite_vertex.first;
#ifdef DEBUG_TRACES
std::clog << " | --------------------------------------------------\n";
std::clog << " | Tau ";
@@ -485,18 +485,10 @@ class Alpha_complex {
#endif // DEBUG_TRACES
// ### Else
} else {
- // Find which vertex of f_simplex is missing in f_boundary. We could actually write a variant of boundary_simplex_range that gives pairs (f_boundary, vertex). We rely on the fact that simplex_vertex_range is sorted.
- auto longlist = complex.simplex_vertex_range(f_simplex);
- auto shortlist = complex.simplex_vertex_range(f_boundary);
- auto longiter = std::begin(longlist);
- auto shortiter = std::begin(shortlist);
- auto enditer = std::end(shortlist);
- while(shortiter != enditer && *longiter == *shortiter) { ++longiter; ++shortiter; }
- Vertex_handle extra = *longiter;
auto const& cache=get_cache(complex, f_boundary);
- bool is_gab = kernel_.is_gabriel(cache, get_point_(extra));
+ bool is_gab = kernel_.is_gabriel(cache, get_point_(face_opposite_vertex.second));
#ifdef DEBUG_TRACES
- std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << extra << std::endl;
+ std::clog << " | Tau is_gabriel(Sigma)=" << is_gab << " - vertexForGabriel=" << face_opposite_vertex.second << std::endl;
#endif // DEBUG_TRACES
// ### If Tau is not Gabriel of Sigma
if (false == is_gab) {
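
*(Editor's illustration of the `boundary_opposite_vertex_simplex_range` API the patch switches to: each element pairs a codimension-1 face of a simplex with the vertex that face omits, making the manual vertex search deleted above unnecessary. A minimal self-contained sketch:)*

```cpp
#include <gudhi/Simplex_tree.h>
#include <iostream>
#include <vector>

int main() {
  Gudhi::Simplex_tree<> st;
  std::vector<int> triangle = {0, 1, 2};
  st.insert_simplex_and_subfaces(triangle);
  auto sh = st.find(triangle);
  // Each element is a pair (boundary face handle, vertex of {0,1,2} absent from that face).
  for (const auto& face_opposite_vertex : st.boundary_opposite_vertex_simplex_range(sh)) {
    std::clog << "opposite vertex: " << face_opposite_vertex.second << "\n";
  }
  return 0;
}
```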
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index df5c630e..562ef139 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -57,8 +57,6 @@ namespace Gudhi {
namespace alpha_complex {
-thread_local double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001;
-
// Value_from_iterator returns the filtration value from an iterator on alpha shapes values
//
// FAST SAFE EXACT
@@ -100,7 +98,7 @@ struct Value_from_iterator<complexity::EXACT> {
* \tparam Periodic Boolean used to set/unset the periodic version of Alpha_complex_3d. Default value is false.
*
* For the weighted version, weights values are explained on CGAL
- * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0">Alpha shapes 3d</a> and
+ * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions">Alpha shapes 3d</a> and
* <a href="https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation">Regular
* triangulation</a> documentation.
*
diff --git a/src/Alpha_complex/utilities/alphacomplex.md b/src/Alpha_complex/utilities/alphacomplex.md
index 0d3c6027..1e3b8fab 100644
--- a/src/Alpha_complex/utilities/alphacomplex.md
+++ b/src/Alpha_complex/utilities/alphacomplex.md
@@ -64,7 +64,7 @@ N.B.:
* Weights values are explained on CGAL
[dD Triangulations](https://doc.cgal.org/latest/Triangulation/index.html)
and
-[Regular triangulation](https://doc.cgal.org/latest/Triangulation/index.html#title20) documentation.
+[Regular triangulation](https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT) documentation.
## alpha_complex_3d_persistence ##
@@ -131,6 +131,6 @@ N.B.:
* `alpha_complex_3d_persistence` only accepts OFF files in dimension 3.
* Filtration values are alpha square values.
* Weights values are explained on CGAL
-[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0)
+[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions)
and
[Regular triangulation](https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation) documentation.
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
index aa255ec2..4a6af3a4 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex.h
@@ -237,7 +237,7 @@ class Bitmap_cubical_complex : public T {
* Filtration_simplex_iterator class provides an iterator though the whole structure in the order of filtration.
* Secondary criteria for filtration are:
* (1) Dimension of a cube (lower dimensional comes first).
- * (2) Position in the data structure (the ones that are earlies in the data structure comes first).
+ * (2) Position in the data structure (the ones that are earliest in the data structure come first).
**/
class Filtration_simplex_range;
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index f8f80ded..bafe7981 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -43,7 +43,7 @@ namespace cubical_complex {
* Each cell is represented by a single
* bit (in case of black and white bitmaps, or by a single element of a type T
* (here T is a filtration type of a bitmap, typically a double).
- * All the informations needed for homology and
+ * All the information needed for homology and
* persistent homology computations (like dimension of a cell, boundary and
* coboundary elements of a cell, are then obtained from the
* position of the element in C.
diff --git a/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h b/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
index c65e6082..1d56f0b4 100644
--- a/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
+++ b/src/Bottleneck_distance/include/gudhi/Neighbors_finder.h
@@ -86,7 +86,7 @@ class Neighbors_finder {
};
/** \internal \brief data structure used to find any point (including projections) in V near to a query point from U
- * (which can be a projection) in a layered graph layer given as parmeter.
+ * (which can be a projection) in a layered graph layer given as parameter.
*
* V points have to be added manually using their index and before the first pull. A neighbor pulled is automatically
* removed.
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 8023e04c..f9f77ef7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -12,8 +12,12 @@ set(GUDHI_MISSING_MODULES "" CACHE INTERNAL "GUDHI_MISSING_MODULES")
# This variable is used by Cython CMakeLists.txt and by GUDHI_third_party_libraries to know its path
set(GUDHI_PYTHON_PATH "python")
-# For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
-include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+include(GUDHI_submodules)
+
+if (WITH_GUDHI_THIRD_PARTY)
+ # For third parties libraries management - To be done last as CGAL updates CMAKE_MODULE_PATH
+ include(GUDHI_third_party_libraries NO_POLICY_SCOPE)
+endif()
include(GUDHI_compilation_flags)
@@ -67,7 +71,9 @@ foreach(GUDHI_MODULE ${GUDHI_MODULES})
endforeach()
endforeach()
-add_subdirectory(GudhUI)
+if (WITH_GUDHI_THIRD_PARTY)
+ add_subdirectory(GudhUI)
+endif()
message("++ GUDHI_MODULES list is:\"${GUDHI_MODULES}\"")
message("++ GUDHI_MISSING_MODULES list is:\"${GUDHI_MISSING_MODULES}\"")
diff --git a/src/Cech_complex/benchmark/CMakeLists.txt b/src/Cech_complex/benchmark/CMakeLists.txt
index bc54c0f3..a6b3d70b 100644
--- a/src/Cech_complex/benchmark/CMakeLists.txt
+++ b/src/Cech_complex/benchmark/CMakeLists.txt
@@ -1,13 +1,15 @@
project(Cech_complex_benchmark)
-# Do not forget to copy test files in current binary dir
-file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ # Do not forget to copy test files in current binary dir
+ file(COPY "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-if(TARGET Boost::filesystem)
- add_executable(cech_complex_benchmark cech_complex_benchmark.cpp)
- target_link_libraries(cech_complex_benchmark Boost::filesystem)
+ if(TARGET Boost::filesystem)
+ add_executable(cech_complex_benchmark cech_complex_benchmark.cpp)
+ target_link_libraries(cech_complex_benchmark Boost::filesystem)
- if (TBB_FOUND)
- target_link_libraries(cech_complex_benchmark ${TBB_LIBRARIES})
+ if (TBB_FOUND)
+ target_link_libraries(cech_complex_benchmark ${TBB_LIBRARIES})
+ endif()
endif()
-endif() \ No newline at end of file
+endif()
diff --git a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
index 2e4adce4..a0e727be 100644
--- a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
+++ b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
@@ -10,12 +10,13 @@
#include <gudhi/Points_off_io.h>
#include <gudhi/distance_functions.h>
-#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Clock.h>
#include <gudhi/Rips_complex.h>
#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/Miniball.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
#include "boost/filesystem.hpp" // includes all needed Boost.Filesystem declarations
@@ -26,107 +27,81 @@
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
using Point = std::vector<Filtration_value>;
-using Point_cloud = std::vector<Point>;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
using Rips_complex = Gudhi::rips_complex::Rips_complex<Filtration_value>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
-
-class Minimal_enclosing_ball_radius {
- public:
- // boost::range_value is not SFINAE-friendly so we cannot use it in the return type
- template <typename Point>
- typename std::iterator_traits<typename boost::range_iterator<Point>::type>::value_type operator()(
- const Point& p1, const Point& p2) const {
- // Type def
- using Point_cloud = std::vector<Point>;
- using Point_iterator = typename Point_cloud::const_iterator;
- using Coordinate_iterator = typename Point::const_iterator;
- using Min_sphere =
- typename Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- Point_cloud point_cloud;
- point_cloud.push_back(p1);
- point_cloud.push_back(p2);
-
- GUDHI_CHECK((p1.end() - p1.begin()) == (p2.end() - p2.begin()), "inconsistent point dimensions");
- Min_sphere min_sphere(p1.end() - p1.begin(), point_cloud.begin(), point_cloud.end());
- return std::sqrt(min_sphere.squared_radius());
- }
-};
+template<typename Kernel>
+Simplex_tree benchmark_cech(const std::string& off_file_points, const Filtration_value& radius, const int& dim_max, const bool exact) {
+ using Point_cgal = typename Kernel::Point_d;
+ using Points_off_reader_cgal = Gudhi::Points_off_reader<Point_cgal>;
+ using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
+
+ // Extract the points from the file filepoints
+ Points_off_reader_cgal off_reader_cgal(off_file_points);
+
+ Gudhi::Clock cech_clock("Cech computation");
+ Cech_complex cech_complex_from_points(off_reader_cgal.get_point_cloud(), radius, exact);
+ Simplex_tree cech_stree;
+ cech_complex_from_points.create_complex(cech_stree, dim_max);
+
+ // ------------------------------------------
+ // Display information about the Cech complex
+ // ------------------------------------------
+ double cech_sec = cech_clock.num_seconds();
+ std::clog << cech_sec << " ; ";
+ return cech_stree;
+}
int main(int argc, char* argv[]) {
- std::string off_file_points = "tore3D_1307.off";
- Filtration_value threshold = 1e20;
-
- // Extract the points from the file filepoints
- Points_off_reader off_reader(off_file_points);
-
- Gudhi::Clock euclidean_clock("Gudhi::Euclidean_distance");
- // Compute the proximity graph of the points
- Proximity_graph euclidean_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance());
-
- std::clog << euclidean_clock << std::endl;
-
- Gudhi::Clock miniball_clock("Minimal_enclosing_ball_radius");
- // Compute the proximity graph of the points
- Proximity_graph miniball_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Minimal_enclosing_ball_radius());
- std::clog << miniball_clock << std::endl;
-
- Gudhi::Clock common_miniball_clock("Gudhi::Minimal_enclosing_ball_radius()");
- // Compute the proximity graph of the points
- Proximity_graph common_miniball_prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(
- off_reader.get_point_cloud(), threshold, Gudhi::Minimal_enclosing_ball_radius());
- std::clog << common_miniball_clock << std::endl;
-
- boost::filesystem::path full_path(boost::filesystem::current_path());
- std::clog << "Current path is : " << full_path << std::endl;
-
- std::clog << "File name;Radius;Rips time;Cech time; Ratio Rips/Cech time;Rips nb simplices;Cech nb simplices;"
- << std::endl;
- boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end
- for (boost::filesystem::directory_iterator itr(boost::filesystem::current_path()); itr != end_itr; ++itr) {
- if (!boost::filesystem::is_directory(itr->status())) {
- if (itr->path().extension() == ".off") // see below
- {
- Points_off_reader off_reader(itr->path().string());
- Point p0 = off_reader.get_point_cloud()[0];
-
- for (Filtration_value radius = 0.1; radius < 0.4; radius += 0.1) {
- std::clog << itr->path().stem() << ";";
- std::clog << radius << ";";
- Gudhi::Clock rips_clock("Rips computation");
- Rips_complex rips_complex_from_points(off_reader.get_point_cloud(), radius,
- Gudhi::Minimal_enclosing_ball_radius());
- Simplex_tree rips_stree;
- rips_complex_from_points.create_complex(rips_stree, p0.size() - 1);
- // ------------------------------------------
- // Display information about the Rips complex
- // ------------------------------------------
- double rips_sec = rips_clock.num_seconds();
- std::clog << rips_sec << ";";
-
- Gudhi::Clock cech_clock("Cech computation");
- Cech_complex cech_complex_from_points(off_reader.get_point_cloud(), radius);
- Simplex_tree cech_stree;
- cech_complex_from_points.create_complex(cech_stree, p0.size() - 1);
- // ------------------------------------------
- // Display information about the Cech complex
- // ------------------------------------------
- double cech_sec = cech_clock.num_seconds();
- std::clog << cech_sec << ";";
- std::clog << cech_sec / rips_sec << ";";
-
- assert(rips_stree.num_simplices() >= cech_stree.num_simplices());
- std::clog << rips_stree.num_simplices() << ";";
- std::clog << cech_stree.num_simplices() << ";" << std::endl;
+ boost::filesystem::path full_path(boost::filesystem::current_path());
+ std::clog << "Current path is : " << full_path << std::endl;
+
+ std::clog << "File name ; Radius ; Rips time ; Dim-3 Fast Cech time ; Dynamic_dim Fast Cech time ; "
+ "Dim-3 Safe Cech time ; Dynamic_dim Safe Cech time ; Dim-3 Exact Cech time ; Dynamic_dim Exact Cech time ; "
+ "Cech nb simplices ; Rips nb simplices;"
+ << std::endl;
+ boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end
+ // For every ".off" file in the current directory, and for 3 predefined thresholds, compare Rips and various Cech constructions
+ for (boost::filesystem::directory_iterator itr(boost::filesystem::current_path()); itr != end_itr; ++itr) {
+ if (!boost::filesystem::is_directory(itr->status())) {
+ if (itr->path().extension() == ".off") {
+ Points_off_reader off_reader(itr->path().string());
+ Point p0 = off_reader.get_point_cloud()[0];
+ // Loop over the different thresholds
+ for (Filtration_value radius = 0.1; radius < 0.35; radius += 0.1) {
+ std::clog << itr->path().stem() << " ; ";
+ std::clog << radius << " ; ";
+
+ Gudhi::Clock rips_clock("Rips computation");
+ Rips_complex rips_complex_from_points(off_reader.get_point_cloud(), radius, Gudhi::Euclidean_distance());
+ Simplex_tree rips_stree;
+ int dim_max = p0.size() - 1;
+ rips_complex_from_points.create_complex(rips_stree, dim_max);
+ // ------------------------------------------
+ // Display information about the Rips complex
+ // ------------------------------------------
+ double rips_sec = rips_clock.num_seconds();
+ std::clog << rips_sec << " ; ";
+
+ // --------------
+ // Cech complex
+ // --------------
+ // Fast
+ benchmark_cech<CGAL::Epick_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, false);
+ benchmark_cech<CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, false);
+ // Safe
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, false);
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, false);
+ // Exact
+ benchmark_cech<CGAL::Epeck_d<CGAL::Dimension_tag<3>>>(itr->path().string(), radius, dim_max, true);
+ auto cech_stree = benchmark_cech<CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>(itr->path().string(), radius, dim_max, true);
+
+ std::clog << cech_stree.num_simplices() << " ; ";
+ std::clog << rips_stree.num_simplices() << ";" << std::endl;
+ }
+ }
}
- }
}
- }
- return 0;
+ return 0;
}
diff --git a/src/Cech_complex/concept/SimplicialComplexForCech.h b/src/Cech_complex/concept/SimplicialComplexForCech.h
index 00c7df3a..6202fe92 100644
--- a/src/Cech_complex/concept/SimplicialComplexForCech.h
+++ b/src/Cech_complex/concept/SimplicialComplexForCech.h
@@ -47,8 +47,8 @@ struct SimplicialComplexForCech {
};
-} // namespace alpha_complex
+} // namespace cech_complex
} // namespace Gudhi
-#endif // CONCEPT_ALPHA_COMPLEX_SIMPLICIAL_COMPLEX_FOR_ALPHA_H_
+#endif // CONCEPT_CECH_COMPLEX_SIMPLICIAL_COMPLEX_FOR_CECH_H_
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 698f9749..595fb64b 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -28,7 +28,7 @@ namespace cech_complex {
* <a target="_blank" href="https://en.wikipedia.org/wiki/Simplicial_complex">simplicial complex</a> constructed
* from a proximity graph. The set of all simplices is filtered by the radius of their minimal enclosing ball.
*
- * The input shall be a point cloud in an Euclidean space.
+ * The input shall be a range of points where a point is defined as a <a target="_blank" href="https://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Point__d.html">CGAL kernel Point_d</a>.
*
* \remark For people only interested in the topology of the \ref cech_complex (for instance persistence),
* \ref alpha_complex is equivalent to the \ref cech_complex and much smaller if you do not bound the radii.
@@ -37,8 +37,7 @@ namespace cech_complex {
* \subsection cechalgorithm Algorithm
*
* Cech_complex first builds a proximity graph from a point cloud.
- * The filtration value of each edge of the `Gudhi::Proximity_graph` is computed from
- * `Gudhi::Minimal_enclosing_ball_radius` function.
+ * The filtration value of each edge of the `Gudhi::Proximity_graph` is computed using CGAL kernel functions.
*
* All edges that have a filtration value strictly greater than a user given maximal radius value, \f$max\_radius\f$,
* are not inserted into the complex.
@@ -60,20 +59,9 @@ namespace cech_complex {
*
* \image html "cech_complex_representation.png" "ÄŒech complex expansion"
*
- * The minimal ball radius computation is insured by
- * <a target="_blank" href="https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html">
- * the miniball software (V3.0)</a> - Smallest Enclosing Balls of Points - and distributed with GUDHI.
- * Please refer to
- * <a target="_blank" href="https://people.inf.ethz.ch/gaertner/subdir/texts/own_work/esa99_final.pdf">
- * the miniball software design description</a> for more information about this computation.
- *
 * This radius computation is the reason why the Cech_complex takes much more time to compute than the
 * \ref rips_complex, but it offers more topological guarantees.
*
- * If the Cech_complex interfaces are not detailed enough for your need, please refer to
- * <a href="cech_complex_step_by_step_8cpp-example.html">
- * cech_complex_step_by_step.cpp</a> example, where the graph construction over the Simplex_tree is more detailed.
- *
* \subsection cechpointscloudexample Example from a point cloud
*
* This example builds the proximity graph from the given points, and maximal radius values.
diff --git a/src/Cech_complex/example/CMakeLists.txt b/src/Cech_complex/example/CMakeLists.txt
index 1b08c7cb..7d52ed5e 100644
--- a/src/Cech_complex/example/CMakeLists.txt
+++ b/src/Cech_complex/example/CMakeLists.txt
@@ -1,17 +1,9 @@
project(Cech_complex_examples)
-if (TARGET Boost::program_options)
- add_executable ( Cech_complex_example_step_by_step cech_complex_step_by_step.cpp )
- target_link_libraries(Cech_complex_example_step_by_step Boost::program_options)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ add_executable ( Cech_complex_example_from_points cech_complex_example_from_points.cpp)
if (TBB_FOUND)
- target_link_libraries(Cech_complex_example_step_by_step ${TBB_LIBRARIES})
+ target_link_libraries(Cech_complex_example_from_points ${TBB_LIBRARIES})
endif()
- add_test(NAME Cech_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:Cech_complex_example_step_by_step>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-d" "3")
+ add_test(NAME Cech_complex_example_from_points COMMAND $<TARGET_FILE:Cech_complex_example_from_points>)
endif()
-
-add_executable ( Cech_complex_example_from_points cech_complex_example_from_points.cpp)
-if (TBB_FOUND)
- target_link_libraries(Cech_complex_example_from_points ${TBB_LIBRARIES})
-endif()
-add_test(NAME Cech_complex_example_from_points COMMAND $<TARGET_FILE:Cech_complex_example_from_points>)
diff --git a/src/Cech_complex/example/cech_complex_example_from_points.cpp b/src/Cech_complex/example/cech_complex_example_from_points.cpp
index 1a1f708c..ef9071ec 100644
--- a/src/Cech_complex/example/cech_complex_example_from_points.cpp
+++ b/src/Cech_complex/example/cech_complex_example_from_points.cpp
@@ -1,30 +1,33 @@
#include <gudhi/Cech_complex.h>
#include <gudhi/Simplex_tree.h>
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
+
#include <iostream>
#include <string>
#include <vector>
-#include <array>
int main() {
// Type definitions
- using Point_cloud = std::vector<std::array<double, 2>>;
using Simplex_tree = Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_fast_persistence>;
using Filtration_value = Simplex_tree::Filtration_value;
- using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
+ using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
+ using Point = typename Kernel::Point_d;
+ using Point_cloud = std::vector<Point>;
+ using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
Point_cloud points;
- points.push_back({1., 0.}); // 0
- points.push_back({0., 1.}); // 1
- points.push_back({2., 1.}); // 2
- points.push_back({3., 2.}); // 3
- points.push_back({0., 3.}); // 4
- points.push_back({3. + std::sqrt(3.), 3.}); // 5
- points.push_back({1., 4.}); // 6
- points.push_back({3., 4.}); // 7
- points.push_back({2., 4. + std::sqrt(3.)}); // 8
- points.push_back({0., 4.}); // 9
- points.push_back({-0.5, 2.}); // 10
+ points.emplace_back(1., 0.); // 0
+ points.emplace_back(0., 1.); // 1
+ points.emplace_back(2., 1.); // 2
+ points.emplace_back(3., 2.); // 3
+ points.emplace_back(0., 3.); // 4
+ points.emplace_back(3. + std::sqrt(3.), 3.); // 5
+ points.emplace_back(1., 4.); // 6
+ points.emplace_back(3., 4.); // 7
+ points.emplace_back(2., 4. + std::sqrt(3.)); // 8
+ points.emplace_back(0., 4.); // 9
+ points.emplace_back(-0.5, 2.); // 10
// ----------------------------------------------------------------------------
// Init of a Cech complex from points
diff --git a/src/Cech_complex/example/cech_complex_step_by_step.cpp b/src/Cech_complex/example/cech_complex_step_by_step.cpp
deleted file mode 100644
index f59f0293..00000000
--- a/src/Cech_complex/example/cech_complex_step_by_step.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Vincent Rouvreau
- *
- * Copyright (C) 2018 Inria
- *
- * Modification(s):
- * - YYYY/MM Author: Description of the modification
- */
-
-#include <gudhi/graph_simplicial_complex.h>
-#include <gudhi/distance_functions.h>
-#include <gudhi/Simplex_tree.h>
-#include <gudhi/Points_off_io.h>
-
-#include <gudhi/Miniball.hpp>
-
-#include <boost/program_options.hpp>
-
-#include <string>
-#include <vector>
-#include <limits> // infinity
-#include <utility> // for pair
-#include <map>
-
-// ----------------------------------------------------------------------------
-// rips_persistence_step_by_step is an example of each step that is required to
-// build a Rips over a Simplex_tree. Please refer to rips_persistence to see
-// how to do the same thing with the Rips_complex wrapper for less detailed
-// steps.
-// ----------------------------------------------------------------------------
-
-// Types definition
-using Simplex_tree = Gudhi::Simplex_tree<>;
-using Simplex_handle = Simplex_tree::Simplex_handle;
-using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<double>;
-using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Proximity_graph = Gudhi::Proximity_graph<Simplex_tree>;
-
-class Cech_blocker {
- private:
- using Point_cloud = std::vector<Point>;
- using Point_iterator = Point_cloud::const_iterator;
- using Coordinate_iterator = Point::const_iterator;
- using Min_sphere = Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- public:
- bool operator()(Simplex_handle sh) {
- std::vector<Point> points;
- for (auto vertex : simplex_tree_.simplex_vertex_range(sh)) {
- points.push_back(point_cloud_[vertex]);
-#ifdef DEBUG_TRACES
- std::clog << "#(" << vertex << ")#";
-#endif // DEBUG_TRACES
- }
- Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points);
-#ifdef DEBUG_TRACES
- std::clog << "radius = " << radius << " - " << (radius > max_radius_) << std::endl;
-#endif // DEBUG_TRACES
- simplex_tree_.assign_filtration(sh, radius);
- return (radius > max_radius_);
- }
- Cech_blocker(Simplex_tree& simplex_tree, Filtration_value max_radius, const std::vector<Point>& point_cloud)
- : simplex_tree_(simplex_tree), max_radius_(max_radius), point_cloud_(point_cloud) {
- dimension_ = point_cloud_[0].size();
- }
-
- private:
- Simplex_tree simplex_tree_;
- Filtration_value max_radius_;
- std::vector<Point> point_cloud_;
- int dimension_;
-};
-
-void program_options(int argc, char* argv[], std::string& off_file_points, Filtration_value& max_radius, int& dim_max);
-
-int main(int argc, char* argv[]) {
- std::string off_file_points;
- Filtration_value max_radius;
- int dim_max;
-
- program_options(argc, argv, off_file_points, max_radius, dim_max);
-
- // Extract the points from the file filepoints
- Points_off_reader off_reader(off_file_points);
-
- // Compute the proximity graph of the points
- Proximity_graph prox_graph = Gudhi::compute_proximity_graph<Simplex_tree>(off_reader.get_point_cloud(), max_radius,
- Gudhi::Minimal_enclosing_ball_radius());
-
- // Construct the Rips complex in a Simplex Tree
- Simplex_tree st;
- // insert the proximity graph in the simplex tree
- st.insert_graph(prox_graph);
- // expand the graph until dimension dim_max
- st.expansion_with_blockers(dim_max, Cech_blocker(st, max_radius, off_reader.get_point_cloud()));
-
- std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
- std::clog << " and has dimension " << st.dimension() << " \n";
-
- // Sort the simplices in the order of the filtration
- st.initialize_filtration();
-
-#if DEBUG_TRACES
- std::clog << "********************************************************************\n";
- std::clog << "* The complex contains " << st.num_simplices() << " simplices - dimension=" << st.dimension() << "\n";
- std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
- for (auto f_simplex : st.filtration_simplex_range()) {
- std::clog << " "
- << "[" << st.filtration(f_simplex) << "] ";
- for (auto vertex : st.simplex_vertex_range(f_simplex)) {
- std::clog << static_cast<int>(vertex) << " ";
- }
- std::clog << std::endl;
- }
-#endif // DEBUG_TRACES
-
- return 0;
-}
-
-void program_options(int argc, char* argv[], std::string& off_file_points, Filtration_value& max_radius, int& dim_max) {
- namespace po = boost::program_options;
- po::options_description hidden("Hidden options");
- hidden.add_options()("input-file", po::value<std::string>(&off_file_points),
- "Name of an OFF file containing a point set.\n");
-
- po::options_description visible("Allowed options", 100);
- visible.add_options()("help,h", "produce help message")(
- "max-radius,r",
- po::value<Filtration_value>(&max_radius)->default_value(std::numeric_limits<Filtration_value>::infinity()),
- "Maximal length of an edge for the Rips complex construction.")(
- "cpx-dimension,d", po::value<int>(&dim_max)->default_value(1),
- "Maximal dimension of the Rips complex we want to compute.");
-
- po::positional_options_description pos;
- pos.add("input-file", 1);
-
- po::options_description all;
- all.add(visible).add(hidden);
-
- po::variables_map vm;
- po::store(po::command_line_parser(argc, argv).options(all).positional(pos).run(), vm);
- po::notify(vm);
-
- if (vm.count("help") || !vm.count("input-file")) {
- std::clog << std::endl;
- std::clog << "Construct a Cech complex defined on a set of input points.\n \n";
-
- std::clog << "Usage: " << argv[0] << " [options] input-file" << std::endl << std::endl;
- std::clog << visible << std::endl;
- exit(-1);
- }
-}
diff --git a/src/Cech_complex/include/gudhi/Cech_complex.h b/src/Cech_complex/include/gudhi/Cech_complex.h
index b0871e10..625f7c9c 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex.h
@@ -11,14 +11,13 @@
#ifndef CECH_COMPLEX_H_
#define CECH_COMPLEX_H_
-#include <gudhi/distance_functions.h> // for Gudhi::Minimal_enclosing_ball_radius
+#include <gudhi/Sphere_circumradius.h> // for Gudhi::cech_complex::Sphere_circumradius
#include <gudhi/graph_simplicial_complex.h> // for Gudhi::Proximity_graph
#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
#include <gudhi/Cech_complex_blocker.h> // for Gudhi::cech_complex::Cech_blocker
#include <iostream>
#include <stdexcept> // for exception management
-#include <vector>
namespace Gudhi {
@@ -26,55 +25,55 @@ namespace cech_complex {
/**
* \class Cech_complex
- * \brief Cech complex data structure.
+ * \brief Cech complex class.
*
* \ingroup cech_complex
*
* \details
- * The data structure is a proximity graph, containing edges when the edge length is less or equal
- * to a given max_radius. Edge length is computed from `Gudhi::Minimal_enclosing_ball_radius` distance function.
+ * A Cech complex is a simplicial complex in which the set of all simplices is filtered
+ * by the radius of their minimal enclosing ball, bounded by the given max_radius.
*
- * \tparam SimplicialComplexForProximityGraph furnishes `Vertex_handle` and `Filtration_value` type definition required
- * by `Gudhi::Proximity_graph`.
+ * \tparam Kernel CGAL kernel: either Epick_d or Epeck_d.
+ *
+ * \tparam SimplicialComplexForCechComplex furnishes the `Vertex_handle` and `Filtration_value` type definitions
+ * required by `Gudhi::Proximity_graph` and the Cech blocker.
*
- * \tparam ForwardPointRange must be a range for which `std::begin()` and `std::end()` methods return input
- * iterators on a point. `std::begin()` and `std::end()` methods are also required for a point.
*/
-template <typename SimplicialComplexForProximityGraph, typename ForwardPointRange>
+template <typename Kernel, typename SimplicialComplexForCechComplex>
class Cech_complex {
private:
// Required by compute_proximity_graph
- using Vertex_handle = typename SimplicialComplexForProximityGraph::Vertex_handle;
- using Filtration_value = typename SimplicialComplexForProximityGraph::Filtration_value;
- using Proximity_graph = Gudhi::Proximity_graph<SimplicialComplexForProximityGraph>;
-
- // Retrieve Coordinate type from ForwardPointRange
- using Point_from_range_iterator = typename boost::range_const_iterator<ForwardPointRange>::type;
- using Point_from_range = typename std::iterator_traits<Point_from_range_iterator>::value_type;
- using Coordinate_iterator = typename boost::range_const_iterator<Point_from_range>::type;
- using Coordinate = typename std::iterator_traits<Coordinate_iterator>::value_type;
-
- public:
- // Point and Point_cloud type definition
- using Point = std::vector<Coordinate>;
- using Point_cloud = std::vector<Point>;
-
- public:
- /** \brief Cech_complex constructor from a list of points.
+ using Vertex_handle = typename SimplicialComplexForCechComplex::Vertex_handle;
+ using Filtration_value = typename SimplicialComplexForCechComplex::Filtration_value;
+ using Proximity_graph = Gudhi::Proximity_graph<SimplicialComplexForCechComplex>;
+
+ using cech_blocker = Cech_blocker<SimplicialComplexForCechComplex, Cech_complex, Kernel>;
+
+ using Point_d = typename cech_blocker::Point_d;
+ using Point_cloud = std::vector<Point_d>;
+
+ // Numeric type of coordinates in the kernel
+ using FT = typename cech_blocker::FT;
+ // Sphere is a pair of point and squared radius.
+ using Sphere = typename cech_blocker::Sphere;
+
+ public:
+ /** \brief Cech_complex constructor from a range of points.
*
- * @param[in] points Range of points.
+ * @param[in] points Range of points where each point is defined as `kernel::Point_d`.
* @param[in] max_radius Maximal radius value.
- *
- * \tparam ForwardPointRange must be a range of Point. Point must be a range of <b>copyable</b> Cartesian coordinates.
+ * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
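+   *
+   * A minimal usage sketch, mirroring the cech_complex_example_from_points.cpp example shipped with this module:
+   * \code
+   * using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
+   * using Simplex_tree = Gudhi::Simplex_tree<>;
+   * std::vector<Kernel::Point_d> points;
+   * points.emplace_back(1., 0.);
+   * points.emplace_back(0., 1.);
+   * Cech_complex<Kernel, Simplex_tree> cech(points, 1.);  // max_radius = 1.
+   * Simplex_tree stree;
+   * cech.create_complex(stree, 2);  // expand the complex up to dimension 2
+   * \endcode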
*
*/
- Cech_complex(const ForwardPointRange& points, Filtration_value max_radius) : max_radius_(max_radius) {
- // Point cloud deep copy
- point_cloud_.reserve(boost::size(points));
- for (auto&& point : points) point_cloud_.emplace_back(std::begin(point), std::end(point));
+  template<typename InputPointRange>
+  Cech_complex(const InputPointRange& points, Filtration_value max_radius, const bool exact = false)
+      : max_radius_(max_radius), exact_(exact) {
- cech_skeleton_graph_ = Gudhi::compute_proximity_graph<SimplicialComplexForProximityGraph>(
- point_cloud_, max_radius_, Gudhi::Minimal_enclosing_ball_radius());
+ point_cloud_.assign(std::begin(points), std::end(points));
+
+ cech_skeleton_graph_ = Gudhi::compute_proximity_graph<SimplicialComplexForCechComplex>(
+ point_cloud_, max_radius_, Sphere_circumradius<Kernel, Filtration_value>(exact));
}
/** \brief Initializes the simplicial complex from the proximity graph and expands it until a given maximal
@@ -85,7 +84,6 @@ class Cech_complex {
* @exception std::invalid_argument In debug mode, if `complex.num_vertices()` does not return 0.
*
*/
- template <typename SimplicialComplexForCechComplex>
void create_complex(SimplicialComplexForCechComplex& complex, int dim_max) {
GUDHI_CHECK(complex.num_vertices() == 0,
std::invalid_argument("Cech_complex::create_complex - simplicial complex is not empty"));
@@ -93,8 +91,7 @@ class Cech_complex {
// insert the proximity graph in the simplicial complex
complex.insert_graph(cech_skeleton_graph_);
// expand the graph until dimension dim_max
- complex.expansion_with_blockers(dim_max,
- Cech_blocker<SimplicialComplexForCechComplex, Cech_complex>(&complex, this));
+ complex.expansion_with_blockers(dim_max, cech_blocker(&complex, this));
}
/** @return max_radius value given at construction. */
@@ -103,12 +100,24 @@ class Cech_complex {
/** @param[in] vertex Point position in the range.
* @return The point.
*/
- const Point& get_point(Vertex_handle vertex) const { return point_cloud_[vertex]; }
+ const Point_d& get_point(Vertex_handle vertex) const { return point_cloud_[vertex]; }
+
+ /**
+ * @return Vector of cached spheres.
+ */
+ std::vector<Sphere> & get_cache() { return cache_; }
+
+  /** \brief Check the exact computation option.
+   * @return The exact option.
+   */
+  bool is_exact() const { return exact_; }
private:
Proximity_graph cech_skeleton_graph_;
Filtration_value max_radius_;
Point_cloud point_cloud_;
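+  // Cache of the spheres computed by the blocker, indexed by simplex keys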
+ std::vector<Sphere> cache_;
+ const bool exact_;
};
} // namespace cech_complex
diff --git a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
index 31b9aab5..e78e37b7 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
@@ -11,10 +11,12 @@
#ifndef CECH_COMPLEX_BLOCKER_H_
#define CECH_COMPLEX_BLOCKER_H_
-#include <gudhi/distance_functions.h> // for Gudhi::Minimal_enclosing_ball_radius
+#include <CGAL/NT_converter.h> // for casting from FT to Filtration_value
+#include <CGAL/Lazy_exact_nt.h> // for CGAL::exact
#include <iostream>
#include <vector>
+#include <set>
#include <cmath> // for std::sqrt
namespace Gudhi {
@@ -30,37 +32,104 @@ namespace cech_complex {
* \details
* ÄŒech blocker is an oracle constructed from a Cech_complex and a simplicial complex.
*
- * \tparam SimplicialComplexForProximityGraph furnishes `Simplex_handle` and `Filtration_value` type definition,
+ * \tparam SimplicialComplexForCech furnishes `Simplex_handle` and `Filtration_value` type definition,
* `simplex_vertex_range(Simplex_handle sh)`and `assign_filtration(Simplex_handle sh, Filtration_value filt)` methods.
*
- * \tparam Chech_complex is required by the blocker.
+ * \tparam Cech_complex is required by the blocker.
+ *
+ * \tparam Kernel CGAL kernel: either Epick_d or Epeck_d.
*/
-template <typename SimplicialComplexForCech, typename Cech_complex>
+template <typename SimplicialComplexForCech, typename Cech_complex, typename Kernel>
class Cech_blocker {
+
+ public:
+
+ using Point_d = typename Kernel::Point_d;
+ // Numeric type of coordinates in the kernel
+ using FT = typename Kernel::FT;
+ // Sphere is a pair of point and squared radius.
+ using Sphere = typename std::pair<Point_d, FT>;
+
private:
- using Point_cloud = typename Cech_complex::Point_cloud;
using Simplex_handle = typename SimplicialComplexForCech::Simplex_handle;
using Filtration_value = typename SimplicialComplexForCech::Filtration_value;
+ using Simplex_key = typename SimplicialComplexForCech::Simplex_key;
+
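+  // Returns the circumsphere of the points in the range [begin, end), as a (center, squared radius) pair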
+ template<class PointIterator>
+ Sphere get_sphere(PointIterator begin, PointIterator end) const {
+ Point_d c = kernel_.construct_circumcenter_d_object()(begin, end);
+ FT r = kernel_.squared_distance_d_object()(c, *begin);
+ return std::make_pair(std::move(c), std::move(r));
+ }
public:
+
/** \internal \brief ÄŒech complex blocker operator() - the oracle - assigns the filtration value from the simplex
* radius and returns if the simplex expansion must be blocked.
* \param[in] sh The Simplex_handle.
* \return true if the simplex radius is greater than the Cech_complex max_radius*/
bool operator()(Simplex_handle sh) {
+ using Point_cloud = std::vector<Point_d>;
+ Filtration_value radius = 0;
+ bool is_min_enclos_ball = false;
Point_cloud points;
- for (auto vertex : sc_ptr_->simplex_vertex_range(sh)) {
- points.push_back(cc_ptr_->get_point(vertex));
+ points.reserve(sc_ptr_->dimension(sh)+1);
+
+    // For each face of simplex sh, test whether the vertex opposite to the face lies inside the face's minimal
+    // enclosing ball; if so, reuse that sphere and exit the loop. Otherwise, the sphere is the circumsphere of all vertices.
+ for (auto face_opposite_vertex : sc_ptr_->boundary_opposite_vertex_simplex_range(sh)) {
+ auto k = sc_ptr_->key(face_opposite_vertex.first);
+ Simplex_key sph_key;
+ if(k != sc_ptr_->null_key()) {
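+        // The sphere of this face was already computed and cached under the key k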
+ sph_key = k;
+ }
+ else {
+ for (auto vertex : sc_ptr_->simplex_vertex_range(face_opposite_vertex.first)) {
+ points.push_back(cc_ptr_->get_point(vertex));
+#ifdef DEBUG_TRACES
+ std::clog << "#(" << vertex << ")#";
+#endif // DEBUG_TRACES
+ }
+        // Put the face sphere in the cache
+ sph_key = cc_ptr_->get_cache().size();
+ sc_ptr_->assign_key(face_opposite_vertex.first, sph_key);
+ cc_ptr_->get_cache().push_back(get_sphere(points.cbegin(), points.cend()));
+ // Clear face points
+ points.clear();
+ }
+      // Check if the minimal enclosing ball of the current face contains the extra point (the opposite vertex)
+ Sphere const& sph = cc_ptr_->get_cache()[sph_key];
+ if (kernel_.squared_distance_d_object()(sph.first, cc_ptr_->get_point(face_opposite_vertex.second)) <= sph.second) {
+ is_min_enclos_ball = true;
+ sc_ptr_->assign_key(sh, sph_key);
+ radius = sc_ptr_->filtration(face_opposite_vertex.first);
#ifdef DEBUG_TRACES
- std::clog << "#(" << vertex << ")#";
+ std::clog << "center: " << sph.first << ", radius: " << radius << std::endl;
#endif // DEBUG_TRACES
+ break;
+ }
}
- Filtration_value radius = Gudhi::Minimal_enclosing_ball_radius()(points);
+    // No face sphere contains the opposite vertex: the sphere is the circumsphere of all the vertices of sh
+ if(!is_min_enclos_ball) {
+ for (auto vertex : sc_ptr_->simplex_vertex_range(sh)) {
+ points.push_back(cc_ptr_->get_point(vertex));
+ }
+ Sphere sph = get_sphere(points.cbegin(), points.cend());
+#if CGAL_VERSION_NR >= 1050000000
+ if(cc_ptr_->is_exact()) CGAL::exact(sph.second);
+#endif
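+      // When exact computation is requested, CGAL::exact() above forces the lazy squared radius to be
+      // evaluated exactly before the conversion below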
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+ radius = std::sqrt(cast_to_fv(sph.second));
+
+ sc_ptr_->assign_key(sh, cc_ptr_->get_cache().size());
+ cc_ptr_->get_cache().push_back(std::move(sph));
+ }
+
#ifdef DEBUG_TRACES
if (radius > cc_ptr_->max_radius()) std::clog << "radius > max_radius => expansion is blocked\n";
#endif // DEBUG_TRACES
- sc_ptr_->assign_filtration(sh, radius);
+    // Only assign the radius as the filtration value if it is larger than the current one, keeping the filtration monotone
+ if (radius > sc_ptr_->filtration(sh)) sc_ptr_->assign_filtration(sh, radius);
return (radius > cc_ptr_->max_radius());
}
@@ -70,6 +139,7 @@ class Cech_blocker {
private:
SimplicialComplexForCech* sc_ptr_;
Cech_complex* cc_ptr_;
+ Kernel kernel_;
};
} // namespace cech_complex
diff --git a/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT b/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT
deleted file mode 100644
index dbe4c553..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.COPYRIGHT
+++ /dev/null
@@ -1,4 +0,0 @@
-The miniball software is available under the GNU General Public License (GPLv3 - https://www.gnu.org/copyleft/gpl.html).
-If your intended use is not compliant with this license, please buy a commercial license (EUR 500 - https://people.inf.ethz.ch/gaertner/subdir/software/miniball/license.html).
-You need a license if the software that you develop using Miniball V3.0 is not open source.
-
diff --git a/src/Cech_complex/include/gudhi/Miniball.README b/src/Cech_complex/include/gudhi/Miniball.README
deleted file mode 100644
index 033d8953..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.README
+++ /dev/null
@@ -1,26 +0,0 @@
-https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html
-
-Smallest Enclosing Balls of Points - Fast and Robust in C++.
-(high-quality software for smallest enclosing balls of balls is available in the computational geometry algorithms library CGAL)
-
-
-This is the miniball software (V3.0) for computing smallest enclosing balls of points in arbitrary dimensions. It consists of a C++ header file Miniball.hpp (around 500 lines of code) and two example programs miniball_example.cpp and miniball_example_containers.cpp that demonstrate the usage. The first example stores the coordinates of the input points in a two-dimensional array, the second example uses a list of vectors to show how generic containers can be used.
-
-Credits: Aditya Gupta and Alexandros Konstantinakis-Karmis have significantly contributed to this version of the software.
-
-Changes - https://people.inf.ethz.ch/gaertner/subdir/software/miniball/changes.txt - from previous versions.
-
-The theory - https://people.inf.ethz.ch/gaertner/subdir/texts/own_work/esa99_final.pdf - behind the miniball software (Proc. 7th Annual European Symposium on Algorithms (ESA), Lecture Notes in Computer Science 1643, Springer-Verlag, pp.325-338, 1999).
-
-Main Features:
-
- Very fast in low dimensions. 1 million points in 5-space are processed within 0.05 seconds on any recent machine.
-
- High numerical stability. Almost all input degeneracies (cospherical points, multiple points, points very close together) are routinely handled.
-
- Easily integrates into your code. You can freely choose the coordinate type of your points and the container to store the points. If you still need to adapt the code, the header is small and readable and contains documentation for all major methods.
-
-
-Changes done for the GUDHI version of MiniBall:
- - Add include guard
- - Move Miniball namespace inside a new Gudhi namespace
diff --git a/src/Cech_complex/include/gudhi/Miniball.hpp b/src/Cech_complex/include/gudhi/Miniball.hpp
deleted file mode 100644
index ce6cbb5b..00000000
--- a/src/Cech_complex/include/gudhi/Miniball.hpp
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copright (C) 1999-2013, Bernd Gaertner
-// $Rev: 3581 $
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-//
-// Contact:
-// --------
-// Bernd Gaertner
-// Institute of Theoretical Computer Science
-// ETH Zuerich
-// CAB G31.1
-// CH-8092 Zuerich, Switzerland
-// http://www.inf.ethz.ch/personal/gaertner
-
-#ifndef MINIBALL_HPP_
-#define MINIBALL_HPP_
-
-#include <cassert>
-#include <algorithm>
-#include <list>
-#include <ctime>
-#include <limits>
-
-namespace Gudhi {
-
-namespace Miniball {
-
- // Global Functions
- // ================
- template <typename NT>
- inline NT mb_sqr (NT r) {return r*r;}
-
- // Functors
- // ========
-
- // functor to map a point iterator to the corresponding coordinate iterator;
- // generic version for points whose coordinate containers have begin()
- template < typename Pit_, typename Cit_ >
- struct CoordAccessor {
- typedef Pit_ Pit;
- typedef Cit_ Cit;
- inline Cit operator() (Pit it) const { return (*it).begin(); }
- };
-
- // partial specialization for points whose coordinate containers are arrays
- template < typename Pit_, typename Cit_ >
- struct CoordAccessor<Pit_, Cit_*> {
- typedef Pit_ Pit;
- typedef Cit_* Cit;
- inline Cit operator() (Pit it) const { return *it; }
- };
-
- // Class Declaration
- // =================
-
- template <typename CoordAccessor>
- class Miniball {
- private:
- // types
- // The iterator type to go through the input points
- typedef typename CoordAccessor::Pit Pit;
- // The iterator type to go through the coordinates of a single point.
- typedef typename CoordAccessor::Cit Cit;
- // The coordinate type
- typedef typename std::iterator_traits<Cit>::value_type NT;
- // The iterator to go through the support points
- typedef typename std::list<Pit>::iterator Sit;
-
- // data members...
- const int d; // dimension
- Pit points_begin;
- Pit points_end;
- CoordAccessor coord_accessor;
- double time;
- const NT nt0; // NT(0)
-
- //...for the algorithms
- std::list<Pit> L;
- Sit support_end;
- int fsize; // number of forced points
- int ssize; // number of support points
-
- // ...for the ball updates
- NT* current_c;
- NT current_sqr_r;
- NT** c;
- NT* sqr_r;
-
- // helper arrays
- NT* q0;
- NT* z;
- NT* f;
- NT** v;
- NT** a;
-
- public:
- // The iterator type to go through the support points
- typedef typename std::list<Pit>::const_iterator SupportPointIterator;
-
- // PRE: [begin, end) is a nonempty range
- // POST: computes the smallest enclosing ball of the points in the range
- // [begin, end); the functor a maps a point iterator to an iterator
- // through the d coordinates of the point
- Miniball (int d_, Pit begin, Pit end, CoordAccessor ca = CoordAccessor());
-
- // POST: returns a pointer to the first element of an array that holds
- // the d coordinates of the center of the computed ball
- const NT* center () const;
-
- // POST: returns the squared radius of the computed ball
- NT squared_radius () const;
-
- // POST: returns the number of support points of the computed ball;
- // the support points form a minimal set with the same smallest
- // enclosing ball as the input set; in particular, the support
- // points are on the boundary of the computed ball, and their
- // number is at most d+1
- int nr_support_points () const;
-
- // POST: returns an iterator to the first support point
- SupportPointIterator support_points_begin () const;
-
- // POST: returns a past-the-end iterator for the range of support points
- SupportPointIterator support_points_end () const;
-
- // POST: returns the maximum excess of any input point w.r.t. the computed
- // ball, divided by the squared radius of the computed ball. The
- // excess of a point is the difference between its squared distance
- // from the center and the squared radius; Ideally, the return value
- // is 0. subopt is set to the absolute value of the most negative
- // coefficient in the affine combination of the support points that
- // yields the center. Ideally, this is a convex combination, and there
- // is no negative coefficient in which case subopt is set to 0.
- NT relative_error (NT& subopt) const;
-
- // POST: return true if the relative error is at most tol, and the
- // suboptimality is 0; the default tolerance is 10 times the
- // coordinate type's machine epsilon
- bool is_valid (NT tol = NT(10) * std::numeric_limits<NT>::epsilon()) const;
-
- // POST: returns the time in seconds taken by the constructor call for
- // computing the smallest enclosing ball
- double get_time() const;
-
- // POST: deletes dynamically allocated arrays
- ~Miniball();
-
- private:
- void mtf_mb (Sit n);
- void mtf_move_to_front (Sit j);
- void pivot_mb (Pit n);
- void pivot_move_to_front (Pit j);
- NT excess (Pit pit) const;
- void pop ();
- bool push (Pit pit);
- NT suboptimality () const;
- void create_arrays();
- void delete_arrays();
- };
-
- // Class Definition
- // ================
- template <typename CoordAccessor>
- Miniball<CoordAccessor>::Miniball (int d_, Pit begin, Pit end,
- CoordAccessor ca)
- : d (d_),
- points_begin (begin),
- points_end (end),
- coord_accessor (ca),
- time (clock()),
- nt0 (NT(0)),
- L(),
- support_end (L.begin()),
- fsize(0),
- ssize(0),
- current_c (NULL),
- current_sqr_r (NT(-1)),
- c (NULL),
- sqr_r (NULL),
- q0 (NULL),
- z (NULL),
- f (NULL),
- v (NULL),
- a (NULL)
- {
- assert (points_begin != points_end);
- create_arrays();
-
- // set initial center
- for (int j=0; j<d; ++j) c[0][j] = nt0;
- current_c = c[0];
-
- // compute miniball
- pivot_mb (points_end);
-
- // update time
- time = (clock() - time) / CLOCKS_PER_SEC;
- }
-
- template <typename CoordAccessor>
- Miniball<CoordAccessor>::~Miniball()
- {
- delete_arrays();
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::create_arrays()
- {
- c = new NT*[d+1];
- v = new NT*[d+1];
- a = new NT*[d+1];
- for (int i=0; i<d+1; ++i) {
- c[i] = new NT[d];
- v[i] = new NT[d];
- a[i] = new NT[d];
- }
- sqr_r = new NT[d+1];
- q0 = new NT[d];
- z = new NT[d+1];
- f = new NT[d+1];
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::delete_arrays()
- {
- delete[] f;
- delete[] z;
- delete[] q0;
- delete[] sqr_r;
- for (int i=0; i<d+1; ++i) {
- delete[] a[i];
- delete[] v[i];
- delete[] c[i];
- }
- delete[] a;
- delete[] v;
- delete[] c;
- }
-
- template <typename CoordAccessor>
- const typename Miniball<CoordAccessor>::NT*
- Miniball<CoordAccessor>::center () const
- {
- return current_c;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::squared_radius () const
- {
- return current_sqr_r;
- }
-
- template <typename CoordAccessor>
- int Miniball<CoordAccessor>::nr_support_points () const
- {
- assert (ssize < d+2);
- return ssize;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::SupportPointIterator
- Miniball<CoordAccessor>::support_points_begin () const
- {
- return L.begin();
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::SupportPointIterator
- Miniball<CoordAccessor>::support_points_end () const
- {
- return support_end;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::relative_error (NT& subopt) const
- {
- NT e, max_e = nt0;
- // compute maximum absolute excess of support points
- for (SupportPointIterator it = support_points_begin();
- it != support_points_end(); ++it) {
- e = excess (*it);
- if (e < nt0) e = -e;
- if (e > max_e) {
- max_e = e;
- }
- }
- // compute maximum excess of any point
- for (Pit i = points_begin; i != points_end; ++i)
- if ((e = excess (i)) > max_e)
- max_e = e;
-
- subopt = suboptimality();
- assert (current_sqr_r > nt0 || max_e == nt0);
- return (current_sqr_r == nt0 ? nt0 : max_e / current_sqr_r);
- }
-
- template <typename CoordAccessor>
- bool Miniball<CoordAccessor>::is_valid (NT tol) const
- {
- NT suboptimality;
- return ( (relative_error (suboptimality) <= tol) && (suboptimality == 0) );
- }
-
- template <typename CoordAccessor>
- double Miniball<CoordAccessor>::get_time() const
- {
- return time;
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::mtf_mb (Sit n)
- {
- // Algorithm 1: mtf_mb (L_{n-1}, B), where L_{n-1} = [L.begin, n)
- // B: the set of forced points, defining the current ball
- // S: the superset of support points computed by the algorithm
- // --------------------------------------------------------------
- // from B. Gaertner, Fast and Robust Smallest Enclosing Balls, ESA 1999,
- // http://www.inf.ethz.ch/personal/gaertner/texts/own_work/esa99_final.pdf
-
- // PRE: B = S
- assert (fsize == ssize);
-
- support_end = L.begin();
- if ((fsize) == d+1) return;
-
- // incremental construction
- for (Sit i = L.begin(); i != n;)
- {
- // INV: (support_end - L.begin() == |S|-|B|)
- assert (std::distance (L.begin(), support_end) == ssize - fsize);
-
- Sit j = i++;
- if (excess(*j) > nt0)
- if (push(*j)) { // B := B + p_i
- mtf_mb (j); // mtf_mb (L_{i-1}, B + p_i)
- pop(); // B := B - p_i
- mtf_move_to_front(j);
- }
- }
- // POST: the range [L.begin(), support_end) stores the set S\B
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::mtf_move_to_front (Sit j)
- {
- if (support_end == j)
- support_end++;
- L.splice (L.begin(), L, j);
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pivot_mb (Pit n)
- {
- // Algorithm 2: pivot_mb (L_{n-1}), where L_{n-1} = [L.begin, n)
- // --------------------------------------------------------------
- // from B. Gaertner, Fast and Robust Smallest Enclosing Balls, ESA 1999,
- // http://www.inf.ethz.ch/personal/gaertner/texts/own_work/esa99_final.pdf
- NT old_sqr_r;
- const NT* c;
- Pit pivot, k;
- NT e, max_e, sqr_r;
- Cit p;
- do {
- old_sqr_r = current_sqr_r;
- sqr_r = current_sqr_r;
-
- pivot = points_begin;
- max_e = nt0;
- for (k = points_begin; k != n; ++k) {
- p = coord_accessor(k);
- e = -sqr_r;
- c = current_c;
- for (int j=0; j<d; ++j)
- e += mb_sqr<NT>(*p++-*c++);
- if (e > max_e) {
- max_e = e;
- pivot = k;
- }
- }
-
- if (max_e > nt0) {
- // check if the pivot is already contained in the support set
- if (std::find(L.begin(), support_end, pivot) == support_end) {
- assert (fsize == 0);
- if (push (pivot)) {
- mtf_mb(support_end);
- pop();
- pivot_move_to_front(pivot);
- }
- }
- }
- } while (old_sqr_r < current_sqr_r);
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pivot_move_to_front (Pit j)
- {
- L.push_front(j);
- if (std::distance(L.begin(), support_end) == d+2)
- support_end--;
- }
-
- template <typename CoordAccessor>
- inline typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::excess (Pit pit) const
- {
- Cit p = coord_accessor(pit);
- NT e = -current_sqr_r;
- NT* c = current_c;
- for (int k=0; k<d; ++k){
- e += mb_sqr<NT>(*p++-*c++);
- }
- return e;
- }
-
- template <typename CoordAccessor>
- void Miniball<CoordAccessor>::pop ()
- {
- --fsize;
- }
-
- template <typename CoordAccessor>
- bool Miniball<CoordAccessor>::push (Pit pit)
- {
- int i, j;
- NT eps = mb_sqr<NT>(std::numeric_limits<NT>::epsilon());
-
- Cit cit = coord_accessor(pit);
- Cit p = cit;
-
- if (fsize==0) {
- for (i=0; i<d; ++i)
- q0[i] = *p++;
- for (i=0; i<d; ++i)
- c[0][i] = q0[i];
- sqr_r[0] = nt0;
- }
- else {
- // set v_fsize to Q_fsize
- for (i=0; i<d; ++i)
- //v[fsize][i] = p[i]-q0[i];
- v[fsize][i] = *p++-q0[i];
-
- // compute the a_{fsize,i}, i< fsize
- for (i=1; i<fsize; ++i) {
- a[fsize][i] = nt0;
- for (j=0; j<d; ++j)
- a[fsize][i] += v[i][j] * v[fsize][j];
- a[fsize][i]*=(2/z[i]);
- }
-
- // update v_fsize to Q_fsize-\bar{Q}_fsize
- for (i=1; i<fsize; ++i) {
- for (j=0; j<d; ++j)
- v[fsize][j] -= a[fsize][i]*v[i][j];
- }
-
- // compute z_fsize
- z[fsize]=nt0;
- for (j=0; j<d; ++j)
- z[fsize] += mb_sqr<NT>(v[fsize][j]);
- z[fsize]*=2;
-
- // reject push if z_fsize too small
- if (z[fsize]<eps*current_sqr_r) {
- return false;
- }
-
- // update c, sqr_r
- p=cit;
- NT e = -sqr_r[fsize-1];
- for (i=0; i<d; ++i)
- e += mb_sqr<NT>(*p++-c[fsize-1][i]);
- f[fsize]=e/z[fsize];
-
- for (i=0; i<d; ++i)
- c[fsize][i] = c[fsize-1][i]+f[fsize]*v[fsize][i];
- sqr_r[fsize] = sqr_r[fsize-1] + e*f[fsize]/2;
- }
- current_c = c[fsize];
- current_sqr_r = sqr_r[fsize];
- ssize = ++fsize;
- return true;
- }
-
- template <typename CoordAccessor>
- typename Miniball<CoordAccessor>::NT
- Miniball<CoordAccessor>::suboptimality () const
- {
- NT* l = new NT[d+1];
- NT min_l = nt0;
- l[0] = NT(1);
- for (int i=ssize-1; i>0; --i) {
- l[i] = f[i];
- for (int k=ssize-1; k>i; --k)
- l[i]-=a[k][i]*l[k];
- if (l[i] < min_l) min_l = l[i];
- l[0] -= l[i];
- }
- if (l[0] < min_l) min_l = l[0];
- delete[] l;
- if (min_l < nt0)
- return -min_l;
- return nt0;
- }
-} // namespace Miniball
-
-} // namespace Gudhi
-
-#endif // MINIBALL_HPP_
diff --git a/src/Cech_complex/include/gudhi/Sphere_circumradius.h b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
new file mode 100644
index 00000000..2f916c0a
--- /dev/null
+++ b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
@@ -0,0 +1,78 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Hind Montassif
+ *
+ * Copyright (C) 2021 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef SPHERE_CIRCUMRADIUS_H_
+#define SPHERE_CIRCUMRADIUS_H_
+
+#include <CGAL/Epick_d.h>  // provides CGAL::NT_converter, as #include <CGAL/NT_converter.h> alone does not compile
+#include <CGAL/Lazy_exact_nt.h> // for CGAL::exact
+
+#include <cmath> // for std::sqrt
+#include <vector>
+
+namespace Gudhi {
+
+namespace cech_complex {
+
+/** \private @brief Compute the circumradius of the sphere passing through points given by a range of coordinates.
+ * The points are assumed to have the same dimension. */
+template<typename Kernel, typename Filtration_value>
+class Sphere_circumradius {
+ private:
+ Kernel kernel_;
+ const bool exact_;
+ public:
+ using FT = typename Kernel::FT;
+ using Point = typename Kernel::Point_d;
+ using Point_cloud = typename std::vector<Point>;
+
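+  // Converts the kernel number type FT (possibly an exact type) to the Filtration_value type of the complex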
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+
+  /** \brief Circumradius of the sphere passing through two points, computed with CGAL.
+   *
+   * @param[in] point_1 First point.
+   * @param[in] point_2 Second point.
+   * @return The circumradius of the sphere passing through the two points, i.e. half the distance between them.
+   * \tparam Point must be a Kernel::Point_d from CGAL.
+ *
+ */
+ Filtration_value operator()(const Point& point_1, const Point& point_2) const {
+ auto squared_dist_obj = kernel_.squared_distance_d_object()(point_1, point_2);
+ if(exact_) CGAL::exact(squared_dist_obj);
+ return std::sqrt(cast_to_fv(squared_dist_obj)) / 2.;
+ }
+
+  /** \brief Circumradius of the sphere passing through the points of a point cloud, computed with CGAL.
+   *
+   * @param[in] point_cloud The points.
+   * @return The circumradius of the sphere passing through the points.
+   * \tparam Point_cloud must be a range of Kernel::Point_d points from CGAL.
+ *
+ */
+ Filtration_value operator()(const Point_cloud& point_cloud) const {
+ auto squared_radius_obj = kernel_.compute_squared_radius_d_object()(point_cloud.begin(), point_cloud.end());
+ if(exact_) CGAL::exact(squared_radius_obj);
+ return std::sqrt(cast_to_fv(squared_radius_obj));
+ }
+
+ /** \brief Constructor
+ * @param[in] exact Option for exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
+ */
+ Sphere_circumradius(const bool exact = false) : exact_(exact) {}
+
+};
+
+} // namespace cech_complex
+
+} // namespace Gudhi
+
+#endif // SPHERE_CIRCUMRADIUS_H_
diff --git a/src/Cech_complex/test/CMakeLists.txt b/src/Cech_complex/test/CMakeLists.txt
index e6a2a18f..2d736f27 100644
--- a/src/Cech_complex/test/CMakeLists.txt
+++ b/src/Cech_complex/test/CMakeLists.txt
@@ -1,11 +1,14 @@
-include(GUDHI_boost_test)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ include(GUDHI_boost_test)
-add_executable ( Cech_complex_test_unit test_cech_complex.cpp )
-if (TBB_FOUND)
- target_link_libraries(Cech_complex_test_unit ${TBB_LIBRARIES})
-endif()
+ add_executable ( Cech_complex_test_unit test_cech_complex.cpp )
+ if (TBB_FOUND)
+ target_link_libraries(Cech_complex_test_unit ${TBB_LIBRARIES})
+ endif()
+
+ # Do not forget to copy test files in current binary dir
+ file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-# Do not forget to copy test files in current binary dir
-file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
+ gudhi_add_boost_test(Cech_complex_test_unit)
-gudhi_add_boost_test(Cech_complex_test_unit)
+endif()
diff --git a/src/Cech_complex/test/test_cech_complex.cpp b/src/Cech_complex/test/test_cech_complex.cpp
index 6e00d7b5..f5980e6d 100644
--- a/src/Cech_complex/test/test_cech_complex.cpp
+++ b/src/Cech_complex/test/test_cech_complex.cpp
@@ -22,21 +22,20 @@
// to construct Cech_complex from a OFF file of points
#include <gudhi/Points_off_io.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/distance_functions.h>
#include <gudhi/Unitary_tests_utils.h>
-#include <gudhi/Miniball.hpp>
+
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
// Type definitions
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<Filtration_value>;
+using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+using FT = typename Kernel::FT;
+using Point = typename Kernel::Point_d;
+
using Point_cloud = std::vector<Point>;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
-
-using Point_iterator = Point_cloud::const_iterator;
-using Coordinate_iterator = Point::const_iterator;
-using Min_sphere = Gudhi::Miniball::Miniball<Gudhi::Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
+using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
// ----------------------------------------------------------------------------
@@ -45,17 +44,29 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
//
// ----------------------------------------------------------------------------
Point_cloud points;
- points.push_back({1., 0.}); // 0
- points.push_back({0., 1.}); // 1
- points.push_back({2., 1.}); // 2
- points.push_back({3., 2.}); // 3
- points.push_back({0., 3.}); // 4
- points.push_back({3. + std::sqrt(3.), 3.}); // 5
- points.push_back({1., 4.}); // 6
- points.push_back({3., 4.}); // 7
- points.push_back({2., 4. + std::sqrt(3.)}); // 8
- points.push_back({0., 4.}); // 9
- points.push_back({-0.5, 2.}); // 10
+
+ std::vector<FT> point0({1., 0.});
+ points.emplace_back(point0.begin(), point0.end());
+ std::vector<FT> point1({0., 1.});
+ points.emplace_back(point1.begin(), point1.end());
+ std::vector<FT> point2({2., 1.});
+ points.emplace_back(point2.begin(), point2.end());
+ std::vector<FT> point3({3., 2.});
+ points.emplace_back(point3.begin(), point3.end());
+ std::vector<FT> point4({0., 3.});
+ points.emplace_back(point4.begin(), point4.end());
+ std::vector<FT> point5({3. + std::sqrt(3.), 3.});
+ points.emplace_back(point5.begin(), point5.end());
+ std::vector<FT> point6({1., 4.});
+ points.emplace_back(point6.begin(), point6.end());
+ std::vector<FT> point7({3., 4.});
+ points.emplace_back(point7.begin(), point7.end());
+ std::vector<FT> point8({2., 4. + std::sqrt(3.)});
+ points.emplace_back(point8.begin(), point8.end());
+ std::vector<FT> point9({0., 4.});
+ points.emplace_back(point9.begin(), point9.end());
+ std::vector<FT> point10({-0.5, 2.});
+ points.emplace_back(point10.begin(), point10.end());
Filtration_value max_radius = 1.0;
std::clog << "========== NUMBER OF POINTS = " << points.size() << " - Cech max_radius = " << max_radius
@@ -96,11 +107,11 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
std::clog << vertex << ",";
vp.push_back(points.at(vertex));
}
- std::clog << ") - distance =" << Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1))
+ std::clog << ") - distance =" << Gudhi::cech_complex::Sphere_circumradius<Kernel, Filtration_value>()(vp.at(0), vp.at(1))
<< " - filtration =" << st.filtration(f_simplex) << std::endl;
BOOST_CHECK(vp.size() == 2);
GUDHI_TEST_FLOAT_EQUALITY_CHECK(st.filtration(f_simplex),
- Gudhi::Minimal_enclosing_ball_radius()(vp.at(0), vp.at(1)));
+ Gudhi::cech_complex::Sphere_circumradius<Kernel, Filtration_value>()(vp.at(0), vp.at(1)));
}
}
@@ -125,35 +136,34 @@ BOOST_AUTO_TEST_CASE(Cech_complex_for_documentation) {
for (std::size_t vertex = 0; vertex <= 2; vertex++) {
points012.push_back(cech_complex_for_doc.get_point(vertex));
}
- std::size_t dimension = points[0].end() - points[0].begin();
- Min_sphere ms012(dimension, points012.begin(), points012.end());
- Simplex_tree::Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
- std::clog << "f012= " << f012 << " | ms012_radius= " << std::sqrt(ms012.squared_radius()) << std::endl;
+ Kernel kern;
+ Filtration_value f012 = st2.filtration(st2.find({0, 1, 2}));
+ std::clog << "f012= " << f012 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::sqrt(ms012.squared_radius()));
+ CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f012, std::sqrt(cast_to_fv(kern.compute_squared_radius_d_object()(points012.begin(), points012.end()))));
Point_cloud points1410;
points1410.push_back(cech_complex_for_doc.get_point(1));
points1410.push_back(cech_complex_for_doc.get_point(4));
points1410.push_back(cech_complex_for_doc.get_point(10));
- Min_sphere ms1410(dimension, points1410.begin(), points1410.end());
- Simplex_tree::Filtration_value f1410 = st2.filtration(st2.find({1, 4, 10}));
- std::clog << "f1410= " << f1410 << " | ms1410_radius= " << std::sqrt(ms1410.squared_radius()) << std::endl;
+ Filtration_value f1410 = st2.filtration(st2.find({1, 4, 10}));
+ std::clog << "f1410= " << f1410 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f1410, std::sqrt(ms1410.squared_radius()));
+  // In this case, the circumsphere computed with the CGAL kernel does not match the minimal enclosing ball;
+  // the filtration value check is therefore done against a hardcoded value
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f1410, 1.);
Point_cloud points469;
points469.push_back(cech_complex_for_doc.get_point(4));
points469.push_back(cech_complex_for_doc.get_point(6));
points469.push_back(cech_complex_for_doc.get_point(9));
- Min_sphere ms469(dimension, points469.begin(), points469.end());
- Simplex_tree::Filtration_value f469 = st2.filtration(st2.find({4, 6, 9}));
- std::clog << "f469= " << f469 << " | ms469_radius= " << std::sqrt(ms469.squared_radius()) << std::endl;
+ Filtration_value f469 = st2.filtration(st2.find({4, 6, 9}));
+ std::clog << "f469= " << f469 << std::endl;
- GUDHI_TEST_FLOAT_EQUALITY_CHECK(f469, std::sqrt(ms469.squared_radius()));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(f469, std::sqrt(cast_to_fv(kern.compute_squared_radius_d_object()(points469.begin(), points469.end()))));
BOOST_CHECK((st2.find({6, 7, 8}) == st2.null_simplex()));
BOOST_CHECK((st2.find({3, 5, 7}) == st2.null_simplex()));
@@ -235,7 +245,7 @@ BOOST_AUTO_TEST_CASE(Cech_create_complex_throw) {
//
// ----------------------------------------------------------------------------
std::string off_file_name("alphacomplexdoc.off");
- double max_radius = 12.0;
+ Filtration_value max_radius = 12.0;
std::clog << "========== OFF FILE NAME = " << off_file_name << " - Cech max_radius=" << max_radius
<< "==========" << std::endl;
diff --git a/src/Cech_complex/utilities/CMakeLists.txt b/src/Cech_complex/utilities/CMakeLists.txt
index b183c8d8..e80a698e 100644
--- a/src/Cech_complex/utilities/CMakeLists.txt
+++ b/src/Cech_complex/utilities/CMakeLists.txt
@@ -1,15 +1,17 @@
-project(Cech_complex_utilities)
+if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.0.1)
+ project(Cech_complex_utilities)
-if (TARGET Boost::program_options)
- add_executable(cech_persistence cech_persistence.cpp)
- target_link_libraries(cech_persistence Boost::program_options)
+ if (TARGET Boost::program_options)
+ add_executable(cech_persistence cech_persistence.cpp)
+ target_link_libraries(cech_persistence Boost::program_options)
- if (TBB_FOUND)
- target_link_libraries(cech_persistence ${TBB_LIBRARIES})
- endif()
+ if (TBB_FOUND)
+ target_link_libraries(cech_persistence ${TBB_LIBRARIES})
+ endif()
- add_test(NAME Cech_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:cech_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
+ add_test(NAME Cech_complex_utility_from_rips_on_tore_3D COMMAND $<TARGET_FILE:cech_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-r" "0.25" "-m" "0.5" "-d" "3" "-p" "3")
- install(TARGETS cech_persistence DESTINATION bin)
-endif() \ No newline at end of file
+ install(TARGETS cech_persistence DESTINATION bin)
+ endif()
+endif()
diff --git a/src/Cech_complex/utilities/cech_persistence.cpp b/src/Cech_complex/utilities/cech_persistence.cpp
index daea08e2..75d10c0f 100644
--- a/src/Cech_complex/utilities/cech_persistence.cpp
+++ b/src/Cech_complex/utilities/cech_persistence.cpp
@@ -9,13 +9,14 @@
*/
#include <gudhi/Cech_complex.h>
-#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Persistent_cohomology.h>
#include <gudhi/Points_off_io.h>
#include <boost/program_options.hpp>
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
+
#include <string>
#include <vector>
#include <limits> // infinity
@@ -23,10 +24,11 @@
// Types definition
using Simplex_tree = Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_fast_persistence>;
using Filtration_value = Simplex_tree::Filtration_value;
-using Point = std::vector<double>;
-using Point_cloud = std::vector<Point>;
+
+using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+using Point = typename Kernel::Point_d;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-using Cech_complex = Gudhi::cech_complex::Cech_complex<Simplex_tree, Point_cloud>;
+using Cech_complex = Gudhi::cech_complex::Cech_complex<Kernel, Simplex_tree>;
using Field_Zp = Gudhi::persistent_cohomology::Field_Zp;
using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Field_Zp>;
diff --git a/src/Collapse/doc/intro_edge_collapse.h b/src/Collapse/doc/intro_edge_collapse.h
index fde39707..12e909c8 100644
--- a/src/Collapse/doc/intro_edge_collapse.h
+++ b/src/Collapse/doc/intro_edge_collapse.h
@@ -17,68 +17,48 @@ namespace collapse {
/** \defgroup edge_collapse Edge collapse
*
- * \author Siddharth Pritam
+ * \author Siddharth Pritam and Marc Glisse
*
* @{
*
- * This module implements edge collapse of a filtered flag complex, in particular it reduces a filtration of
- * Vietoris-Rips complex from its graph to another smaller flag filtration with same persistence.
- * Where a filtration is a sequence of simplicial (here Rips) complexes connected with inclusions.
+ * This module implements edge collapse of a filtered flag complex as described in \cite edgecollapsearxiv; in
+ * particular, it reduces a filtration of a Vietoris-Rips complex, represented by its graph, to a smaller flag
+ * filtration with the same persistent homology.
*
* \section edge_collapse_definition Edge collapse definition
*
* An edge \f$e\f$ in a simplicial complex \f$K\f$ is called a <b>dominated edge</b> if the link of \f$e\f$ in
* \f$K\f$, \f$lk_K(e)\f$ is a simplicial cone, that is, there exists a vertex \f$v^{\prime} \notin e\f$ and a
- * subcomplex \f$L\f$ in \f$K\f$, such that \f$lk_K(e) = v^{\prime}L\f$. We say that the vertex \f$v^{\prime}\f$ is
- * {dominating} \f$e\f$ and \f$e\f$ is {dominated} by \f$v^{\prime}\f$.
- * An <b> elementary egde collapse </b> is the removal of a dominated edge \f$e\f$ from \f$K\f$,
- * which we denote with \f$K\f$ \f${\searrow\searrow}^1 \f$ \f$K\setminus e\f$.
- * The symbol \f$\mathbf{K\setminus e}\f$ (deletion of \f$e\f$ from \f$K\f$) refers to the subcomplex of \f$K\f$ which
- * has all simplices of \f$K\f$ except \f$e\f$ and the ones containing \f$e\f$.
- * There is an <b>edge collapse</b> from a simplicial complex \f$K\f$ to its subcomplex \f$L\f$,
- * if there exists a series of elementary edge collapses from \f$K\f$ to \f$L\f$, denoted as \f$K\f$
- * \f${\searrow\searrow}\f$ \f$L\f$.
- *
- * An edge collapse is a homotopy preserving operation, and it can be further expressed as sequence of the classical
- * elementary simple collapse.
- * A complex without any dominated edge is called a \f$1\f$- minimal complex and the core \f$K^1\f$ of simplicial
- * complex is a minimal complex such that \f$K\f$ \f${\searrow\searrow}\f$ \f$K^1\f$.
- * Computation of a core (not unique) involves computation of dominated edges and the dominated edges can be easily
- * characterized as follows:
+ * subcomplex \f$L\f$ in \f$K\f$, such that \f$lk_K(e) = v^{\prime}L\f$. We say that the vertex \f$v^{\prime}\f$
+ * \e dominates \f$e\f$ and \f$e\f$ is \e dominated by \f$v^{\prime}\f$.
+ * An <b>elementary edge collapse</b> is the removal of a dominated edge \f$e\f$ from \f$K\f$ (the cofaces of \f$e\f$
+ * are implicitly removed as well).
+ * Domination is used as a simple sufficient condition that ensures that this removal is a homotopy preserving
+ * operation.
+ *
+ * The dominated edges can be easily characterized as follows:
*
- * -- For general simplicial complex: An edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$,
- * <i>if and only if</i> all the maximal simplices of \f$K\f$ that contain \f$e\f$ also contain \f$v^{\prime}\f$
+ * -- For a general simplicial complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$,
+ * if and only if all the maximal simplices of \f$K\f$ that contain \f$e\f$ also contain \f$v^{\prime}\f$.
*
- * -- For a flag complex: An edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$, <i>if and only
- * if</i> all the vertices in \f$K\f$ that has an edge with both vertices of \f$e\f$ also has an edge with
- * \f$v^{\prime}\f$.
+ * -- For a flag complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$, if and only
+ * if all the vertices in \f$K\f$ that have an edge with both vertices of \f$e\f$ also have an edge with
+ * \f$v^{\prime}\f$. Notice that this only depends on the graph.
*
- * The algorithm to compute the smaller induced filtration is described in Section 5 \cite edgecollapsesocg2020.
- * Edge collapse can be successfully employed to reduce any given filtration of flag complexes to a smaller induced
+ * In the context of a filtration, an edge collapse may translate into an increase of the filtration value of an edge,
+ * or its removal if it already had the largest filtration value.
+ * The algorithm to compute the smaller induced filtration is described in \cite edgecollapsearxiv.
+ * Edge collapse can be successfully employed to reduce any input filtration of flag complexes to a smaller induced
* filtration which preserves the persistent homology of the original filtration and is a flag complex as well.
*
- * The general idea is that we consider edges in the filtered graph and sort them according to their filtration value
- * giving them a total order.
- * Each edge gets a unique index denoted as \f$i\f$ in this order. To reduce the filtration, we move forward with
- * increasing filtration value
- * in the graph and check if the current edge \f$e_i\f$ is dominated in the current graph \f$G_i := \{e_1, .. e_i\} \f$
- * or not.
- * If the edge \f$e_i\f$ is dominated we remove it from the filtration and move forward to the next edge \f$e_{i+1}\f$.
- * If \f$e_i\f$ is non-dominated then we keep it in the reduced filtration and then go backward in the current graph
- * \f$G_i\f$ to look for new non-dominated edges that was dominated before but might become non-dominated at this
- * point.
- * If an edge \f$e_j, j < i \f$ during the backward search is found to be non-dominated, we include \f$e_j\f$ in to the
- * reduced filtration and we set its new filtration value to be \f$i\f$ that is the index of \f$e_i\f$.
- * The precise mechanism for this reduction has been described in Section 5 \cite edgecollapsesocg2020.
- * Here we implement this mechanism for a filtration of Rips complex.
- * After perfoming the reduction the filtration reduces to a flag-filtration with the same persistence as the original
- * filtration.
- *
+ * The algorithm implemented here does not produce a minimal filtration. Taking its output and applying the algorithm a
+ * second time may further simplify the filtration.
+ *
* \subsection edgecollapseexample Basic edge collapse
*
* This example calls `Gudhi::collapse::flag_complex_collapse_edges()` from a proximity graph represented as a list of
* `Filtered_edge`.
- * Then it collapses edges and displays a new list of `Filtered_edge` (with less edges)
+ * Then it collapses edges and displays a new list of `Filtered_edge` (with fewer edges)
 * that will preserve the persistent homology computation.
*
* \include edge_collapse_basic_example.cpp
@@ -88,7 +68,7 @@ namespace collapse {
* \code $> ./Edge_collapse_example_basic
* \endcode
*
- * the program output is:
+ * the program output could be:
*
* \include edge_collapse_example_basic.txt
*/
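As a minimal illustration of the documented entry point (a sketch, not part of the patch; the vertex and filtration types are arbitrary choices mirroring the unit tests):

\code
#include <gudhi/Flag_complex_edge_collapser.h>

#include <tuple>
#include <vector>

int main() {
  using Filtered_edge = std::tuple<int, int, float>;
  // A 4-cycle appearing at time 1, then its two diagonals.
  std::vector<Filtered_edge> edges{{0, 1, 1.f}, {1, 2, 1.f}, {2, 3, 1.f}, {3, 0, 1.f},
                                   {0, 2, 2.f}, {1, 3, 2.1f}};
  // The later diagonal is dominated and gets removed; the output is again a
  // vector of (u, v, filtration) tuples with the same persistent homology.
  auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
}
\endcode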
diff --git a/src/Collapse/example/edge_collapse_conserve_persistence.cpp b/src/Collapse/example/edge_collapse_conserve_persistence.cpp
index b2c55e7a..19960597 100644
--- a/src/Collapse/example/edge_collapse_conserve_persistence.cpp
+++ b/src/Collapse/example/edge_collapse_conserve_persistence.cpp
@@ -103,7 +103,7 @@ int main(int argc, char* argv[]) {
Gudhi::Euclidean_distance());
if (num_edges(proximity_graph) <= 0) {
- std::cerr << "Total number of egdes are zero." << std::endl;
+ std::cerr << "Total number of edges is zero." << std::endl;
exit(-1);
}
diff --git a/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
index 713c6608..d0b3fe4a 100644
--- a/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
+++ b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
@@ -1,11 +1,12 @@
/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
* See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Siddharth Pritam
+ * Author(s): Siddharth Pritam, Marc Glisse
*
* Copyright (C) 2020 Inria
*
* Modification(s):
* - 2020/03 Vincent Rouvreau: integration to the gudhi library
+ * - 2021 Marc Glisse: complete rewrite
* - YYYY/MM Author: Description of the modification
*/
@@ -14,367 +15,319 @@
#include <gudhi/Debug_utils.h>
-#include <boost/functional/hash.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-
-#include <Eigen/Sparse>
-#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
+#include <boost/container/flat_map.hpp>
+#include <boost/container/flat_set.hpp>
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_sort.h>
#endif
-#include <iostream>
-#include <utility> // for std::pair
+#include <utility>
#include <vector>
-#include <unordered_map>
-#include <unordered_set>
-#include <set>
-#include <tuple> // for std::tie
-#include <algorithm> // for std::includes
-#include <iterator> // for std::inserter
-#include <type_traits> // for std::decay
-
-// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
-#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Edge Collapse is only available for Eigen3 >= 3.1.0
-#endif
+#include <tuple>
+#include <algorithm>
+#include <limits>
namespace Gudhi {
namespace collapse {
/** \private
- *
- * \brief Flag complex sparse matrix data structure.
*
- * \details
- * This class stores a <a target="_blank" href="https://en.wikipedia.org/wiki/Clique_complex">Flag complex</a>
- * in an <a target="_blank" href="https://eigen.tuxfamily.org/dox/group__TutorialSparse.html">Eigen sparse matrix</a>.
+ * \brief Flag complex sparse matrix data structure.
*
- * \tparam Vertex type must be a signed integer type. It admits a total order <.
- * \tparam Filtration type for the value of the filtration function. Must be comparable with <.
+ * \tparam Vertex type must be an integer type.
+ * \tparam Filtration type for the value of the filtration function.
*/
-template<typename Vertex, typename Filtration>
-class Flag_complex_edge_collapser {
- public:
- /** \brief Re-define Vertex as Vertex_handle type to ease the interface with `Gudhi::Proximity_graph`. */
- using Vertex_handle = Vertex;
- /** \brief Re-define Filtration as Filtration_value type to ease the interface with `Gudhi::Proximity_graph`. */
- using Filtration_value = Filtration;
-
- private:
- // internal numbering of vertices and edges
- using IVertex = std::size_t;
- using Edge_index = std::size_t;
- using IEdge = std::pair<IVertex, IVertex>;
-
- // The sparse matrix data type
- // (Eigen::SparseMatrix<Edge_index, Eigen::RowMajor> has slow insertions)
- using Sparse_vector = Eigen::SparseVector<Edge_index>;
- using Sparse_row_matrix = std::vector<Sparse_vector>;
-
- // Range of neighbors of a vertex
- template<bool closed>
- struct Neighbours {
- class iterator : public boost::iterator_facade<iterator,
- IVertex, /* value_type */
- std::input_iterator_tag, // or boost::single_pass_traversal_tag
- IVertex /* reference */ >
- {
- public:
- iterator():ptr(nullptr){}
- iterator(Neighbours const*p):ptr(p){find_valid();}
- private:
- friend class boost::iterator_core_access;
- Neighbours const*ptr;
- void increment(){
- ++ptr->it;
- find_valid();
- }
- void find_valid(){
- auto& it = ptr->it;
- do {
- if(!it) { ptr=nullptr; break; }
- if(IVertex(it.index()) == ptr->u) {
- if(closed) break;
- else continue;
- }
- Edge_index e = it.value();
- if(e <= ptr->ec->current_backward || ptr->ec->critical_edge_indicator_[e]) break;
- } while(++it, true);
- }
- bool equal(iterator const& other) const { return ptr == other.ptr; }
- IVertex dereference() const { return ptr->it.index(); }
- };
- typedef iterator const_iterator;
- mutable typename Sparse_vector::InnerIterator it;
- Flag_complex_edge_collapser const*ec;
- IVertex u;
- iterator begin() const { return this; }
- iterator end() const { return {}; }
- explicit Neighbours(Flag_complex_edge_collapser const*p,IVertex u):it(p->sparse_row_adjacency_matrix_[u]),ec(p),u(u){}
- };
-
- // A range of row indices
- using IVertex_vector = std::vector<IVertex>;
-
- public:
- /** \brief Filtered_edge is a type to store an edge with its filtration value. */
- using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
-
- private:
- // Map from row index to its vertex handle
- std::vector<Vertex_handle> row_to_vertex_;
-
- // Index of the current edge in the backwards walk. Edges <= current_backward are part of the temporary graph,
- // while edges > current_backward are removed unless critical_edge_indicator_.
- Edge_index current_backward = -1;
-
- // Map from IEdge to its index
- std::unordered_map<IEdge, Edge_index, boost::hash<IEdge>> iedge_to_index_map_;
-
- // Boolean vector to indicate if the edge is critical.
- std::vector<bool> critical_edge_indicator_;
-
- // Map from vertex handle to its row index
- std::unordered_map<Vertex_handle, IVertex> vertex_to_row_;
-
- // Stores the Sparse matrix of Filtration values representing the original graph.
- // The matrix rows and columns are indexed by IVertex.
- Sparse_row_matrix sparse_row_adjacency_matrix_;
-
- // The input, a vector of filtered edges.
- std::vector<Filtered_edge> f_edge_vector_;
-
- // Edge is the actual edge (u,v), with Vertex_handle u and v, not IVertex.
- bool edge_is_dominated(Vertex_handle u, Vertex_handle v) const
- {
- const IVertex rw_u = vertex_to_row_.at(u);
- const IVertex rw_v = vertex_to_row_.at(v);
-#ifdef DEBUG_TRACES
- std::cout << "The edge {" << u << ", " << v << "} is going for domination check." << std::endl;
-#endif // DEBUG_TRACES
- auto common_neighbours = open_common_neighbours_row_index(rw_u, rw_v);
-#ifdef DEBUG_TRACES
- std::cout << "And its common neighbours are." << std::endl;
- for (auto neighbour : common_neighbours) {
- std::cout << row_to_vertex_[neighbour] << ", " ;
- }
- std::cout<< std::endl;
-#endif // DEBUG_TRACES
- if (common_neighbours.size() == 1)
- return true;
- else
- for (auto rw_c : common_neighbours) {
- auto neighbours_c = neighbours_row_index<true>(rw_c);
- // If neighbours_c contains the common neighbours.
- if (std::includes(neighbours_c.begin(), neighbours_c.end(),
- common_neighbours.begin(), common_neighbours.end()))
- return true;
- }
- return false;
+template<typename Vertex, typename Filtration_value>
+struct Flag_complex_edge_collapser {
+ using Filtered_edge = std::tuple<Vertex, Vertex, Filtration_value>;
+ typedef std::pair<Vertex,Vertex> Edge;
+ struct Cmpi { template<class T, class U> bool operator()(T const&a, U const&b)const{return b<a; } };
+ typedef boost::container::flat_map<Vertex, Filtration_value> Ngb_list;
+ typedef std::vector<Ngb_list> Neighbors;
+ Neighbors neighbors; // closed neighborhood
+ std::size_t num_vertices;
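+  // Output accumulator: the surviving edges, with possibly increased filtration values.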
+ std::vector<std::tuple<Vertex, Vertex, Filtration_value>> res;
+
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ // Minimal matrix interface
+ // Using this matrix generally helps performance, but the memory use may be excessive for a very sparse graph
+ // (and in extreme cases the constant initialization of the matrix may start to dominate the running time).
+ // Are there cases where the matrix is too big but a hash table would help?
+ std::vector<Filtration_value> neighbors_data;
+ void init_neighbors_dense(){
+ neighbors_data.clear();
+ neighbors_data.resize(num_vertices*num_vertices, std::numeric_limits<Filtration_value>::infinity());
}
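+  // Flat, symmetric num_vertices x num_vertices matrix: entry (i,j) holds the filtration value of edge {i,j}, infinity when absent.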
+ Filtration_value& neighbors_dense(Vertex i, Vertex j){return neighbors_data[num_vertices*j+i];}
+#endif
- // Returns the edges connecting u and v (extremities of crit) to their common neighbors (not themselves)
- std::set<Edge_index> three_clique_indices(Edge_index crit) {
- std::set<Edge_index> edge_indices;
-
- Vertex_handle u = std::get<0>(f_edge_vector_[crit]);
- Vertex_handle v = std::get<1>(f_edge_vector_[crit]);
-
-#ifdef DEBUG_TRACES
- std::cout << "The current critical edge to re-check criticality with filt value is : f {" << u << "," << v
- << "} = " << std::get<2>(f_edge_vector_[crit]) << std::endl;
-#endif // DEBUG_TRACES
- auto rw_u = vertex_to_row_[u];
- auto rw_v = vertex_to_row_[v];
-
- IVertex_vector common_neighbours = open_common_neighbours_row_index(rw_u, rw_v);
+ // This does not touch the events list, only the adjacency matrix(es)
+ void delay_neighbor(Vertex u, Vertex v, Filtration_value f) {
+ neighbors[u][v]=f;
+ neighbors[v][u]=f;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ void remove_neighbor(Vertex u, Vertex v) {
+ neighbors[u].erase(v);
+ neighbors[v].erase(u);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=std::numeric_limits<Filtration_value>::infinity();
+ neighbors_dense(v,u)=std::numeric_limits<Filtration_value>::infinity();
+#endif
+ }
- for (auto rw_c : common_neighbours) {
- IEdge e_with_new_nbhr_v = std::minmax(rw_u, rw_c);
- IEdge e_with_new_nbhr_u = std::minmax(rw_v, rw_c);
- edge_indices.emplace(iedge_to_index_map_[e_with_new_nbhr_v]);
- edge_indices.emplace(iedge_to_index_map_[e_with_new_nbhr_u]);
+ template<class FilteredEdgeRange>
+ void read_edges(FilteredEdgeRange const&r){
+ neighbors.resize(num_vertices);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ init_neighbors_dense();
+#endif
+ // Use the raw sequence to avoid maintaining the order
+ std::vector<typename Ngb_list::sequence_type> neighbors_seq(num_vertices);
+ for(auto&&e : r){
+ using std::get;
+ Vertex u = get<0>(e);
+ Vertex v = get<1>(e);
+ Filtration_value f = get<2>(e);
+ neighbors_seq[u].emplace_back(v, f);
+ neighbors_seq[v].emplace_back(u, f);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ for(std::size_t i=0;i<neighbors_seq.size();++i){
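+      // Also insert the vertex itself, at -infinity, so each stored neighborhood is closed.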
+ neighbors_seq[i].emplace_back(i, -std::numeric_limits<Filtration_value>::infinity());
+ neighbors[i].adopt_sequence(std::move(neighbors_seq[i])); // calls sort
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(i,i)=-std::numeric_limits<Filtration_value>::infinity();
+#endif
}
- return edge_indices;
}
- // Detect and set all edges that are becoming critical
- template<typename FilteredEdgeOutput>
- void set_edge_critical(Edge_index indx, Filtration_value filt, FilteredEdgeOutput filtered_edge_output) {
-#ifdef DEBUG_TRACES
- std::cout << "The curent index with filtration value " << indx << ", " << filt << " is primary critical" <<
- std::endl;
-#endif // DEBUG_TRACES
- std::set<Edge_index> effected_indices = three_clique_indices(indx);
- // Cannot use boost::adaptors::reverse in such dynamic cases apparently
- for (auto it = effected_indices.rbegin(); it != effected_indices.rend(); ++it) {
- current_backward = *it;
- Vertex_handle u = std::get<0>(f_edge_vector_[current_backward]);
- Vertex_handle v = std::get<1>(f_edge_vector_[current_backward]);
- // If current_backward is not critical so it should be processed, otherwise it stays in the graph
- if (!critical_edge_indicator_[current_backward]) {
- if (!edge_is_dominated(u, v)) {
-#ifdef DEBUG_TRACES
- std::cout << "The curent index became critical " << current_backward << std::endl;
-#endif // DEBUG_TRACES
- critical_edge_indicator_[current_backward] = true;
- filtered_edge_output(u, v, filt);
- std::set<Edge_index> inner_effected_indcs = three_clique_indices(current_backward);
- for (auto inr_idx : inner_effected_indcs) {
- if(inr_idx < current_backward) // && !critical_edge_indicator_[inr_idx]
- effected_indices.emplace(inr_idx);
- }
-#ifdef DEBUG_TRACES
- std::cout << "The following edge is critical with filt value: {" << u << "," << v << "}; "
- << filt << std::endl;
-#endif // DEBUG_TRACES
- }
+ // Open neighborhood
+ // At some point it helped gcc to add __attribute__((noinline)) here, otherwise we had +50% on the running time
+ // on one example. It looks ok now, or I forgot which example that was.
+ void common_neighbors(boost::container::flat_set<Vertex>& e_ngb,
+ std::vector<std::pair<Filtration_value, Vertex>>& e_ngb_later,
+ Vertex u, Vertex v, Filtration_value f_event){
+ // Using neighbors_dense here seems to hurt, even if we loop on the smaller of nu and nv.
+ Ngb_list const&nu = neighbors[u];
+ Ngb_list const&nv = neighbors[v];
+ auto ui = nu.begin();
+ auto ue = nu.end();
+ auto vi = nv.begin();
+ auto ve = nv.end();
+ assert(ui != ue && vi != ve);
+ while(ui != ue && vi != ve){
+ Vertex w = ui->first;
+ if(w < vi->first) { ++ui; continue; }
+ if(w > vi->first) { ++vi; continue; }
+ // nu and nv are closed, so we need to exclude e here.
+ if(w != u && w != v) {
+ Filtration_value f = std::max(ui->second, vi->second);
+ if(f > f_event)
+ e_ngb_later.emplace_back(f, w);
+ else
+ e_ngb.insert(e_ngb.end(), w);
}
+ ++ui; ++vi;
}
- // Clear the implicit "removed from graph" data structure
- current_backward = -1;
}
- // Returns list of neighbors of a particular vertex.
- template<bool closed>
- auto neighbours_row_index(IVertex rw_u) const
- {
- return Neighbours<closed>(this, rw_u);
+ // Test if the neighborhood of e is included in the closed neighborhood of c
+ template<class Ngb>
+ bool is_dominated_by(Ngb const& e_ngb, Vertex c, Filtration_value f){
+ // The best strategy probably depends on the distribution, how sparse / dense the adjacency matrix is,
+ // how (un)balanced the sizes of e_ngb and nc are.
+ // Some efficient operations on sets work best with bitsets, although the need for a map complicates things.
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ for(auto v : e_ngb) {
+ // if(v==c)continue;
+ if(neighbors_dense(v,c) > f) return false;
+ }
+ return true;
+#else
+ auto&&nc = neighbors[c];
+ // if few neighbors, use dichotomy? Seems slower.
+ // I tried storing a copy of neighbors as a vector<absl::flat_hash_map> and using it for nc, but it was
+ // a bit slower here. It did help with neighbors[dominator].find(w) in the main function though,
+ // sometimes enough, sometimes not.
+ auto ci = nc.begin();
+ auto ce = nc.end();
+ auto eni = e_ngb.begin();
+ auto ene = e_ngb.end();
+ assert(eni != ene);
+ assert(ci != ce);
+ // if(*eni == c && ++eni == ene) return true;
+ for(;;){
+ Vertex ve = *eni;
+ Vertex vc = ci->first;
+ while(ve > vc) {
+ // try a gallop strategy (exponential search)? Seems slower
+ if(++ci == ce) return false;
+ vc = ci->first;
+ }
+ if(ve < vc) return false;
+ // ve == vc
+ if(ci->second > f) return false;
+ if(++eni == ene)return true;
+ // If we stored an open neighborhood of c (excluding c), we would need to test for c here and before the loop
+ // if(*eni == c && ++eni == ene)return true;
+ if(++ci == ce) return false;
+ }
+#endif
}
- // Returns the list of open neighbours of the edge :{u,v}.
- IVertex_vector open_common_neighbours_row_index(IVertex rw_u, IVertex rw_v) const
- {
- auto non_zero_indices_u = neighbours_row_index<false>(rw_u);
- auto non_zero_indices_v = neighbours_row_index<false>(rw_v);
- IVertex_vector common;
- std::set_intersection(non_zero_indices_u.begin(), non_zero_indices_u.end(), non_zero_indices_v.begin(),
- non_zero_indices_v.end(), std::back_inserter(common));
-
- return common;
- }
+ template<class FilteredEdgeRange, class Delay>
+ void process_edges(FilteredEdgeRange const& edges, Delay&& delay) {
+ {
+ Vertex maxi = 0, maxj = 0;
+ for(auto& fe : edges) {
+ Vertex i = std::get<0>(fe);
+ Vertex j = std::get<1>(fe);
+ if (i > maxi) maxi = i;
+ if (j > maxj) maxj = j;
+ }
+ num_vertices = std::max(maxi, maxj) + 1;
+ }
- // Insert a vertex in the data structure
- IVertex insert_vertex(Vertex_handle vertex) {
- auto n = row_to_vertex_.size();
- auto result = vertex_to_row_.emplace(vertex, n);
- // If it was not already inserted - Value won't be updated by emplace if it is already present
- if (result.second) {
- // Expand the matrix. The size of rows is irrelevant.
- sparse_row_adjacency_matrix_.emplace_back((std::numeric_limits<Eigen::Index>::max)());
- // Initializing the diagonal element of the adjency matrix corresponding to rw_b.
- sparse_row_adjacency_matrix_[n].insert(n) = -1; // not an edge
- // Must be done after reading its size()
- row_to_vertex_.push_back(vertex);
+ read_edges(edges);
+
+ boost::container::flat_set<Vertex> e_ngb;
+ e_ngb.reserve(num_vertices);
+ std::vector<std::pair<Filtration_value, Vertex>> e_ngb_later;
+ for(auto&e:edges) {
+ {
+ Vertex u = std::get<0>(e);
+ Vertex v = std::get<1>(e);
+ Filtration_value input_time = std::get<2>(e);
+ auto time = delay(input_time);
+ auto start_time = time;
+ e_ngb.clear();
+ e_ngb_later.clear();
+ common_neighbors(e_ngb, e_ngb_later, u, v, time);
+ // If we identify a good candidate (the first common neighbor) for being a dominator of e until infinity,
+ // we could check that a bit more cheaply. It does not seem to help though.
+ auto cmp1=[](auto const&a, auto const&b){return a.first > b.first;};
+ auto e_ngb_later_begin=e_ngb_later.begin();
+ auto e_ngb_later_end=e_ngb_later.end();
+ bool heapified = false;
+
+ bool dead = false;
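+        // "dead" will mean the edge remains dominated up to +infinity, so it can be removed outright.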
+ while(true) {
+ Vertex dominator = -1;
+ // special case for size 1
+ // if(e_ngb.size()==1){dominator=*e_ngb.begin();}else
+ // It is tempting to test the dominators in increasing order of filtration value, which is likely to reduce
+ // the number of calls to is_dominated_by before finding a dominator, but sorting, even partially / lazily,
+ // is very expensive.
+ for(auto c : e_ngb){
+ if(is_dominated_by(e_ngb, c, time)){
+ dominator = c;
+ break;
+ }
+ }
+ if(dominator==-1) break;
+ // Push as long as dominator remains a dominator.
+ // Iterate on times where at least one neighbor appears.
+ for (bool still_dominated = true; still_dominated; ) {
+ if(e_ngb_later_begin == e_ngb_later_end) {
+ dead = true; goto end_move;
+ }
+ if(!heapified) {
+ // Eagerly sorting can be slow
+ std::make_heap(e_ngb_later_begin, e_ngb_later_end, cmp1);
+ heapified=true;
+ }
+ time = e_ngb_later_begin->first; // first place it may become critical
+ // Update the neighborhood for this new time, while checking if any of the new neighbors break domination.
+ while (e_ngb_later_begin != e_ngb_later_end && e_ngb_later_begin->first <= time) {
+ Vertex w = e_ngb_later_begin->second;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ if (neighbors_dense(dominator,w) > e_ngb_later_begin->first)
+ still_dominated = false;
+#else
+ auto& ngb_dom = neighbors[dominator];
+ auto wit = ngb_dom.find(w); // neighborhood may be open or closed, it does not matter
+ if (wit == ngb_dom.end() || wit->second > e_ngb_later_begin->first)
+ still_dominated = false;
+#endif
+ e_ngb.insert(w);
+ std::pop_heap(e_ngb_later_begin, e_ngb_later_end--, cmp1);
+ }
+ } // this doesn't seem to help that much...
+ }
+end_move:
+ if(dead) {
+ remove_neighbor(u, v);
+ } else if(start_time != time) {
+ delay_neighbor(u, v, time);
+ res.emplace_back(u, v, time);
+ } else {
+ res.emplace_back(u, v, input_time);
+ }
+ }
}
- return result.first->second;
}
- // Insert an edge in the data structure
- // @exception std::invalid_argument In debug mode, if u == v
- IEdge insert_new_edge(Vertex_handle u, Vertex_handle v, Edge_index idx)
- {
- GUDHI_CHECK((u != v), std::invalid_argument("Flag_complex_edge_collapser::insert_new_edge with u == v"));
- // The edge must not be added before, it should be a new edge.
- IVertex rw_u = insert_vertex(u);
- IVertex rw_v = insert_vertex(v);
-#ifdef DEBUG_TRACES
- std::cout << "Inserting the edge " << u <<", " << v << std::endl;
-#endif // DEBUG_TRACES
- sparse_row_adjacency_matrix_[rw_u].insert(rw_v) = idx;
- sparse_row_adjacency_matrix_[rw_v].insert(rw_u) = idx;
- return std::minmax(rw_u, rw_v);
+ std::vector<Filtered_edge> output() {
+ return std::move(res);
}
- public:
- /** \brief Flag_complex_edge_collapser constructor from a range of filtered edges.
- *
- * @param[in] edges Range of Filtered edges range.There is no need the range to be sorted, as it will be performed in
- * `Flag_complex_edge_collapser::process_edges`.
- *
- * \tparam FilteredEdgeRange must be a range for which std::begin and std::end return iterators on a
- * `Flag_complex_edge_collapser::Filtered_edge`.
- */
- template<typename FilteredEdgeRange>
- Flag_complex_edge_collapser(const FilteredEdgeRange& edges)
- : f_edge_vector_(std::begin(edges), std::end(edges)) { }
+};
- /** \brief Performs edge collapse in a increasing sequence of the filtration value.
- *
- * \tparam filtered_edge_output is a functor that is called on the output edges, in non-decreasing order of
- * filtration, as filtered_edge_output(u, v, f) where u and v are Vertex_handle representing the extremities of the
- * edge, and f is its new Filtration_value.
- */
- template<typename FilteredEdgeOutput>
- void process_edges(FilteredEdgeOutput filtered_edge_output) {
- // Sort edges
- auto sort_by_filtration = [](const Filtered_edge& edge_a, const Filtered_edge& edge_b) -> bool
- {
- return (std::get<2>(edge_a) < std::get<2>(edge_b));
- };
+template<class R> R to_range(R&& r) { return std::move(r); }
+template<class R, class T> R to_range(T&& t) { R r; r.insert(r.end(), t.begin(), t.end()); return r; }
+template<class FilteredEdgeRange, class Delay>
+auto flag_complex_collapse_edges(FilteredEdgeRange&& edges, Delay&&delay) {
+ // Would it help to label the points according to some spatial sorting?
+ auto first_edge_itr = std::begin(edges);
+ using Vertex = std::decay_t<decltype(std::get<0>(*first_edge_itr))>;
+ using Filtration_value = std::decay_t<decltype(std::get<2>(*first_edge_itr))>;
+ using Edge_collapser = Flag_complex_edge_collapser<Vertex, Filtration_value>;
+ if (first_edge_itr != std::end(edges)) {
+ auto edges2 = to_range<std::vector<typename Edge_collapser::Filtered_edge>>(std::forward<FilteredEdgeRange>(edges));
#ifdef GUDHI_USE_TBB
- tbb::parallel_sort(f_edge_vector_.begin(), f_edge_vector_.end(), sort_by_filtration);
+ // I think this sorting is always negligible compared to the collapse, but parallelizing it shouldn't hurt.
+ tbb::parallel_sort(edges2.begin(), edges2.end(),
+ [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
#else
- std::sort(f_edge_vector_.begin(), f_edge_vector_.end(), sort_by_filtration);
+ std::sort(edges2.begin(), edges2.end(), [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
#endif
-
- for (Edge_index endIdx = 0; endIdx < f_edge_vector_.size(); endIdx++) {
- Filtered_edge fec = f_edge_vector_[endIdx];
- Vertex_handle u = std::get<0>(fec);
- Vertex_handle v = std::get<1>(fec);
- Filtration_value filt = std::get<2>(fec);
-
- // Inserts the edge in the sparse matrix to update the graph (G_i)
- IEdge ie = insert_new_edge(u, v, endIdx);
-
- iedge_to_index_map_.emplace(ie, endIdx);
- critical_edge_indicator_.push_back(false);
-
- if (!edge_is_dominated(u, v)) {
- critical_edge_indicator_[endIdx] = true;
- filtered_edge_output(u, v, filt);
- if (endIdx > 1)
- set_edge_critical(endIdx, filt, filtered_edge_output);
- }
- }
+ Edge_collapser edge_collapser;
+ edge_collapser.process_edges(edges2, std::forward<Delay>(delay));
+ return edge_collapser.output();
}
-
-};
+ return std::vector<typename Edge_collapser::Filtered_edge>();
+}
/** \brief Implicitly constructs a flag complex from edges as an input, collapses edges while preserving the persistent
- * homology and returns the remaining edges as a range.
+ * homology and returns the remaining edges as a range. The filtration value of vertices is irrelevant to this function.
*
- * \param[in] edges Range of Filtered edges.There is no need the range to be sorted, as it will be performed.
+ * \param[in] edges Range of Filtered edges. There is no need for the range to be sorted, as it will be done internally.
*
- * \tparam FilteredEdgeRange furnishes `std::begin` and `std::end` methods and returns an iterator on a
- * FilteredEdge of type `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>` where `Vertex_handle` is the type
- * of a vertex index and `Filtration_value` is the type of an edge filtration value.
+ * \tparam FilteredEdgeRange Range of `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`
+ * where `Vertex_handle` is the type of a vertex index.
*
* \return Remaining edges after collapse as a range of
* `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`.
- *
+ *
* \ingroup edge_collapse
- *
+ *
+ * \note
+ * Advanced: Defining the macro GUDHI_COLLAPSE_USE_DENSE_ARRAY tells GUDHI to allocate a square table whose side is
+ * the maximum vertex index plus one. This usually speeds up the computation for dense graphs. However, for sparse
+ * graphs, the memory use may be problematic and initializing this large table may be slow.
*/
template<class FilteredEdgeRange> auto flag_complex_collapse_edges(const FilteredEdgeRange& edges) {
- auto first_edge_itr = std::begin(edges);
- using Vertex_handle = std::decay_t<decltype(std::get<0>(*first_edge_itr))>;
- using Filtration_value = std::decay_t<decltype(std::get<2>(*first_edge_itr))>;
- using Edge_collapser = Flag_complex_edge_collapser<Vertex_handle, Filtration_value>;
- std::vector<typename Edge_collapser::Filtered_edge> remaining_edges;
- if (first_edge_itr != std::end(edges)) {
- Edge_collapser edge_collapser(edges);
- edge_collapser.process_edges(
- [&remaining_edges](Vertex_handle u, Vertex_handle v, Filtration_value filtration) {
- // insert the edge
- remaining_edges.emplace_back(u, v, filtration);
- });
- }
- return remaining_edges;
+ return flag_complex_collapse_edges(edges, [](auto const&d){return d;});
}
} // namespace collapse
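A usage sketch for the new two-argument overload defined above: the `delay` functor may map each filtration value to a larger one before processing, trading exactness of the filtration for more collapses (the 10% slack below is an arbitrary illustration, assuming nonnegative filtration values):

\code
// Allowing each edge to appear 10% later gives the collapser more chances to
// dominate (and thus drop or delay) edges, at the price of perturbing the
// persistence diagram accordingly.
auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(
    edges, [](auto f) { return f * 1.1f; });
\endcode

The note above on GUDHI_COLLAPSE_USE_DENSE_ARRAY applies to both overloads.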
diff --git a/src/Collapse/test/collapse_unit_test.cpp b/src/Collapse/test/collapse_unit_test.cpp
index b8876246..f41dbedd 100644
--- a/src/Collapse/test/collapse_unit_test.cpp
+++ b/src/Collapse/test/collapse_unit_test.cpp
@@ -98,8 +98,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o
// 0 3
edges.emplace_back(0, 2, 2.);
- edges.emplace_back(1, 3, 2.);
- trace_and_check_collapse(edges, {{1, 3, 2.}});
+ edges.emplace_back(1, 3, 2.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}});
// 1 2 4
// o---o---o
@@ -121,8 +121,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o---o
// 0 3 5
edges.emplace_back(2, 5, 4.);
- edges.emplace_back(4, 3, 4.);
- trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}});
+ edges.emplace_back(4, 3, 4.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.1}});
// 1 2 4
// o---o---o
@@ -132,8 +132,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o---o
// 0 3 5
edges.emplace_back(1, 5, 5.);
- edges.emplace_back(0, 4, 5.);
- trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}, {0, 4, 5.}});
+ edges.emplace_back(0, 4, 5.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}, {0, 4, 5.1}});
}
BOOST_AUTO_TEST_CASE(collapse_from_array) {
@@ -150,8 +150,8 @@ BOOST_AUTO_TEST_CASE(collapse_from_array) {
{2, 3, 1.},
{3, 0, 1.},
{0, 2, 2.},
- {1, 3, 2.}}};
- trace_and_check_collapse(f_edge_array, {{1, 3, 2.}});
+ {1, 3, 2.1}}};
+ trace_and_check_collapse(f_edge_array, {{1, 3, 2.1}});
}
BOOST_AUTO_TEST_CASE(collapse_from_proximity_graph) {
diff --git a/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp b/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp
index 11ee5871..38efb9e6 100644
--- a/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp
+++ b/src/Collapse/utilities/distance_matrix_edge_collapse_rips_persistence.cpp
@@ -45,7 +45,7 @@ int main(int argc, char* argv[]) {
min_persistence);
Distance_matrix distances = Gudhi::read_lower_triangular_matrix_from_csv_file<Filtration_value>(csv_matrix_file);
- std::cout << "Read the distance matrix succesfully, of size: " << distances.size() << std::endl;
+ std::cout << "Read the distance matrix successfully, of size: " << distances.size() << std::endl;
Proximity_graph proximity_graph = Gudhi::compute_proximity_graph<Simplex_tree>(boost::irange((size_t)0,
distances.size()),
diff --git a/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp b/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp
index 0eea742c..d8f42ab6 100644
--- a/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp
+++ b/src/Collapse/utilities/point_cloud_edge_collapse_rips_persistence.cpp
@@ -77,7 +77,7 @@ int main(int argc, char* argv[]) {
Gudhi::Euclidean_distance());
if (num_edges(proximity_graph) <= 0) {
- std::cerr << "Total number of egdes are zero." << std::endl;
+ std::cerr << "Total number of edges is zero." << std::endl;
exit(-1);
}
diff --git a/src/Contraction/doc/so3.svg b/src/Contraction/doc/so3.svg
index adea3f38..f10cab98 100644
--- a/src/Contraction/doc/so3.svg
+++ b/src/Contraction/doc/so3.svg
@@ -177,7 +177,7 @@
x="309.4176"
y="300.58682"
id="tspan4515-4"
- style="text-align:center;text-anchor:middle">Rips complex built uppon these points</tspan><tspan
+ style="text-align:center;text-anchor:middle">Rips complex built upon these points</tspan><tspan
sodipodi:role="line"
x="309.4176"
y="308.96704"
diff --git a/src/Contraction/example/Garland_heckbert/Error_quadric.h b/src/Contraction/example/Garland_heckbert/Error_quadric.h
index 49250d7a..ae46232c 100644
--- a/src/Contraction/example/Garland_heckbert/Error_quadric.h
+++ b/src/Contraction/example/Garland_heckbert/Error_quadric.h
@@ -29,7 +29,7 @@ template <typename Point> class Error_quadric {
* Quadric corresponding to the L2 distance to the plane.
*
* According to the notation of Garland Heckbert, they
- * denote a quadric symetric matrix as :
+ * denote a quadric symmetric matrix as:
* Q = [ q11 q12 q13 q14]
* [ q12 q22 q23 q24]
* [ q13 q23 q33 q34]
diff --git a/src/Contraction/example/Rips_contraction.cpp b/src/Contraction/example/Rips_contraction.cpp
index 42dd0910..547c290e 100644
--- a/src/Contraction/example/Rips_contraction.cpp
+++ b/src/Contraction/example/Rips_contraction.cpp
@@ -39,7 +39,7 @@ void build_rips(ComplexType& complex, double offset) {
int main(int argc, char *argv[]) {
if (argc != 3) {
std::cerr << "Usage " << argv[0] << " ../../../data/meshes/SO3_10000.off 0.3 to load the file " <<
- "../../data/SO3_10000.off and contract the Rips complex built with paremeter 0.3.\n";
+ "../../data/SO3_10000.off and contract the Rips complex built with parameter 0.3.\n";
return -1;
}
diff --git a/src/Contraction/include/gudhi/Edge_contraction.h b/src/Contraction/include/gudhi/Edge_contraction.h
index 6c0f4c78..dff6dc14 100644
--- a/src/Contraction/include/gudhi/Edge_contraction.h
+++ b/src/Contraction/include/gudhi/Edge_contraction.h
@@ -26,6 +26,7 @@ namespace contraction {
/** \defgroup contr Edge contraction
+@{
\author David Salinas
@@ -45,9 +46,9 @@ the operations needed for edge contraction algorithms have polynomial complexity
Therefore, the simplification can be done without enumerating the set of simplices, which is often intractable in high dimension, and is then very efficient
(sub-linear with respect to the number of simplices in practice).
-A typical application of this package is homology group computation. It is illustrated in the next figure where a Rips complex is built uppon a set of high-dimensional points and
+A typical application of this package is homology group computation. It is illustrated in the next figure where a Rips complex is built upon a set of high-dimensional points and
simplified with edge contractions.
-It has initially a big number of simplices (around 20 millions) but simplifying it to a much reduced form with only 15 vertices (and 714 simplices) takes only few seconds on a desktop machine (see the example bellow).
+It initially has a large number of simplices (around 20 million), but simplifying it to a much smaller complex with only 15 vertices (and 714 simplices) takes only a few seconds on a desktop machine (see the example below).
One can then compute homology groups with a simplicial complex having very few simplices instead of running the homology algorithm on the much bigger initial set of
simplices which would take much more time and memory.
@@ -64,7 +65,7 @@ This class design is policy based and heavily inspired from the similar edge col
Four policies can be customized in this package:
\li Cost_policy: specifies the cost of contracting a given edge. The edge with the lowest cost is iteratively picked and contracted if valid.
-\li Valid_contraction_policy: specify if a given edge contraction is valid. For instance, this policy can check the link condition which ensures that the homotopy type is preserved afer the edge contraction.
+\li Valid_contraction_policy: specify if a given edge contraction is valid. For instance, this policy can check the link condition which ensures that the homotopy type is preserved after the edge contraction.
\li Placement_policy: every time an edge is contracted, its two endpoints are merged into one point specified by this policy. This may be the middle of the edge or some more sophisticated point such as the minimizer of a cost as in
\cite Garland.
@@ -91,7 +92,7 @@ Despite this package is able to deal with \a arbitrary simplicial complexes (any
it is still \a 65% faster than the CGAL package, which is focused on 2-manifolds.
The main reason is that few blockers appear during the simplification and hence,
the algorithm only has to deal with the graph and not higher-dimensional simplices
-(in this case triangles). However, we recall that higher-dimensional simplices are \a implicitely
+(in this case triangles). However, we recall that higher-dimensional simplices are \a implicitly
stored in the \ref skbl data-structure. Hence, one has to store
simplices in an external map if some information needs to be associated with them (information that could be a filtration value or
an orientation for instance).
@@ -152,7 +153,7 @@ void build_rips(ComplexType& complex, double offset){
int main (int argc, char *argv[])
{
if (argc!=3){
- std::cerr << "Usage "<<argv[0]<<" ../../data/SO3_10000.off 0.3 to load the file ../../data/SO3_10000.off and contract the Rips complex built with paremeter 0.3.\n";
+ std::cerr << "Usage "<<argv[0]<<" ../../data/SO3_10000.off 0.3 to load the file ../../data/SO3_10000.off and contract the Rips complex built with parameter 0.3.\n";
return -1;
}
@@ -195,7 +196,6 @@ int main (int argc, char *argv[])
return EXIT_SUCCESS;
}
-}
\endcode
\verbatim
diff --git a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
index a0d9f2b2..6911ca2e 100644
--- a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
+++ b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
@@ -171,8 +171,13 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
Self const* algorithm_;
};
+#if CGAL_VERSION_NR < 1050500000
typedef CGAL::Modifiable_priority_queue<Edge_handle, Compare_cost, Undirected_edge_id> PQ;
- typedef typename PQ::handle pq_handle;
+#else
+ typedef CGAL::Modifiable_priority_queue<Edge_handle, Compare_cost, Undirected_edge_id, CGAL::CGAL_BOOST_PENDING_RELAXED_HEAP> PQ;
+#endif
+
+ typedef bool pq_handle;
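+  // The newer CGAL interface exposes no stable handles: the "handle" now only records whether the edge is in the queue.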
// An Edge_data is associated with EVERY edge in the complex (collapsible or not).
@@ -196,7 +201,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
bool is_in_PQ() const {
- return PQHandle_ != PQ::null_handle();
+ return PQHandle_ != false;
}
void set_PQ_handle(pq_handle h) {
@@ -204,7 +209,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void reset_PQ_handle() {
- PQHandle_ = PQ::null_handle();
+ PQHandle_ = false;
}
private:
@@ -238,16 +243,22 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void insert_in_PQ(Edge_handle edge, Edge_data& data) {
- data.set_PQ_handle(heap_PQ_->push(edge));
+ heap_PQ_->push(edge);
+ data.set_PQ_handle(true);
++current_num_edges_heap_;
}
void update_in_PQ(Edge_handle edge, Edge_data& data) {
+#if CGAL_VERSION_NR < 1050500000
data.set_PQ_handle(heap_PQ_->update(edge, data.PQ_handle()));
+#else
+ heap_PQ_->update(edge);
+#endif
}
void remove_from_PQ(Edge_handle edge, Edge_data& data) {
- data.set_PQ_handle(heap_PQ_->erase(edge, data.PQ_handle()));
+ heap_PQ_->erase(edge);
+ data.set_PQ_handle(false);
--current_num_edges_heap_;
}
@@ -280,7 +291,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
std::size_t id = 0;
- // xxx do a parralel for
+ // xxx do a parallel for
for (auto edge : complex_.edge_range()) {
complex_[edge].index() = id++;
Profile const& profile = create_profile(edge);
@@ -474,7 +485,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
}
void update_changed_edges() {
- // xxx do a parralel for
+ // xxx do a parallel for
DBG("update edges");
// sequential loop
@@ -530,7 +541,7 @@ typename GeometricSimplifiableComplex::Vertex_handle> {
// by definition of a blocker
    // todo: only useful for the link condition
- // laisser a l'utilisateur ? booleen update_heap_on_removed_blocker?
+    // leave it to the user? boolean update_heap_on_removed_blocker?
Simplex blocker_copy(*blocker);
for (auto x = blocker_copy.begin(); x != blocker_copy.end(); ++x) {
for (auto y = x; ++y != blocker_copy.end();) {
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
index 59e9a350..9b57da3c 100644
--- a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
@@ -95,7 +95,7 @@ class Hasse_diagram_cell {
deleted_(false) {}
/**
- * Construcor of a cell of dimension dim having given additional information.
+ * Constructor of a cell of dimension dim having given additional information.
**/
Hasse_diagram_cell(Additional_information ai, int dim)
: dimension(dim), additional_info(ai), position(0), deleted_(false) {}
@@ -125,7 +125,7 @@ class Hasse_diagram_cell {
inline Additional_information& get_additional_information() { return this->additional_info; }
/**
- * Procedure to retrive position of the cell in the structure. It is used in
+ * Procedure to retrieve the position of the cell in the structure. It is used in
 * the implementation of the Hasse diagram and set by it. Note that removing a
 * cell and subsequently calling clean_up_the_structure will change those
* positions.
@@ -186,7 +186,7 @@ class Hasse_diagram_cell {
friend std::ostream& operator<<(
std::ostream& out, const Hasse_diagram_cell<Incidence_type, Filtration_type, Additional_information>& c) {
// cout << "position : " << c.position << ", dimension : " << c.dimension << ", filtration: " << c.filtration << ",
- // size of boudary : " << c.boundary.size() << "\n";
+ // size of boundary : " << c.boundary.size() << "\n";
out << c.position << " " << c.dimension << " " << c.filtration << std::endl;
for (std::size_t bd = 0; bd != c.boundary.size(); ++bd) {
// do not write out the cells that have been deleted
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
index b29f0906..a9e2d507 100644
--- a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
@@ -51,13 +51,13 @@ struct Function_affine_plane_in_Rd {
* plane in the d-dimensional Euclidean space.
*
* @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
- * correspond to the ambient dimension, the number of columns should corespond to
+ * correspond to the ambient dimension, the number of columns should correspond to
* the size of the normal basis (codimension).
* @param[in] offset The offset vector of the affine plane.
* The dimension of the vector should be the ambient dimension of the manifold.
*/
Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix, const Eigen::VectorXd& offset)
- : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), m_(d_ - k_), off_(offset) {
+ : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), off_(offset) {
normal_matrix_.colwise().normalize();
}
@@ -66,21 +66,20 @@ struct Function_affine_plane_in_Rd {
* plane in the d-dimensional Euclidean space that passes through origin.
*
* @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
- * correspond to the ambient dimension, the number of columns should corespond to
+ * correspond to the ambient dimension, the number of columns should correspond to
* the size of the normal basis (codimension).
*/
Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix)
: normal_matrix_(normal_matrix),
d_(normal_matrix.rows()),
k_(normal_matrix.cols()),
- m_(d_ - k_),
off_(Eigen::VectorXd::Zero(d_)) {
normal_matrix_.colwise().normalize();
}
private:
Eigen::MatrixXd normal_matrix_;
- std::size_t d_, k_, m_;
+ std::size_t d_, k_;
Eigen::VectorXd off_;
};
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
index 11b379f3..f315d794 100644
--- a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
@@ -46,6 +46,11 @@ struct Function_moment_curve_in_Rd {
return result;
}
+ /** @brief Returns the radius of the moment curve. */
+ double get_radius() const{
+ return r_;
+ }
+
/**
* \brief Constructor of the function that defines an implicit moment curve
* in the d-dimensional Euclidean space.
@@ -53,7 +58,7 @@ struct Function_moment_curve_in_Rd {
* @param[in] r Numerical parameter.
* @param[in] d The ambient dimension.
*/
- Function_moment_curve_in_Rd(double r, std::size_t d) : m_(1), k_(d - 1), d_(d), r_(r) {}
+ Function_moment_curve_in_Rd(double r, std::size_t d) : k_(d - 1), d_(d), r_(r) {}
/**
* \brief Constructor of the function that defines an implicit moment curve
@@ -64,10 +69,10 @@ struct Function_moment_curve_in_Rd {
* @param[in] offset The offset of the moment curve.
*/
Function_moment_curve_in_Rd(double r, std::size_t d, Eigen::VectorXd& offset)
- : m_(1), k_(d - 1), d_(d), r_(r), off_(offset) {}
+ : k_(d - 1), d_(d), r_(r), off_(offset) {}
private:
- std::size_t m_, k_, d_;
+ std::size_t k_, d_;
double r_;
Eigen::VectorXd off_;
};
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
index 3ee73754..594b6fbf 100644
--- a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
@@ -68,7 +68,7 @@ class Integer_combination_iterator
public:
template <class Bound_range>
Integer_combination_iterator(const uint& n, const uint& k, const Bound_range& bounds)
- : value_(k + 2), is_end_(n == 0 || k == 0), n_(n), k_(k) {
+ : value_(k + 2), is_end_(n == 0 || k == 0), k_(k) {
bounds_.reserve(k + 2);
uint sum_radices = 0;
for (auto b : bounds) {
@@ -96,13 +96,12 @@ class Integer_combination_iterator
}
// Used for the creating an end iterator
- Integer_combination_iterator() : is_end_(true), n_(0), k_(0) {}
+ Integer_combination_iterator() : is_end_(true), k_(0) {}
private:
value_t value_; // the dereference value
bool is_end_; // is true when the current integer combination is the final one
- uint n_;
uint k_;
std::vector<uint> bounds_;
};
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
index db145741..1a63d2f7 100644
--- a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
@@ -26,12 +26,12 @@ namespace Gudhi {
namespace coxeter_triangulation {
-/* \addtogroup coxeter_triangulation
+/** \addtogroup coxeter_triangulation
* Iterator types for Permutahedral_representation
* @{
*/
-/* \brief Iterator over the vertices of a simplex
+/** \brief Iterator over the vertices of a simplex
* represented by its permutahedral representation.
*
* Forward iterator, 'value_type' is Permutahedral_representation::Vertex.*/
@@ -83,7 +83,7 @@ class Vertex_iterator
}; // Vertex_iterator
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the k-faces of a simplex
+/** \brief Iterator over the k-faces of a simplex
* given by its permutahedral representation.
*
* Forward iterator, value_type is Permutahedral_representation. */
@@ -141,7 +141,7 @@ class Face_iterator : public boost::iterator_facade<Face_iterator<Permutahedral_
}; // Face_iterator
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the k-cofaces of a simplex
+/** \brief Iterator over the k-cofaces of a simplex
* given by its permutahedral representation.
*
* Forward iterator, value_type is Permutahedral_representation. */
@@ -247,6 +247,8 @@ class Coface_iterator
}; // Coface_iterator
+/** @} */
+
} // namespace coxeter_triangulation
} // namespace Gudhi
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
index c43effc8..6b137744 100644
--- a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
@@ -19,7 +19,7 @@ namespace Gudhi {
namespace coxeter_triangulation {
-/** \brief Auxillary iterator class for sizes of parts in an ordered set partition.
+/** \brief Auxiliary iterator class for sizes of parts in an ordered set partition.
*/
template <class T_it>
class Size_iterator
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index ae8db1a3..1ec190d9 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -152,7 +152,7 @@ FULL_PATH_NAMES = YES
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-STRIP_FROM_PATH =
+STRIP_FROM_PATH = @CMAKE_SOURCE_DIR@
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
@@ -162,7 +162,8 @@ STRIP_FROM_PATH =
# using the -I flag.
STRIP_FROM_INC_PATH = include \
- concept
+ concept \
+ @CMAKE_SOURCE_DIR@
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful if your file system doesn't
@@ -229,13 +230,7 @@ TAB_SIZE = 2
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
+ALIASES = gudhi_example_link{2}="@ref \2 \"\1/\2\""
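+# Assumed usage (editorial example): \gudhi_example_link{Collapse,edge_collapse_basic_example.cpp}
+# would expand to: @ref edge_collapse_basic_example.cpp "Collapse/edge_collapse_basic_example.cpp"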
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
@@ -717,7 +712,7 @@ CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \
# messages are off.
# The default value is: NO.
-QUIET = NO
+QUIET = YES
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
@@ -771,7 +766,7 @@ WARN_FORMAT = "$file:$line: $text"
# messages should be written. If left blank the output is written to standard
# error (stderr).
-WARN_LOGFILE =
+WARN_LOGFILE = doxygen.log
#---------------------------------------------------------------------------
# Configuration options related to the input files
@@ -838,7 +833,7 @@ EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \
@CMAKE_SOURCE_DIR@/ext/ \
@CMAKE_SOURCE_DIR@/README.md \
@CMAKE_SOURCE_DIR@/.github \
- @CMAKE_CURRENT_BINARY_DIR@/new_gudhi_version_creation.md \
+ @CMAKE_CURRENT_BINARY_DIR@ \
@GUDHI_DOXYGEN_SOURCE_PREFIX@/GudhUI/ \
@GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \
@GUDHI_DOXYGEN_SOURCE_PREFIX@/python/
@@ -874,7 +869,7 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \
+EXAMPLE_PATH = @CMAKE_SOURCE_DIR@ \
@CMAKE_SOURCE_DIR@/data/ \
@GUDHI_DOXYGEN_EXAMPLE_PATH@
@@ -1040,25 +1035,6 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
-# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# generated with the -Duse-libclang=ON option for CMake.
-# The default value is: NO.
-
-CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-CLANG_OPTIONS =
-
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@@ -1070,13 +1046,6 @@ CLANG_OPTIONS =
ALPHABETICAL_INDEX = YES
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
@@ -1149,7 +1118,7 @@ HTML_FOOTER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/footer.html
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
+HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
# cascading style sheets that are included after the standard style sheets
@@ -1162,7 +1131,7 @@ HTML_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_EXTRA_STYLESHEET =
+HTML_EXTRA_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
@@ -1174,6 +1143,11 @@ HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
+# Default here is AUTO_LIGHT which means "Automatically set the mode according
+# to the user preference, use light mode if no preference is set".
+# Force it to LIGHT (white), as the rest of the documentation is white.
+HTML_COLORSTYLE = LIGHT
+
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
@@ -1483,17 +1457,6 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
@@ -1505,6 +1468,17 @@ FORMULA_TRANSPARENT = YES
USE_MATHJAX = YES
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+@GUDHI_DOXYGEN_MATHJAX_VERSION@
+
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
# http://docs.mathjax.org/en/latest/output.html) for more details.
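Note: @GUDHI_DOXYGEN_MATHJAX_VERSION@ above is a placeholder substituted at CMake configure time; presumably it becomes either empty or a line such as

    MATHJAX_VERSION = MathJax_3

with MathJax_2 and MathJax_3 the only admissible values, per the comment preceding it.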
@@ -1526,15 +1500,14 @@ MATHJAX_FORMAT = HTML-CSS
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
-MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2
+MATHJAX_RELPATH =
# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
# extension names that should be enabled during MathJax rendering. For example
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
-MATHJAX_EXTENSIONS = TeX/AMSmath \
- TeX/AMSsymbols
+MATHJAX_EXTENSIONS = @GUDHI_DOXYGEN_MATHJAX_EXTENSIONS@
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
@@ -1775,16 +1748,6 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE = NO
-
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@@ -1857,16 +1820,6 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE = NO
-
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
@@ -1956,15 +1909,6 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
@@ -2139,33 +2083,11 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH = /usr/bin/perl
-
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS = NO
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
+@GUDHI_DOXYGEN_CLASS_DIAGRAMS@
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
@@ -2199,26 +2121,38 @@ HAVE_DOT = YES
DOT_NUM_THREADS = 0
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
+# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of
+# subgraphs. When you want a differently looking font in the dot files that
+# doxygen generates you can specify fontname, fontcolor and fontsize attributes.
+# For details please see <a href=https://graphviz.org/doc/info/attrs.html>Node,
+# Edge and Graph Attributes specification</a> You need to make sure dot is able
+# to find the font, which can be done by putting it in a standard location or by
+# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font. Default graphviz fontsize is 14.
+# The default value is: fontname=Helvetica,fontsize=10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
+
+# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can
+# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. <a
+# href=https://graphviz.org/doc/info/arrows.html>Complete documentation about
+# arrows shapes.</a>
+# The default value is: labelfontname=Helvetica,labelfontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTNAME = Helvetica
+DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
+# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For a view without boxes
+# around nodes, set 'shape=plain' or 'shape=plaintext' <a
+# href=https://www.graphviz.org/doc/info/shapes.html>Shapes specification</a>
+# The default value is: shape=box,height=0.2,width=0.4.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTSIZE = 10
+DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
+# You can set the path where dot can find the font specified with fontname in
+# DOT_COMMON_ATTR and other dot attributes.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
@@ -2430,18 +2364,6 @@ DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
diff --git a/src/GudhUI/todo.txt b/src/GudhUI/todo.txt
index 19d99a77..e59d06d4 100644
--- a/src/GudhUI/todo.txt
+++ b/src/GudhUI/todo.txt
@@ -18,5 +18,5 @@ x faire le lien MainWindow - Model
-- bug
-x bug ordre contraction -> just that first vertex placement dont work great
+x contraction order bug -> just that first vertex placement doesn't work great
x rips construction problem
diff --git a/src/GudhUI/utils/Critical_points.h b/src/GudhUI/utils/Critical_points.h
index 97e58737..65695434 100644
--- a/src/GudhUI/utils/Critical_points.h
+++ b/src/GudhUI/utils/Critical_points.h
@@ -103,7 +103,7 @@ template<typename SkBlComplex> class Critical_points {
// reduced to one point -> contractible
return 1;
else
- // we dont know
+ // we don't know
return 2;
}
diff --git a/src/GudhUI/utils/Edge_contractor.h b/src/GudhUI/utils/Edge_contractor.h
index 0707b186..a71d0742 100644
--- a/src/GudhUI/utils/Edge_contractor.h
+++ b/src/GudhUI/utils/Edge_contractor.h
@@ -65,7 +65,7 @@ template<typename SkBlComplex> class Edge_contractor {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Edge_contractor(SkBlComplex& complex, unsigned num_contractions) :
complex_(complex), num_contractions_(num_contractions) {
diff --git a/src/GudhUI/utils/Furthest_point_epsilon_net.h b/src/GudhUI/utils/Furthest_point_epsilon_net.h
index 6eb71071..195d0014 100644
--- a/src/GudhUI/utils/Furthest_point_epsilon_net.h
+++ b/src/GudhUI/utils/Furthest_point_epsilon_net.h
@@ -27,7 +27,7 @@ template<typename SkBlComplex> class Furthest_point_epsilon_net {
/**
* Let V be the set of vertices.
- * Initially v0 is one arbitrarly vertex and the set V0 is {v0}.
+ * Initially v0 is an arbitrary vertex and the set V0 is {v0}.
* Then Vk is computed as follows.
* First we compute the vertex pk that is the furthest from Vk
* then Vk = Vk \cup pk.
@@ -54,7 +54,7 @@ template<typename SkBlComplex> class Furthest_point_epsilon_net {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Furthest_point_epsilon_net(SkBlComplex& complex) :
complex_(complex) {
diff --git a/src/GudhUI/utils/K_nearest_builder.h b/src/GudhUI/utils/K_nearest_builder.h
index 34483e58..454b2587 100644
--- a/src/GudhUI/utils/K_nearest_builder.h
+++ b/src/GudhUI/utils/K_nearest_builder.h
@@ -41,7 +41,7 @@ template<typename SkBlComplex> class K_nearest_builder {
public:
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
K_nearest_builder(SkBlComplex& complex, unsigned k) : complex_(complex) {
complex.keep_only_vertices();
diff --git a/src/GudhUI/utils/Lloyd_builder.h b/src/GudhUI/utils/Lloyd_builder.h
index c042564f..57e3dc0f 100644
--- a/src/GudhUI/utils/Lloyd_builder.h
+++ b/src/GudhUI/utils/Lloyd_builder.h
@@ -27,7 +27,7 @@ template<typename SkBlComplex> class Lloyd_builder {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Lloyd_builder(SkBlComplex& complex, unsigned num_iterations) : complex_(complex), dim(-1) {
if (!complex_.empty()) {
diff --git a/src/GudhUI/utils/Vertex_collapsor.h b/src/GudhUI/utils/Vertex_collapsor.h
index 030e4bb0..b1c48efd 100644
--- a/src/GudhUI/utils/Vertex_collapsor.h
+++ b/src/GudhUI/utils/Vertex_collapsor.h
@@ -31,7 +31,7 @@ template<typename SkBlComplex> class Vertex_collapsor {
/**
* @brief Modify complex to be the expansion of the k-nearest neighbor
- * symetric graph.
+ * symmetric graph.
*/
Vertex_collapsor(SkBlComplex& complex, size_t num_collapses) :
complex_(complex), num_collapses_(num_collapses) {
diff --git a/src/GudhUI/view/Viewer_instructor.h b/src/GudhUI/view/Viewer_instructor.h
index 58cbcd31..09ed102f 100644
--- a/src/GudhUI/view/Viewer_instructor.h
+++ b/src/GudhUI/view/Viewer_instructor.h
@@ -11,7 +11,7 @@
#ifndef VIEW_VIEWER_INSTRUCTOR_H_
#define VIEW_VIEWER_INSTRUCTOR_H_
-// todo do a viewer instructor that have directely a pointer to a QGLviewer and buffer ot not triangles
+// todo do a viewer instructor that directly has a pointer to a QGLviewer and buffers the triangles or not
#include <QFileDialog>
#include <QKeyEvent>
diff --git a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
index a6098860..e1ab7cb3 100644
--- a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
+++ b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
@@ -24,7 +24,7 @@ namespace cover_complex {
* Visualizations of the simplicial complexes can be done with either
* neato (from <a target="_blank" href="http://www.graphviz.org/">graphviz</a>),
* <a target="_blank" href="http://www.geomview.org/">geomview</a>,
- * <a target="_blank" href="https://github.com/MLWave/kepler-mapper">KeplerMapper</a>.
+ * <a target="_blank" href="https://github.com/scikit-tda/kepler-mapper">KeplerMapper</a>.
* Input point clouds are assumed to be \ref FileFormatsOFF "OFF files"
*
* \section covers Covers
diff --git a/src/Nerve_GIC/utilities/km.py.COPYRIGHT b/src/Nerve_GIC/utilities/km.py.COPYRIGHT
index bef7b121..5358d287 100644
--- a/src/Nerve_GIC/utilities/km.py.COPYRIGHT
+++ b/src/Nerve_GIC/utilities/km.py.COPYRIGHT
@@ -1,7 +1,7 @@
km.py is a fork of https://github.com/MLWave/kepler-mapper.
Only the visualization part has been kept (Mapper part has been removed).
-This file has te following Copyright :
+This file has the following copyright:
The MIT License (MIT)
diff --git a/src/Persistence_representations/include/gudhi/Persistence_intervals.h b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
index a6c1d6f0..f4324cb2 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_intervals.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
@@ -109,7 +109,7 @@ class Persistence_intervals {
std::vector<size_t> cumulative_histogram_of_lengths(size_t number_of_bins = 10) const;
/**
- * In this procedure we assume that each barcode is a characteristic function of a hight equal to its length. The
+ * In this procedure we assume that each barcode is a characteristic function of height equal to its length. The
*persistence diagram is a sum of such functions. The procedure below constructs a function that is a
* sum of the characteristic functions of persistence intervals. The first two parameters are the range in which the
*function is to be computed and the last parameter is the number of bins in
@@ -207,7 +207,7 @@ class Persistence_intervals {
/**
* This is a simple function projecting the persistence intervals to a real number. The function we use here is a sum
*of squared lengths of intervals. It can be naturally interpreted as
- * sum of step function, where the step hight it equal to the length of the interval.
+ * sum of step functions, where the step height is equal to the length of the interval.
* At the moment this function is not tested, since it is quite likely to be changed in the future. Given this, when
*using it, keep in mind that it
* will be most likely changed in the next versions.
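For intuition, a minimal sketch (not the library code) of the projection this comment describes, assuming persistence intervals given as (birth, death) pairs of doubles:

    #include <utility>
    #include <vector>

    // Each interval contributes a step of height equal to its length, so the
    // projection of the diagram to a real number is the sum of squared lengths.
    double sum_of_squared_lengths(const std::vector<std::pair<double, double>>& intervals) {
      double result = 0.;
      for (const auto& bd : intervals) {
        double length = bd.second - bd.first;
        result += length * length;
      }
      return result;
    }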
diff --git a/src/Persistence_representations/test/persistence_heat_maps_test.cpp b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
index b3240758..bf531773 100644
--- a/src/Persistence_representations/test/persistence_heat_maps_test.cpp
+++ b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
@@ -78,7 +78,7 @@ BOOST_AUTO_TEST_CASE(check_compute_percentage_of_active_of_heat_maps) {
to_compute_percentage_of_active.push_back(&q);
to_compute_percentage_of_active.push_back(&r);
Persistence_heat_maps<constant_scaling_function> percentage_of_active;
- percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0.1);
+ percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0);
Persistence_heat_maps<constant_scaling_function> template_percentage_of_active;
template_percentage_of_active.load_from_file("data/template_percentage_of_active_of_heat_maps");
diff --git a/src/Persistence_representations/test/persistence_lanscapes_test.cpp b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
index 21ef18a0..59924f16 100644
--- a/src/Persistence_representations/test/persistence_lanscapes_test.cpp
+++ b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
@@ -238,7 +238,7 @@ if ( argc != 2 )
double integral = p.compute_integral_of_landscape();
cout << "integral : " << integral <<endl;
- //compute integral for each level separatelly
+ //compute integral for each level separately
for ( size_t level = 0 ; level != p.size() ; ++level )
{
cout << p.compute_integral_of_landscape( level ) << endl;
diff --git a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
index 030b072a..3bec8830 100644
--- a/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
+++ b/src/Persistent_cohomology/benchmark/performance_rips_persistence.cpp
@@ -49,7 +49,7 @@ void timing_persistence(FilteredComplex & cpx
* with a Hasse diagram. The Hasse diagram represents explicitly all
* codimension 1 incidence relations in the complex, and hence leads to
* a faster computation of persistence because boundaries are precomputed.
- * Hovewer, the simplex tree may be constructed directly from a point cloud and
+ * However, the simplex tree may be constructed directly from a point cloud and
* is more compact.
* We compute persistent homology with coefficient fields Z/2Z and Z/1223Z.
* We also present timings for the computation of multi-field persistent
diff --git a/src/Persistent_cohomology/concept/FilteredComplex.h b/src/Persistent_cohomology/concept/FilteredComplex.h
index 26ac7ac8..59ce25e3 100644
--- a/src/Persistent_cohomology/concept/FilteredComplex.h
+++ b/src/Persistent_cohomology/concept/FilteredComplex.h
@@ -103,7 +103,7 @@ Filtration_simplex_range filtration_simplex_range();
/** @} */
-/* \brief Iterator over the simplices of the complex,
+/** \brief Iterator over the simplices of the complex,
* in an arbitrary order.
*
* 'value_type' must be 'Simplex_handle'.*/
diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
index a3613d0d..94579564 100644
--- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
+++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
@@ -131,8 +131,7 @@ namespace persistent_cohomology {
We provide several example files: run these examples with -h for details on their use, and read the README file.
-\li <a href="rips_persistence_8cpp-example.html">
-Rips_complex/rips_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its persistence
+\li \gudhi_example_link{Rips_complex,rips_persistence.cpp} computes the Rips complex of a point cloud and outputs its persistence
diagram.
\code $> ./rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3 \endcode
\code The complex contains 177838 simplices
@@ -144,12 +143,10 @@ diagram.
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="rips_multifield_persistence_8cpp-example.html">
-Persistent_cohomology/rips_multifield_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its
+\li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp} computes the Rips complex of a point cloud and outputs its
persistence diagram with a family of field coefficients.
-\li <a href="rips_distance_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_distance_matrix_persistence.cpp</a> computes the Rips complex of a distance matrix and
+\li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp} computes the Rips complex of a distance matrix and
outputs its persistence diagram.
The file should contain square or lower triangular distance matrix with semicolons as separators.
@@ -158,8 +155,7 @@ Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="rips_correlation_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_correlation_matrix_persistence.cpp</a>
+\li \gudhi_example_link{Rips_complex,rips_correlation_matrix_persistence.cpp}
computes the Rips complex of a correlation matrix and outputs its persistence diagram.
Note that no check is performed if the matrix given as the input is a correlation matrix.
@@ -169,8 +165,7 @@ Please refer to data/correlation_matrix/lower_triangular_correlation_matrix.csv
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="alpha_complex_3d_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_3d_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/2\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_3d_persistence ../../data/points/tore3D_300.off -p 2 -m 0.45 \endcode
\code Simplex_tree dim: 3
@@ -235,8 +230,7 @@ Note that the lengths of the sides of the periodic cuboid have to be the same.<b
3 2 36.8838 inf
3 3 58.6783 inf \endcode
-\li <a href="alpha_complex_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/p\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_persistence -r 32 -p 2 -m 0.45 ../../data/points/tore3D_300.off \endcode
\code Alpha complex is of dimension 3 - 9273 simplices - 300 vertices.
@@ -248,8 +242,7 @@ Simplex_tree dim: 3
More details on the <a href="../../alphacomplex/">Alpha complex utilities</a> dedicated page.
-\li <a href="plain_homology_8cpp-example.html">
-Persistent_cohomology/plain_homology.cpp</a> computes the plain homology of a simple simplicial complex without
+\li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp} computes the plain homology of a simple simplicial complex without
filtration values.
*/
diff --git a/src/Persistent_cohomology/example/custom_persistence_sort.cpp b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
index 410cd987..bba0b2f7 100644
--- a/src/Persistent_cohomology/example/custom_persistence_sort.cpp
+++ b/src/Persistent_cohomology/example/custom_persistence_sort.cpp
@@ -33,7 +33,7 @@ using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomolog
Gudhi::persistent_cohomology::Field_Zp >;
std::vector<Point> random_points() {
- // Instanciate a random point generator
+ // Instantiate a random point generator
CGAL::Random rng(0);
// Generate "points_number" random points in a vector
diff --git a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
index bffaabdd..3da6771e 100644
--- a/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
+++ b/src/Persistent_cohomology/example/persistence_from_simple_simplex_tree.cpp
@@ -95,7 +95,7 @@ int main(int argc, char * const argv[]) {
SimplexVector = {9, 10, 11};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
- // ++ NINETH
+ // ++ NINTH
std::clog << " - INSERT (2,10,12)" << std::endl;
SimplexVector = {2, 10, 12};
st.insert_simplex_and_subfaces(SimplexVector, 0.3);
diff --git a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
index 2edf5bc4..ca26a5b9 100644
--- a/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
+++ b/src/Persistent_cohomology/example/rips_multifield_persistence.cpp
@@ -104,7 +104,7 @@ void program_options(int argc, char * argv[]
("min-field-charac,p", po::value<int>(&min_p)->default_value(2),
"Minimal characteristic p of the coefficient field Z/pZ.")
("max-field-charac,q", po::value<int>(&max_p)->default_value(1223),
- "Minimial characteristic q of the coefficient field Z/pZ.")
+ "Maximal characteristic q of the coefficient field Z/pZ.")
("min-persistence,m", po::value<Filtration_value>(&min_persistence),
"Minimal lifetime of homology feature to be recorded. Default is 0");
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index d428e497..c00bd33d 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -211,7 +211,7 @@ class Persistent_cohomology {
/** \brief Update the cohomology groups under the insertion of an edge.
*
* The 0-homology is maintained with a simple Union-Find data structure, which
- * explains the existance of a specific function of edge insertions. */
+ * explains the existence of a specific function of edge insertions. */
void update_cohomology_groups_edge(Simplex_handle sigma) {
Simplex_handle u, v;
boost::tie(u, v) = cpx_->endpoints(sigma);
@@ -723,7 +723,7 @@ class Persistent_cohomology {
boost::disjoint_sets<int *, Simplex_key *> dsets_;
/* The compressed annotation matrix fields.*/
Cam cam_;
- /* Dictionary establishing the correspondance between the Simplex_key of
+ /* Dictionary establishing the correspondence between the Simplex_key of
* the root vertex in the union-find ds and the Simplex_key of the vertex which
* created the connected component as a 0-dimension homology feature.*/
std::map<Simplex_key, Simplex_key> zero_cocycles_;
diff --git a/src/Rips_complex/doc/Intro_rips_complex.h b/src/Rips_complex/doc/Intro_rips_complex.h
index 3888ec8f..cd77b327 100644
--- a/src/Rips_complex/doc/Intro_rips_complex.h
+++ b/src/Rips_complex/doc/Intro_rips_complex.h
@@ -63,9 +63,8 @@ namespace rips_complex {
* value set with \f$max(filtration(4,5), filtration(4,6), filtration(5,6))\f$.
* And so on for simplex (0,1,2,3).
*
- * If the Rips_complex interfaces are not detailed enough for your need, please refer to
- * <a href="rips_persistence_step_by_step_8cpp-example.html">
- * rips_persistence_step_by_step.cpp</a> example, where the constructions of the graph and
+ * If the Rips_complex interfaces are not detailed enough for your need, please refer to the example
+ * \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp} , where the constructions of the graph and
* the Simplex_tree are more detailed.
*
* \section sparserips Sparse Rips complex
diff --git a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
index 3d2ba54f..3811d1f1 100644
--- a/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
+++ b/src/Rips_complex/example/example_one_skeleton_rips_from_correlation_matrix.cpp
@@ -40,7 +40,7 @@ int main() {
throw "The input matrix is not a correlation matrix. The program will now terminate.\n";
}
correlations[i][j] = 1 - correlations[i][j];
- // Here we make sure that we will get the treshold value equal to maximal
+ // Here we make sure that we will get the threshold value equal to maximal
// distance in the matrix.
if (correlations[i][j] > threshold) threshold = correlations[i][j];
}
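As a concrete instance of this conversion: correlation values 1.0, 0.7 and 0.2 become distances 0, 0.3 and 0.8, and the running threshold ends up at 0.8, the maximal distance in the matrix.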
diff --git a/src/Simplex_tree/doc/Intro_simplex_tree.h b/src/Simplex_tree/doc/Intro_simplex_tree.h
index ef8dec91..2d3ecdec 100644
--- a/src/Simplex_tree/doc/Intro_simplex_tree.h
+++ b/src/Simplex_tree/doc/Intro_simplex_tree.h
@@ -39,11 +39,9 @@ namespace Gudhi {
* \subsubsection filteredcomplexessimplextreeexamples Examples
*
* Here is a list of simplex tree examples :
- * \li <a href="simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a> - Simple simplex tree construction and basic function use.
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp} - Simple simplex tree construction and basic function use.
*
- * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a> - Simplex tree construction from cliques of graph read in
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp} - Simplex tree construction from cliques of graph read in
* a file.
*
* Simplex tree construction with \f$\mathbb{Z}/3\mathbb{Z}\f$ coefficients on weighted graph Klein bottle file:
@@ -54,12 +52,10 @@ Expand the simplex tree in 3.8e-05 s.
Information of the Simplex Tree:
Number of vertices = 10 Number of simplices = 98 \endcode
*
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a> - Simplex tree is computed and displayed
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp} - Simplex tree is computed and displayed
* from a 3D alpha complex (Requires CGAL, GMP and GMPXX to be installed).
*
- * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a> - Simple simplex tree construction from a one-skeleton graph with
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp} - Simple simplex tree construction from a one-skeleton graph with
* a simple blocker expansion method.
*
* \subsection filteredcomplexeshassecomplex Hasse complex
diff --git a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
index df52bf43..eef8b665 100644
--- a/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
+++ b/src/Simplex_tree/example/graph_expansion_with_blocker.cpp
@@ -42,7 +42,7 @@ int main(int argc, char* const argv[]) {
std::clog << vertex << ", ";
}
std::clog << "] ( " << stree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
stree.assign_filtration(sh, stree.filtration(sh) + 1.);
std::clog << " + 1. ) = " << result << std::endl;
diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp
index e8bec596..965711da 100644
--- a/src/Simplex_tree/example/simple_simplex_tree.cpp
+++ b/src/Simplex_tree/example/simple_simplex_tree.cpp
@@ -129,7 +129,7 @@ int main(int argc, char* const argv[]) {
std::clog << " - 3 NOT INSERTED" << std::endl;
}
- // ++ NINETH
+ // ++ NINTH
std::clog << " * INSERT (3,0)" << std::endl;
typeVectorVertex ninethSimplexVector = {3, 0};
returnValue = simplexTree.insert_simplex(ninethSimplexVector, Filtration_value(SECOND_FILTRATION_VALUE));
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 7d2b12ba..ef9f8428 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -43,6 +43,10 @@
namespace Gudhi {
+/** \addtogroup simplex_tree
+ * @{
+ */
+
/**
* \class Extended_simplex_type Simplex_tree.h gudhi/Simplex_tree.h
* \brief Extended simplex type data structure for representing the type of simplices in an extended filtration.
@@ -98,8 +102,7 @@ class Simplex_tree {
// Simplex_key next to each other).
typedef typename boost::container::flat_map<Vertex_handle, Node> Dictionary;
- /* \brief Set of nodes sharing a same parent in the simplex tree. */
- /* \brief Set of nodes sharing a same parent in the simplex tree. */
+ /** \brief Set of nodes sharing a same parent in the simplex tree. */
typedef Simplex_tree_siblings<Simplex_tree, Dictionary> Siblings;
@@ -188,6 +191,12 @@ class Simplex_tree {
typedef Simplex_tree_boundary_simplex_iterator<Simplex_tree> Boundary_simplex_iterator;
/** \brief Range over the simplices of the boundary of a simplex. */
typedef boost::iterator_range<Boundary_simplex_iterator> Boundary_simplex_range;
+ /** \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * 'value_type' is std::pair<Simplex_handle, Vertex_handle>. */
+ typedef Simplex_tree_boundary_opposite_vertex_simplex_iterator<Simplex_tree> Boundary_opposite_vertex_simplex_iterator;
+ /** \brief Range over the simplices of the boundary of a simplex and their opposite vertices. */
+ typedef boost::iterator_range<Boundary_opposite_vertex_simplex_iterator> Boundary_opposite_vertex_simplex_range;
/** \brief Iterator over the simplices of the simplicial complex.
*
* 'value_type' is Simplex_handle. */
@@ -297,6 +306,23 @@ class Simplex_tree {
Boundary_simplex_iterator(this));
}
+ /** \brief Given a simplex, returns a range over the simplices of its boundary and their opposite vertices.
+ *
+ * The boundary of a simplex is the set of codimension \f$1\f$ subsimplices of the simplex.
+ * If the simplex is \f$[v_0, \cdots ,v_d]\f$, with canonical orientation induced by \f$ v_0 < \cdots < v_d \f$, the
+ * iterator enumerates the simplices of the boundary in the order:
+ * \f$[v_0,\cdots,\widehat{v_i},\cdots,v_d]\f$ for \f$i\f$ from \f$d\f$ to \f$0\f$, where \f$\widehat{v_i}\f$ means
+ * that the vertex \f$v_i\f$, known as the opposite vertex, is omitted from the boundary, but returned as the second
+ * element of a pair.
+ *
+ * @param[in] sh Simplex for which the boundary is computed.
+ */
+ template<class SimplexHandle>
+ Boundary_opposite_vertex_simplex_range boundary_opposite_vertex_simplex_range(SimplexHandle sh) {
+ return Boundary_opposite_vertex_simplex_range(Boundary_opposite_vertex_simplex_iterator(this, sh),
+ Boundary_opposite_vertex_simplex_iterator(this));
+ }
+
/** @} */ // end range and iterator methods
/** \name Constructor/Destructor
* @{ */
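As a usage illustration for the new range, a minimal sketch modelled on the unit test added later in this diff (assuming the default Simplex_tree options, where Vertex_handle is int):

    #include <gudhi/Simplex_tree.h>
    #include <iostream>
    #include <vector>

    int main() {
      Gudhi::Simplex_tree<> st;
      st.insert_simplex_and_subfaces({0, 1, 6, 7}, 4.);
      auto sh = st.find(std::vector<int>{0, 1, 6, 7});
      // Per the documentation above, the boundary of [v_0,...,v_d] is enumerated
      // as ([v_0,...,^v_i,...,v_d], v_i) for i from d down to 0, so here:
      // ([0,1,6],7), ([0,1,7],6), ([0,6,7],1), ([1,6,7],0).
      for (const auto& bov : st.boundary_opposite_vertex_simplex_range(sh)) {
        for (auto v : st.simplex_vertex_range(bov.first)) std::cout << v << ' ';
        std::cout << "- opposite vertex = " << bov.second << std::endl;
      }
      return 0;
    }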
@@ -945,7 +971,7 @@ class Simplex_tree {
// If we reached the end of the vertices, and the simplex has more vertices than the given simplex
// => we found a coface
- // Add a coface if we wan't the star or if the number of vertices of the current simplex matches with nbVertices
+ // Add a coface if we want the star or if the number of vertices of the current simplex matches with nbVertices
bool addCoface = (star || curr_nbVertices == nbVertices);
if (addCoface)
cofaces.push_back(simplex);
@@ -1063,8 +1089,8 @@ class Simplex_tree {
*
* Inserts all vertices and edges given by a OneSkeletonGraph.
* OneSkeletonGraph must be a model of
- * <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
- * and <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
+ * <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
+ * and <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
*
* The vertex filtration value is accessible through the property tag
* vertex_filtration_t.
@@ -1318,7 +1344,7 @@ class Simplex_tree {
}
}
- /* \private Returns the Simplex_handle composed of the vertex list (from the Simplex_handle), plus the given
+ /** \private Returns the Simplex_handle composed of the vertex list (from the Simplex_handle), plus the given
* Vertex_handle if the Vertex_handle is found in the Simplex_handle children list.
* Returns null_simplex() if it does not exist
*/
@@ -1471,7 +1497,7 @@ class Simplex_tree {
int sh_dimension = dimension(sh);
if (sh_dimension >= dimension_)
- // Stop browsing as soon as the dimension is reached, no need to go furter
+ // Stop browsing as soon as the dimension is reached, no need to go further
return false;
new_dimension = (std::max)(new_dimension, sh_dimension);
}
@@ -1781,7 +1807,7 @@ struct Simplex_tree_options_fast_persistence {
static const bool contiguous_vertices = true;
};
-/** @} */ // end defgroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
} // namespace Gudhi
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index e5522cc7..b63a5595 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -5,6 +5,7 @@
* Copyright (C) 2014 Inria
*
* Modification(s):
+ * - 2022/04 Vincent Rouvreau: Add Simplex_tree_boundary_opposite_vertex_simplex_iterator for alpha and cech purposes
* - YYYY/MM Author: Description of the modification
*/
@@ -17,15 +18,16 @@
#include <boost/container/static_vector.hpp>
#include <vector>
+#include <utility> // for std::pair
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Iterators and range types for the Simplex_tree.
- * @{
+ * @{
*/
-/* \brief Iterator over the vertices of a simplex
+/** \brief Iterator over the vertices of a simplex
* in a SimplexTree.
*
* Forward iterator, 'value_type' is SimplexTree::Vertex_handle.*/
@@ -71,7 +73,7 @@ class Simplex_tree_simplex_vertex_iterator : public boost::iterator_facade<
};
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the simplices of the boundary of a
+/** \brief Iterator over the simplices of the boundary of a
* simplex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
@@ -123,7 +125,7 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
private:
friend class boost::iterator_core_access;
-// valid when iterating along the SAME boundary.
+ // valid when iterating along the SAME boundary.
bool equal(Simplex_tree_boundary_simplex_iterator const& other) const {
return sh_ == other.sh_;
}
@@ -178,8 +180,118 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
Simplex_handle sh_; // current Simplex_handle in the boundary
SimplexTree * st_; // simplex containing the simplicial complex
};
+
+/** \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * Forward iterator, value_type is std::pair<SimplexTree::Simplex_handle, SimplexTree::Vertex_handle>.*/
+template<class SimplexTree>
+class Simplex_tree_boundary_opposite_vertex_simplex_iterator : public boost::iterator_facade<
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator<SimplexTree>,
+ std::pair<typename SimplexTree::Simplex_handle, typename SimplexTree::Vertex_handle> const, boost::forward_traversal_tag> {
+ public:
+ using Simplex_handle = typename SimplexTree::Simplex_handle;
+ using Vertex_handle = typename SimplexTree::Vertex_handle;
+ using Siblings = typename SimplexTree::Siblings;
+
+ // For cython purposes only. The object it initializes should be overwritten ASAP and never used before it is
+ // overwritten.
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator()
+ : sib_(nullptr),
+ st_(nullptr) {
+ }
+
+ // any end() iterator
+ explicit Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st)
+ : last_(st->null_vertex()),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), st->null_vertex()),
+ st_(st) {
+ }
+
+ template<class SimplexHandle>
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st, SimplexHandle sh)
+ : last_(sh->first),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), sh->first),
+ st_(st) {
+ // Only check once at the beginning instead of for every increment, as this is expensive.
+ if (SimplexTree::Options::contiguous_vertices)
+ GUDHI_CHECK(st_->contiguous_vertices(), "The set of vertices is not { 0, ..., n } without holes");
+ Siblings * sib = st->self_siblings(sh);
+ next_ = sib->parent();
+ sib_ = sib->oncles();
+ if (sib_ != nullptr) {
+ if (SimplexTree::Options::contiguous_vertices && sib_->oncles() == nullptr)
+ // Only relevant for edges
+ baov_.first = sib_->members_.begin()+next_;
+ else
+ baov_.first = sib_->find(next_);
+ }
+ }
+
+ private:
+ friend class boost::iterator_core_access;
+
+ // valid when iterating along the SAME boundary.
+ bool equal(Simplex_tree_boundary_opposite_vertex_simplex_iterator const& other) const {
+ return (baov_.first == other.baov_.first);
+ }
+
+ std::pair<Simplex_handle, Vertex_handle> const& dereference() const {
+ return baov_;
+ }
+
+ void increment() {
+ if (sib_ == nullptr) {
+ baov_.first = st_->null_simplex();
+ return; // ------>>
+ }
+ Siblings * for_sib = sib_;
+ Siblings * new_sib = sib_->oncles();
+ auto rit = suffix_.rbegin();
+ if (SimplexTree::Options::contiguous_vertices && new_sib == nullptr) {
+ // We reached the root, use a short-cut to find a vertex.
+ if (rit == suffix_.rend()) {
+ baov_.second = baov_.first->first;
+ // Segment, this vertex is the last boundary simplex
+ baov_.first = for_sib->members_.begin()+last_;
+ sib_ = nullptr;
+ return;
+ } else {
+ // Dim >= 2, initial step of the descent
+ baov_.first = for_sib->members_.begin()+*rit;
+ for_sib = baov_.first->second.children();
+ ++rit;
+ }
+ }
+ for (; rit != suffix_.rend(); ++rit) {
+ baov_.first = for_sib->find(*rit);
+ for_sib = baov_.first->second.children();
+ }
+ baov_.first = for_sib->find(last_); // baov_.first points to the right simplex now
+ suffix_.push_back(next_);
+ next_ = sib_->parent();
+ sib_ = new_sib;
+ baov_.second = suffix_.back();
+ }
+
+ // Most of the storage should be moved to the range, iterators should be light.
+ Vertex_handle last_; // last vertex of the simplex
+ Vertex_handle next_; // next vertex to push in suffix_
+ // 40 seems a conservative bound on the dimension of a Simplex_tree for now,
+ // as it would not fit on the biggest hard-drive.
+ boost::container::static_vector<Vertex_handle, 40> suffix_;
+ // static_vector still has some overhead compared to a trivial hand-made
+ // version using std::aligned_storage, or compared to making suffix_ static.
+ Siblings * sib_; // where the next search will start from
+ std::pair<Simplex_handle, Vertex_handle> baov_; // a pair containing the current Simplex_handle in the boundary and its opposite vertex
+ SimplexTree * st_; // simplex containing the simplicial complex
+};
+
/*---------------------------------------------------------------------------*/
-/* \brief Iterator over the simplices of a simplicial complex.
+/** \brief Iterator over the simplices of a simplicial complex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
template<class SimplexTree>
@@ -252,7 +364,7 @@ class Simplex_tree_complex_simplex_iterator : public boost::iterator_facade<
SimplexTree * st_;
};
-/* \brief Iterator over the simplices of the skeleton of a given
+/** \brief Iterator over the simplices of the skeleton of a given
* dimension of the simplicial complex.
*
* Forward iterator, value_type is SimplexTree::Simplex_handle.*/
@@ -335,7 +447,8 @@ class Simplex_tree_skeleton_simplex_iterator : public boost::iterator_facade<
int curr_dim_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_ITERATORS_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
index ae140859..63023daa 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_node_explicit_storage.h
@@ -15,16 +15,15 @@
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Represents a node of a Simplex_tree.
* @{
*/
-/*
- * \brief Node of a simplex tree with filtration value
+/** \brief Node of a simplex tree with filtration value
* and simplex key.
*
- * It stores explicitely its own filtration value and its own Simplex_key.
+ * It stores explicitly its own filtration value and its own Simplex_key.
*/
template<class SimplexTree>
struct Simplex_tree_node_explicit_storage : SimplexTree::Filtration_simplex_base, SimplexTree::Key_simplex_base {
@@ -54,7 +53,8 @@ struct Simplex_tree_node_explicit_storage : SimplexTree::Filtration_simplex_base
Siblings * children_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_NODE_EXPLICIT_STORAGE_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
index b53bad29..d849eeba 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
@@ -20,12 +20,12 @@
namespace Gudhi {
-/* \addtogroup simplex_tree
+/** \addtogroup simplex_tree
* Represents a set of node of a Simplex_tree that share the same parent.
* @{
*/
-/* \brief Data structure to store a set of nodes in a SimplexTree sharing
+/** \brief Data structure to store a set of nodes in a SimplexTree sharing
* the same parent node.*/
template<class SimplexTree, class MapContainer>
class Simplex_tree_siblings {
@@ -36,6 +36,7 @@ class Simplex_tree_siblings {
template<class T> friend class Simplex_tree_boundary_simplex_iterator;
template<class T> friend class Simplex_tree_complex_simplex_iterator;
template<class T> friend class Simplex_tree_skeleton_simplex_iterator;
+ template<class T> friend class Simplex_tree_boundary_opposite_vertex_simplex_iterator;
typedef typename SimplexTree::Vertex_handle Vertex_handle;
typedef typename SimplexTree::Filtration_value Filtration_value;
@@ -57,7 +58,7 @@ class Simplex_tree_siblings {
members_() {
}
- /* \brief Constructor with initialized set of members.
+ /** \brief Constructor with initialized set of members.
*
* 'members' must be sorted and unique.*/
template<typename RandomAccessVertexRange>
@@ -71,8 +72,7 @@ class Simplex_tree_siblings {
}
}
- /*
- * \brief Inserts a Node in the set of siblings nodes.
+ /** \brief Inserts a Node in the set of siblings nodes.
*
* If already present, assigns the minimal filtration value
* between input filtration_value and the value already
@@ -113,7 +113,8 @@ class Simplex_tree_siblings {
Dictionary members_;
};
-/* @} */ // end addtogroup simplex_tree
+/** @}*/ // end addtogroup simplex_tree
+
} // namespace Gudhi
#endif // SIMPLEX_TREE_SIMPLEX_TREE_SIBLINGS_H_
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h b/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
index 3e395ae2..29c76e50 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/indexing_tag.h
@@ -20,7 +20,7 @@ namespace Gudhi {
struct linear_indexing_tag {
};
-/* \brief Tag for a zigzag ordering of simplices. */
+/** \brief Tag for a zigzag ordering of simplices. */
// struct zigzag_indexing_tag {};
} // namespace Gudhi
diff --git a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
index 229ae46f..f6118fe0 100644
--- a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
@@ -98,8 +98,16 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
BOOST_CHECK(st == st4);
BOOST_CHECK(st3 == st);
+#ifdef __clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
+#endif
st = st;
- print_simplex_filtration(st4, "Third self copy assignment from the default Simplex_tree");
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+
+ print_simplex_filtration(st, "Third self copy assignment from the default Simplex_tree");
BOOST_CHECK(st3 == st);
diff --git a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
index 6d63d8ae..54e23204 100644
--- a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
@@ -93,7 +93,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, li
std::clog << vertex << ", ";
}
std::clog << "] ( " << simplex_tree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.);
std::clog << " + 1. ) = " << result << std::endl;
@@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, li
std::clog << vertex << ", ";
}
std::clog << "] ( " << simplex_tree.filtration(sh);
- // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boudaries)
+ // User can re-assign a new filtration value directly in the blocker (default is the maximal value of boundaries)
simplex_tree.assign_filtration(sh, simplex_tree.filtration(sh) + 1.);
std::clog << " + 1. ) = " << result << std::endl;
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index bdd41d34..79bb5a93 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -17,6 +17,8 @@
#include <limits>
#include <functional> // greater
#include <tuple> // std::tie
+#include <iterator> // for std::distance
+#include <cstddef> // for std::size_t
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "simplex_tree"
@@ -285,7 +287,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
set_and_test_simplex_tree_dim_fil(st, eighthSimplexVector.size(), eighthSimplex.second);
BOOST_CHECK(st.num_vertices() == (size_t) 4);
- // ++ NINETH
+ // ++ NINTH
std::clog << " - INSERT (3,0)" << std::endl;
typeVectorVertex ninethSimplexVector{3, 0};
BOOST_CHECK(ninethSimplexVector.size() == 2);
@@ -359,7 +361,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_insertion, typeST, list_of_tested_var
test_simplex_tree_contains(st, seventhSimplex, 8); // (2,1,0) -> 8
std::clog << "simplex_tree_insertion - eighth - 3" << std::endl;
test_simplex_tree_contains(st, eighthSimplex, 3); // (3) -> 3
- std::clog << "simplex_tree_insertion - nineth - 7" << std::endl;
+ std::clog << "simplex_tree_insertion - ninth - 7" << std::endl;
test_simplex_tree_contains(st, ninethSimplex, 7); // (3,0) -> 7
// Display the Simplex_tree - Can not be done in the middle of 2 inserts
@@ -991,3 +993,48 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_reset_filtration, typeST, list_of_tes
}
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_boundaries_and_opposite_vertex_iterator, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF BOUNDARIES AND OPPOSITE VERTEX ITERATORS" << std::endl;
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 3.);
+ st.insert_simplex_and_subfaces({3, 0}, 2.);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 3.);
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 4.);
+
+ /* Inserted simplex: */
+ /* 1 6 */
+ /* o---o */
+ /* /X\7/ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+ using Simplex = std::vector<typename typeST::Vertex_handle>;
+ // simplices must be kept sorted by vertex number for std::vector to use operator== - cf. last BOOST_CHECK
+ std::vector<Simplex> simplices = {{0, 1, 2}, {0, 3}, {0, 1, 6, 7}, {3, 4, 5}, {3, 5}, {2}};
+ for (auto simplex : simplices) {
+ Simplex opposite_vertices;
+ for(auto boundary_and_opposite_vertex : st.boundary_opposite_vertex_simplex_range(st.find(simplex))) {
+ Simplex output;
+ for (auto vertex : st.simplex_vertex_range(boundary_and_opposite_vertex.first)) {
+ std::clog << vertex << " ";
+ output.emplace_back(vertex);
+ }
+ std::clog << " - opposite vertex = " << boundary_and_opposite_vertex.second << std::endl;
+ // Check that boundary simplex + opposite vertex = simplex given as input
+ output.emplace_back(boundary_and_opposite_vertex.second);
+ std::sort(output.begin(), output.end());
+ BOOST_CHECK(simplex == output);
+ opposite_vertices.emplace_back(boundary_and_opposite_vertex.second);
+ }
+ // Check that the list of all opposite vertices = simplex given as input
+ // no opposite vertices if the simplex given as input is of dimension 0, i.e. a single vertex
+ std::sort(opposite_vertices.begin(), opposite_vertices.end());
+ if (simplex.size() > 1)
+ BOOST_CHECK(simplex == opposite_vertices);
+ else
+ BOOST_CHECK(opposite_vertices.size() == 0);
+ }
+}
diff --git a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
index 0c2014bd..23eb3670 100644
--- a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
+++ b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
@@ -29,7 +29,7 @@ struct SkeletonBlockerDS {
/**
* @brief Root_vertex_handle and Vertex_handle are similar to global and local vertex descriptor
- * used in <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>
+ * used in <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>
* and allow to localize a vertex of a subcomplex on its parent root complex.
*
* Roughly speaking, vertices are stored in a vector
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
index 0c0cc624..d091d7dd 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
@@ -28,7 +28,7 @@ namespace skeleton_blocker {
*/
struct Skeleton_blocker_simple_traits {
/**
- * @brief Global and local handle similar to <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>.
+ * @brief Global and local handle similar to <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>.
* Vertices are stored in a vector.
* For the root simplicial complex, the local and global descriptors are the same.
* For a subcomplex L and one of its vertices 'v', the local descriptor of 'v' is its position in
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
index 12fe6469..d83c0ab3 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simplex.h
@@ -134,7 +134,7 @@ class Skeleton_blocker_simplex {
}
/**
- * Substracts a from the simplex.
+ * Subtracts a from the simplex.
*/
void difference(const Skeleton_blocker_simplex & a) {
std::vector<T> v;
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
index 4c48ff31..4c0c7dad 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
@@ -76,8 +76,8 @@ class Skeleton_blocker_sub_complex : public ComplexType {
public:
/**
* Add a vertex 'global' of K to L. When added to L, this vertex will receive
- * another number, addresses(global), its local adress.
- * return the adress where the vertex lay on L.
+ * another number, addresses(global), its local address.
+ * Returns the address where the vertex lies in L.
* The vertex corresponding to 'global' must not be already present
* in the complex.
*/
@@ -174,7 +174,7 @@ class Skeleton_blocker_sub_complex : public ComplexType {
// /**
// * Allocates a simplex in L corresponding to the simplex s in K
- // * with its local adresses and returns an AddressSimplex.
+ // * with its local addresses and returns an AddressSimplex.
// */
// boost::optional<Simplex> get_address(const Root_simplex_handle & s) const;
@@ -196,10 +196,8 @@ class Skeleton_blocker_sub_complex : public ComplexType {
};
/**
- * @remark remarque perte de temps a creer un nouveau simplexe a chaque fois
- * alors qu'on pourrait utiliser a la place de 'addresses_sigma_in_link'
- * un simplex avec des valeurs spéciales ComplexDS::null_vertex par exemple
- * pour indiquer qu'un vertex n'appartient pas au complex
+ * @remark it is a waste of time to create a new simplex each time; instead of 'addresses_sigma_in_link' we could use a
+ * simplex with special values (e.g. ComplexDS::null_vertex) to indicate that a vertex does not belong to the complex.
*/
template<typename ComplexType>
bool proper_face_in_union(
@@ -226,7 +224,7 @@ bool proper_face_in_union(
}
// Remark: this function should be friend in order to leave get_adresses private
-// however doing so seemes currently not possible due to a visual studio bug c2668
+// however doing so currently seems not possible due to a Visual Studio bug C2668
// "the compiler does not support partial ordering of template functions as specified in the C++ Standard"
// http://www.serkey.com/error-c2668-ambiguous-call-to-overloaded-function-bb45ft.html
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
index a43fa034..116bc779 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
@@ -107,7 +107,7 @@ struct Trie {
}
/**
- * Goes to the root in the trie to consitute simplex
+ * Goes to the root in the trie to constitute simplex
*/
void add_vertices_up_to_the_root(Simplex& res) const {
res.add_vertex(v);
@@ -150,7 +150,7 @@ struct Trie {
++s_pos;
while (s_pos != s.end() && current != 0) {
bool found = false;
- for (const auto child : current->childs) {
+ for (const auto& child : current->childs) {
if (child->v == *s_pos) {
++s_pos;
current = child.get();
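
The added '&' matters whenever the element type is not trivially copyable. Assuming childs holds std::shared_ptr elements (which the child.get() call above suggests), a minimal sketch of the difference:

#include <memory>
#include <vector>

int main() {
  std::vector<std::shared_ptr<int>> childs{std::make_shared<int>(1),
                                           std::make_shared<int>(2)};
  int sum = 0;
  // 'const auto child' copies the shared_ptr on every iteration, paying an
  // atomic reference-count increment and decrement per element.
  for (const auto child : childs) sum += *child;
  // 'const auto& child' binds a reference to the stored pointer: no copy,
  // no reference-count traffic; the loop body is otherwise identical.
  for (const auto& child : childs) sum += *child;
  return sum == 6 ? 0 : 1;
}
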
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
index 37c0b4d3..2c49a1b8 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/iterators/Skeleton_blockers_triangles_iterators.h
@@ -21,7 +21,7 @@ namespace skeleton_blocker {
/**
* \brief Iterator over the triangles that are
* adjacent to a vertex of the simplicial complex.
- * \remark Will be removed soon -> dont look
+ * \remark Will be removed soon -> don't look
*/
template<typename Complex, typename LinkType>
class Triangle_around_vertex_iterator : public boost::iterator_facade
@@ -95,7 +95,7 @@ class Triangle_around_vertex_iterator : public boost::iterator_facade
/**
* \brief Iterator over the triangles of the
* simplicial complex.
- * \remark Will be removed soon -> dont look
+ * \remark Will be removed soon -> don't look
*
*/
template<typename SkeletonBlockerComplex>
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
index 125c6387..b4ffc756 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
@@ -438,7 +438,7 @@ class Skeleton_blocker_complex {
}
/**
- * return the id of a vertex of adress local present in the graph
+ * return the id of a vertex of address local present in the graph
*/
Root_vertex_handle get_id(Vertex_handle local) const {
assert(0 <= local.vertex && local.vertex < boost::num_vertices(skeleton));
@@ -740,7 +740,7 @@ class Skeleton_blocker_complex {
* complex to the smallest flag complex that contains it.
*/
void remove_blockers() {
- // Desallocate the blockers
+ // Deallocate the blockers
while (!blocker_map_.empty()) {
delete_blocker(blocker_map_.begin()->second);
}
@@ -764,8 +764,8 @@ class Skeleton_blocker_complex {
public:
/**
- * Removes the simplex s from the set of blockers
- * and desallocate s.
+ * Removes the simplex sigma from the set of blockers
+ * and deallocate sigma.
*/
void delete_blocker(Blocker_handle sigma) {
if (visitor)
@@ -960,7 +960,7 @@ class Skeleton_blocker_complex {
}
/*
- * @brief returnrs true iff the complex is empty.
+ * @brief returns true iff the complex is empty.
*/
bool empty() const {
return num_vertices() == 0;
@@ -1043,7 +1043,7 @@ class Skeleton_blocker_complex {
if (num_vertices() == 1)
return true;
for (auto vi : vertex_range()) {
- // xxx todo faire une methode bool is_in_blocker(Vertex_handle)
+ // xxx todo create a method: bool is_in_blocker(Vertex_handle)
if (blocker_map_.find(vi) == blocker_map_.end()) {
// no blocker passes through the vertex, we just need to
// check if the current vertex is linked to all other vertices of the complex
@@ -1071,7 +1071,6 @@ class Skeleton_blocker_complex {
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
void remove_popable_blockers();
@@ -1103,7 +1102,6 @@ class Skeleton_blocker_complex {
public:
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
void remove_star(Vertex_handle a, Vertex_handle b);
@@ -1293,7 +1291,7 @@ class Skeleton_blocker_complex {
typedef boost::iterator_range<Complex_neighbors_vertices_iterator> Complex_neighbors_vertices_range;
/**
- * @brief Returns a Complex_edge_range over all edges of the simplicial complex that passes trough v
+ * @brief Returns a Complex_edge_range over all edges of the simplicial complex that pass through v
*/
Complex_neighbors_vertices_range vertex_range(Vertex_handle v) const {
auto begin = Complex_neighbors_vertices_iterator(this, v);
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
index a2637da3..b3bf0382 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
@@ -164,7 +164,7 @@ ComplexType> {
Vertex_handle y_parent = *parent_complex.get_address(
this->get_id(*y_link));
if (parent_complex.contains_edge(x_parent, y_parent)) {
- // we check that there is no blocker subset of alpha passing trough x and y
+ // we check that there is no blocker subset of alpha passing through x and y
bool new_edge = true;
for (auto blocker_parent : parent_complex.const_blocker_range(
x_parent)) {
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
index 404f04f9..e686aaec 100644..100755
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
@@ -39,7 +39,6 @@ bool Skeleton_blocker_complex<SkeletonBlockerDS>::is_popable_blocker(Blocker_han
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_popable_blockers() {
@@ -160,7 +159,6 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::update_blockers_after_remove_s
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_star(Vertex_handle a, Vertex_handle b) {
@@ -269,7 +267,7 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_blocker_include_in_simp
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::tip_blockers(Vertex_handle a, Vertex_handle b,
std::vector<Simplex> & buffer) const {
- for (auto const & blocker : this->const_blocker_range(a)) {
+ for (auto const blocker : this->const_blocker_range(a)) {
Simplex beta = (*blocker);
beta.remove_vertex(a);
buffer.push_back(beta);
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index f3491f91..cc424810 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -1152,7 +1152,7 @@ class Tangential_complex {
#ifdef GUDHI_TC_VERY_VERBOSE
std::cerr << "Inserted " << num_inserted_points << " points / " << num_attempts_to_insert_points
- << " attemps to compute the star\n";
+ << " attempts to compute the star\n";
#endif
update_star(i);
diff --git a/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h b/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
index 2ae1d6e0..1aebb045 100644
--- a/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
+++ b/src/Witness_complex/include/gudhi/Active_witness/Active_witness.h
@@ -18,7 +18,7 @@ namespace Gudhi {
namespace witness_complex {
- /* \class Active_witness
+ /** \class Active_witness
* \brief Class representing a list of nearest neighbors to a given witness.
* \details Every element is a pair of a landmark identifier and the squared distance to it.
*/
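
This '/*' to '/**' change (repeated in the next few hunks) is not cosmetic: Doxygen only parses special comment blocks, so a plain C comment leaves the following declaration undocumented. A minimal sketch of the rule:

/* Plain comment: ignored by Doxygen, so 'undocumented()' gets no docs. */
int undocumented();

/** Special comment block: Doxygen attaches this text to 'documented()'. */
int documented();
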
diff --git a/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h b/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
index 4f8fddba..18f19650 100644
--- a/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
+++ b/src/Witness_complex/include/gudhi/Active_witness/Active_witness_iterator.h
@@ -18,7 +18,7 @@ namespace Gudhi {
namespace witness_complex {
-/* \brief Iterator in the nearest landmark list.
+/** \brief Iterator in the nearest landmark list.
* \details After the iterator reaches the end of the list,
* the list is augmented by a (nearest landmark, distance) pair if possible.
* If all the landmarks are present in the list, iterator returns the specific end value
diff --git a/src/Witness_complex/include/gudhi/Strong_witness_complex.h b/src/Witness_complex/include/gudhi/Strong_witness_complex.h
index b3699f77..ddc0da32 100644
--- a/src/Witness_complex/include/gudhi/Strong_witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Strong_witness_complex.h
@@ -125,7 +125,7 @@ class Strong_witness_complex {
//@}
private:
- /* \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
+ /** \brief Adds recursively all the faces of a certain dimension dim-1 witnessed by the same witness.
* The iterator is needed to know how far we can take landmarks to form simplexes.
* simplex is the prefix of the simplexes to insert.
* The landmark pointed by aw_it is added to all formed simplices.
diff --git a/src/Witness_complex/include/gudhi/Witness_complex.h b/src/Witness_complex/include/gudhi/Witness_complex.h
index d655c7f6..66ae7af2 100644
--- a/src/Witness_complex/include/gudhi/Witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Witness_complex.h
@@ -127,7 +127,7 @@ class Witness_complex {
//@}
private:
- /* \brief Adds recursively all the faces of a certain dimension dim witnessed by the same witness.
+ /** \brief Adds recursively all the faces of a certain dimension dim witnessed by the same witness.
* The iterator is needed to know how far we can take landmarks to form simplexes.
* simplex is the prefix of the simplexes to insert.
* The output value indicates whether the witness remains active or not.
diff --git a/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h b/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
index 5845728a..007ab084 100644
--- a/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
+++ b/src/Witness_complex/include/gudhi/Witness_complex/all_faces_in.h
@@ -11,7 +11,7 @@
#ifndef WITNESS_COMPLEX_ALL_FACES_IN_H_
#define WITNESS_COMPLEX_ALL_FACES_IN_H_
-/* \brief Check if the facets of the k-dimensional simplex witnessed
+/** \brief Check if the facets of the k-dimensional simplex witnessed
* by witness witness_id are already in the complex.
* inserted_vertex is the handle of the (k+1)-th vertex witnessed by witness_id
*/
diff --git a/src/cmake/modules/FindTBB.cmake b/src/cmake/modules/FindTBB.cmake
index 13f4d929..e6c42dc7 100644
--- a/src/cmake/modules/FindTBB.cmake
+++ b/src/cmake/modules/FindTBB.cmake
@@ -34,7 +34,7 @@
#
# GvdB: Mac OS X distribution places libraries directly in lib directory.
#
-# For backwards compatibility, you may explicitely set the CMake variables TBB_ARCHITECTURE and TBB_COMPILER.
+# For backwards compatibility, you may explicitly set the CMake variables TBB_ARCHITECTURE and TBB_COMPILER.
# TBB_ARCHITECTURE [ ia32 | em64t | itanium ]
# which architecture to use
# TBB_COMPILER e.g. vc9 or cc3.2.3_libc2.3.2_kernel2.4.21 or cc4.0.1_os10.4.9
@@ -54,8 +54,8 @@
# TBB_MALLOC_DEBUG_LIBRARY, the TBB debug malloc library
# TBB_FOUND, If false, don't try to use TBB.
# TBB_INTERFACE_VERSION, as defined in tbb/tbb_stddef.h
-# TBB_MALLOCPROXY_DEBUG_LIBRARY, the TBB debug malloc_proxy library (not included in TBB_LIBRARIES since it's optionnal)
-# TBB_MALLOCPROXY_RELEASE_LIBRARY, the TBB release malloc_proxy library (not included in TBB_LIBRARIES since it's optionnal)
+# TBB_MALLOCPROXY_DEBUG_LIBRARY, the TBB debug malloc_proxy library (not included in TBB_LIBRARIES since it's optional)
+# TBB_MALLOCPROXY_RELEASE_LIBRARY, the TBB release malloc_proxy library (not included in TBB_LIBRARIES since it's optional)
include(CheckCXXSourceCompiles)
diff --git a/src/cmake/modules/GUDHI_doxygen_target.cmake b/src/cmake/modules/GUDHI_doxygen_target.cmake
index 0f80b187..327513da 100644
--- a/src/cmake/modules/GUDHI_doxygen_target.cmake
+++ b/src/cmake/modules/GUDHI_doxygen_target.cmake
@@ -44,6 +44,20 @@ if(DOXYGEN_FOUND)
set(GUDHI_DOXYGEN_UTILS_PATH "utilities/*")
endif()
+ message("++ Doxygen version ${DOXYGEN_VERSION}")
+ if (DOXYGEN_VERSION VERSION_LESS 1.9.3)
+ set(GUDHI_DOXYGEN_CLASS_DIAGRAMS "CLASS_DIAGRAMS = NO")
+ else()
+ set(GUDHI_DOXYGEN_CLASS_DIAGRAMS "")
+ endif()
+ if (DOXYGEN_VERSION VERSION_LESS 1.9.2)
+ set(GUDHI_DOXYGEN_MATHJAX_VERSION "MATHJAX_VERSION = MathJax_2")
+ set(GUDHI_DOXYGEN_MATHJAX_EXTENSIONS "TeX/AMSmath TeX/AMSsymbols")
+ else()
+ set(GUDHI_DOXYGEN_MATHJAX_VERSION "MATHJAX_VERSION = MathJax_3")
+ set(GUDHI_DOXYGEN_MATHJAX_EXTENSIONS "ams")
+ endif()
+
configure_file(${GUDHI_DOXYGEN_SOURCE_PREFIX}/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile" @ONLY)
add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
diff --git a/src/cmake/modules/GUDHI_modules.cmake b/src/cmake/modules/GUDHI_modules.cmake
index 13248f7e..ec1f756b 100644
--- a/src/cmake/modules/GUDHI_modules.cmake
+++ b/src/cmake/modules/GUDHI_modules.cmake
@@ -2,7 +2,7 @@
set(GUDHI_MODULES_FULL_LIST "")
function(add_gudhi_module file_path)
- option("WITH_MODULE_GUDHI_${file_path}" "Activate/desactivate ${file_path} compilation and installation" ON)
+ option("WITH_MODULE_GUDHI_${file_path}" "Activate/deactivate ${file_path} compilation and installation" ON)
if (WITH_MODULE_GUDHI_${file_path})
set(GUDHI_MODULES ${GUDHI_MODULES} ${file_path} CACHE INTERNAL "GUDHI_MODULES")
else()
@@ -10,7 +10,7 @@ function(add_gudhi_module file_path)
endif()
# Required by user_version
set(GUDHI_MODULES_FULL_LIST ${GUDHI_MODULES_FULL_LIST} ${file_path} PARENT_SCOPE)
- # Include module headers is independant - You may ask for no Alpha complex module but Python interface i.e.
+ # Including module headers is independent - you may e.g. ask for no Alpha complex module but still want the Python interface
if(IS_DIRECTORY ${CMAKE_SOURCE_DIR}/src/${file_path}/include/)
include_directories(src/${file_path}/include/)
endif()
diff --git a/src/cmake/modules/GUDHI_options.cmake b/src/cmake/modules/GUDHI_options.cmake
index 3cd0a489..8379e3c6 100644
--- a/src/cmake/modules/GUDHI_options.cmake
+++ b/src/cmake/modules/GUDHI_options.cmake
@@ -1,5 +1,15 @@
-option(WITH_GUDHI_BENCHMARK "Activate/desactivate benchmark compilation" OFF)
-option(WITH_GUDHI_EXAMPLE "Activate/desactivate examples compilation and installation" OFF)
-option(WITH_GUDHI_PYTHON "Activate/desactivate python module compilation and installation" ON)
-option(WITH_GUDHI_TEST "Activate/desactivate examples compilation and installation" ON)
-option(WITH_GUDHI_UTILITIES "Activate/desactivate utilities compilation and installation" ON)
+option(WITH_GUDHI_BENCHMARK "Activate/deactivate benchmark compilation" OFF)
+option(WITH_GUDHI_EXAMPLE "Activate/deactivate examples compilation and installation" OFF)
+option(WITH_GUDHI_REMOTE_TEST "Activate/deactivate datasets fetching test which uses the Internet" OFF)
+option(WITH_GUDHI_PYTHON "Activate/deactivate python module compilation and installation" ON)
+option(WITH_GUDHI_TEST "Activate/deactivate tests compilation and installation" ON)
+option(WITH_GUDHI_UTILITIES "Activate/deactivate utilities compilation and installation" ON)
+option(WITH_GUDHI_THIRD_PARTY "Activate/deactivate third party libraries cmake detection. Setting it to OFF is useful e.g. for the doxygen or user_version targets" ON)
+
+if (NOT WITH_GUDHI_THIRD_PARTY)
+ set (WITH_GUDHI_BENCHMARK OFF)
+ set (WITH_GUDHI_EXAMPLE OFF)
+ set (WITH_GUDHI_PYTHON OFF)
+ set (WITH_GUDHI_TEST OFF)
+ set (WITH_GUDHI_UTILITIES OFF)
+endif()
diff --git a/src/cmake/modules/GUDHI_submodules.cmake b/src/cmake/modules/GUDHI_submodules.cmake
new file mode 100644
index 00000000..78b045bd
--- /dev/null
+++ b/src/cmake/modules/GUDHI_submodules.cmake
@@ -0,0 +1,5 @@
+# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
+set(HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include)
+set(HERA_WASSERSTEIN_INCLUDE_DIR ${HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's wasserstein.h")
+set(HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/bottleneck/include)
+set(HERA_BOTTLENECK_INCLUDE_DIR ${HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's bottleneck.h") \ No newline at end of file
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 7c982b3b..2cf6787e 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -19,6 +19,15 @@ if(GMP_FOUND)
endif()
endif()
+# from windows vcpkg eigen 3.4.0#2 : build fails with
+# error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+# cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+# Workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+ message("++ User explicit demand to force EIGEN_DEFAULT_DENSE_INDEX_TYPE to int")
+ add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int)
+endif()
+
# In CMakeLists.txt, when include(${CGAL_USE_FILE}), CMAKE_CXX_FLAGS are overwritten.
# cf. http://doc.cgal.org/latest/Manual/installation.html#title40
# A workaround is to include(${CGAL_USE_FILE}) before adding "-std=c++11".
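
A hedged C++ sketch of what the new flag does (not part of the patch): EIGEN_DEFAULT_DENSE_INDEX_TYPE is the typedef behind Eigen::Index, so defining it to int before the first Eigen include, as the add_definitions above does globally, narrows every index type the GMP expression wrapper has to accept:

// Emulates -DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int with a #define; it must
// appear before the first Eigen header is included.
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include <Eigen/Dense>
#include <type_traits>

static_assert(std::is_same<Eigen::Index, int>::value,
              "with the workaround, Eigen indices are plain ints");

int main() {
  Eigen::MatrixXd m(2, 2);
  Eigen::Index n = m.rows();  // n is an int here, not std::ptrdiff_t
  return static_cast<int>(n) - 2;
}
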
@@ -39,12 +48,6 @@ if(CGAL_FOUND)
include( ${CGAL_USE_FILE} )
endif()
-# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
-set(HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include)
-set(HERA_WASSERSTEIN_INCLUDE_DIR ${HERA_WASSERSTEIN_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's wasserstein.h")
-set(HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/bottleneck/include)
-set(HERA_BOTTLENECK_INCLUDE_DIR ${HERA_BOTTLENECK_INTERNAL_INCLUDE_DIR} CACHE PATH "Directory where one can find Hera's bottleneck.h")
-
option(WITH_GUDHI_USE_TBB "Build with Intel TBB parallelization" ON)
# Find TBB package for parallel sort - not mandatory, just optional.
@@ -165,7 +168,7 @@ if (WITH_GUDHI_PYTHON)
message(FATAL_ERROR "ERROR: GUDHI_PYTHON_PATH is not valid.")
endif(NOT GUDHI_PYTHON_PATH)
- option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Usefull when setting rpath is not allowed" ON)
+ option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Useful when setting rpath is not allowed" ON)
if(PYTHONINTERP_FOUND AND CYTHON_FOUND)
if(SPHINX_FOUND)
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 9e76c3d9..4487ad86 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -14,8 +14,6 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
make_directory ${GUDHI_USER_VERSION_DIR}
COMMENT "user_version creation in ${GUDHI_USER_VERSION_DIR}")
-file(COPY "${CMAKE_SOURCE_DIR}/src/Doxyfile.in" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
-
# Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h
index 879fb96a..1634b19e 100644
--- a/src/common/doc/examples.h
+++ b/src/common/doc/examples.h
@@ -1,6 +1,6 @@
// List of GUDHI examples and utils - Doxygen needs at least a file tag to analyse comments
// Generated from scripts/cpp_examples_for_doxygen.py
-/*! @file Examples
+/*! @file
* \section Witness_complex_example_section Witness_complex
* @example strong_witness_persistence.cpp
* @example weak_witness_persistence.cpp
@@ -40,7 +40,6 @@
* @example edge_collapse_basic_example.cpp
* \section Cech_complex_example_section Cech_complex
* @example cech_persistence.cpp
- * @example cech_complex_step_by_step.cpp
* @example cech_complex_example_from_points.cpp
* \section Bitmap_cubical_complex_example_section Bitmap_cubical_complex
* @example periodic_cubical_complex_persistence.cpp
diff --git a/src/common/doc/footer.html b/src/common/doc/footer.html
index 4168c6bc..08a2cbd0 100644
--- a/src/common/doc/footer.html
+++ b/src/common/doc/footer.html
@@ -1,5 +1,9 @@
-<!-- HTML footer for doxygen 1.8.6-->
+<!-- HTML footer for doxygen 1.9.4-->
<!-- start footer part -->
+<!--BEGIN GENERATE_TREEVIEW-->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+<!--END GENERATE_TREEVIEW-->
+<ul>
<table style="width:100%">
<tr class="no-bullet shadow-black">
<td class="network-entypo">
@@ -10,14 +14,15 @@
<!--END PROJECT_NAME-->
</td>
<td class="network-entypo">
-<!--BEGIN GENERATE_TREEVIEW-->
$generatedby
<a href="http://www.doxygen.org/index.html">
Doxygen</a> $doxygenversion
-<!--END GENERATE_TREEVIEW-->
</td>
</tr>
</table>
-
+</ul>
+<!--BEGIN GENERATE_TREEVIEW-->
+</div>
+<!--END GENERATE_TREEVIEW-->
</body>
</html>
diff --git a/src/common/doc/header.html b/src/common/doc/header.html
index 7c20478b..a97e1b2f 100644
--- a/src/common/doc/header.html
+++ b/src/common/doc/header.html
@@ -8,9 +8,6 @@
<meta name="generator" content="Doxygen $doxygenversion"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
-<!-- GUDHI website css for header BEGIN -->
-<link rel="stylesheet" type="text/css" href="https://gudhi.inria.fr/assets/css/styles_feeling_responsive.css" />
-<!-- GUDHI website css for header END -->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
@@ -18,13 +15,17 @@ $treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
+<!-- GUDHI website css for header BEGIN -->
+<link rel="stylesheet" type="text/css" href="https://gudhi.inria.fr/assets/css/styles_feeling_responsive.css" />
+<!-- GUDHI website css for header END -->
$extrastylesheet
</head>
<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!-- GUDHI website header BEGIN -->
<div id="navigation" class="sticky">
- <nav class="top-bar" role="navigation" data-topbar>
+ <nav class="top-bar" role="navigation" data-topbar="true">
<ul class="title-area">
<li class="name">
<h1 class="show-for-small-only"><a href="" class="icon-tree"> GUDHI library</a></h1>
@@ -38,7 +39,7 @@ $extrastylesheet
<li><a href="/contact/">Contact</a></li>
</ul>
<ul class="left">
- <li><a href="/"> <img src="/assets/img/home.png" alt=" GUDHI"> GUDHI </a></li>
+ <li><a href="/"> <img src="/assets/img/home.png" alt=" GUDHI"/> GUDHI </a></li>
<li class="divider"></li>
<li class="has-dropdown">
<a href="#">Project</a>
@@ -85,7 +86,6 @@ $extrastylesheet
</div><!-- /#navigation -->
<!-- GUDHI website header END -->
-<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!--BEGIN TITLEAREA-->
<div id="titlearea">
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index 67d026bd..63a37a25 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -5,8 +5,8 @@
* Examples of GUDHI headers inclusion can be found in \ref utilities.
*
* \section compiling Compiling
- * The library uses c++14 and requires <a target="_blank" href="http://www.boost.org/">Boost</a> &ge; 1.66.0
- * and <a target="_blank" href="https://www.cmake.org/">CMake</a> &ge; 3.5.
+ * The library uses C++14 and requires <a target="_blank" href="https://www.boost.org/">Boost</a> &ge; 1.66.0
+ * and <a target="_blank" href="https://cmake.org/">CMake</a> &ge; 3.5.
* It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2015.
*
* \subsection utilities Utilities and examples
@@ -40,13 +40,20 @@ make \endverbatim
* `make test` is using <a href="https://cmake.org/cmake/help/latest/manual/ctest.1.html">Ctest</a> (CMake test driver
* program). If some of the tests are failing, please send us the result of the following command:
* \verbatim ctest --output-on-failure \endverbatim
+ * Testing the datasets fetching feature requires internet access and is disabled by default. If you want to include this test, set WITH_GUDHI_REMOTE_TEST to ON when building in the previous step (note that this test is included in the python module):
+ * \verbatim cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_REMOTE_TEST=ON -DWITH_GUDHI_PYTHON=ON .. \endverbatim
*
- * \subsection documentationgeneration Documentation
- * To generate the documentation, <a target="_blank" href="http://www.doxygen.org/">Doxygen</a> is required.
- * Run the following command in a terminal:
+ * \subsection documentationgeneration C++ documentation
+ * To generate the C++ documentation, the <a target="_blank" href="http://www.doxygen.org/">doxygen</a> program
+ * is required (version &ge; 1.9.5 is advised). Run the following command in a terminal:
* \verbatim make doxygen \endverbatim
* Documentation will be generated in a folder named <code>html</code>.
*
+ * In case a full setup is not present and only the documentation should be built, the following command sequence
+ * can be used:
+\verbatim cmake -DWITH_GUDHI_THIRD_PARTY=OFF ..
+make doxygen\endverbatim
+ *
* \subsection helloworld Hello world !
* The <a target="_blank" href="https://github.com/GUDHI/hello-gudhi-world">Hello world for GUDHI</a>
* project is an example to help developers to make their own C++ project on top of the GUDHI library.
@@ -56,10 +63,9 @@ make \endverbatim
* The multi-field persistent homology algorithm requires GMP which is a free library for arbitrary-precision
* arithmetic, operating on signed integers, rational numbers, and floating point numbers.
*
- * The following example requires the <a target="_blank" href="http://gmplib.org/">GNU Multiple Precision Arithmetic
+ * The following example requires the <a target="_blank" href="https://gmplib.org/">GNU Multiple Precision Arithmetic
* Library</a> (GMP) and will not be built if GMP is not installed:
- * \li <a href="rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
*
* Having GMP version 4.2 or higher installed is recommended.
*
@@ -76,179 +82,101 @@ make \endverbatim
*
* The following examples/utilities require the <a target="_blank" href="http://www.cgal.org/">Computational Geometry Algorithms
* Library</a> (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed:
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="bottleneck_basic_example_8cpp-example.html">
- * Bottleneck_distance/bottleneck_basic_example.cpp</a>
- * \li <a href="bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/bottleneck_distance.cpp</a>
- * \li <a href="_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_basic_example.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection eigen Eigen
* Some GUDHI modules (cf. \ref main_page "modules list"), and few examples require
- * <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> is a C++ template library for linear algebra:
+ * <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a> is a C++ template library for linear algebra:
* matrices, vectors, numerical solvers, and related algorithms.
*
- * The following examples/utilities require the <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> and will not be
+ * The following examples/utilities require <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a> and will not be
* built if Eigen is not installed:
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_coxeter_triangulation_2cell_complex_from_basic_circle_manifold_8cpp-example.html">
- * Coxeter_triangulation/cell_complex_from_basic_circle_manifold.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_custom_function_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_custom_function.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,cell_complex_from_basic_circle_manifold.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_custom_function.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection tbb Threading Building Blocks
- * <a target="_blank" href="https://www.threadingbuildingblocks.org/">Intel&reg; TBB</a> lets you easily write parallel
+ * <a target="_blank" href="https://github.com/oneapi-src/oneTBB">Intel&reg; TBB</a> lets you easily write parallel
* C++ programs that take full advantage of multicore performance, that are portable and composable, and that have
* future-proof scalability.
*
* Having Intel&reg; TBB installed is recommended to parallelize and accelerate some GUDHI computations.
*
* The following examples/utilities are using Intel&reg; TBB if installed:
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="cubical_complex_persistence_8cpp-example.html">
- * Bitmap_cubical_complex/cubical_complex_persistence.cpp</a>
- * \li <a href="periodic_cubical_complex_persistence_8cpp-example.html">
- * Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp</a>
- * \li <a href="_random_bitmap_cubical_complex_8cpp-example.html">
- * Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp</a>
- * \li <a href="_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a>
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a>
- * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_persistence.cpp</a>
- * \li <a href="rips_persistence_via_boundary_matrix_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp</a>
- * \li <a href="persistence_from_file_8cpp-example.html">
- * Persistent_cohomology/persistence_from_file.cpp</a>
- * \li <a href="persistence_from_simple_simplex_tree_8cpp-example.html">
- * Persistent_cohomology/persistence_from_simple_simplex_tree.cpp</a>
- * \li <a href="plain_homology_8cpp-example.html">
- * Persistent_cohomology/plain_homology.cpp</a>
- * \li <a href="rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
- * \li <a href="rips_persistence_step_by_step_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_step_by_step.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="example_one_skeleton_rips_from_points_8cpp-example.html">
- * Rips_complex/example_one_skeleton_rips_from_points.cpp</a>
- * \li <a href="example_rips_complex_from_off_file_8cpp-example.html">
- * Rips_complex/example_rips_complex_from_off_file.cpp</a>
- * \li <a href="rips_distance_matrix_persistence_8cpp-example.html">
- * Rips_complex/rips_distance_matrix_persistence.cpp</a>
- * \li <a href="rips_persistence_8cpp-example.html">
- * Rips_complex/rips_persistence.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_nearest_landmark_table_8cpp-example.html">
- * Witness_complex/example_nearest_landmark_table.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,periodic_cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,Random_bitmap_cubical_complex.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp}
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_via_boundary_matrix.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_file.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_one_skeleton_rips_from_points.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_rips_complex_from_off_file.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_nearest_landmark_table.cpp}
*
* \section Contributions Bug reports and contributions
* Please help us improving the quality of the GUDHI library.
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index 17354179..ce903405 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -180,8 +180,8 @@
<td width="15%">
<b>Author:</b> Vincent Rouvreau<br>
<b>Introduced in:</b> GUDHI 2.2.0<br>
- <b>Copyright:</b> MIT [(GPL v3)](../../licensing/)<br>
- <b>Includes:</b> [Miniball](https://people.inf.ethz.ch/gaertner/subdir/software/miniball.html)<br>
+ <b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
+ <b>Requires:</b> \ref cgal
</td>
</tr>
<tr>
@@ -231,13 +231,12 @@
homology of the input sequence. The resulting method is simple and extremely efficient.
Computation of edge collapse and persistent homology of a filtered flag complex via edge collapse as described in
- \cite edgecollapsesocg2020.
+ \cite edgecollapsearxiv.
</td>
<td width="15%">
- <b>Author:</b> Siddharth Pritam<br>
+ <b>Author:</b> Siddharth Pritam, Marc Glisse<br>
<b>Introduced in:</b> GUDHI 3.3.0<br>
- <b>Copyright:</b> MIT<br>
- <b>Requires:</b> \ref eigen
+ <b>Copyright:</b> MIT
</td>
</tr>
<tr>
diff --git a/src/common/doc/stylesheet.css b/src/common/doc/stylesheet.css
index 1df177a4..fb030e1f 100644..100755
--- a/src/common/doc/stylesheet.css
+++ b/src/common/doc/stylesheet.css
@@ -1,1367 +1,28 @@
-/* The standard CSS for doxygen 1.8.6 */
-
-body, table, div, p, dl {
- font: 400 14px/22px Roboto,sans-serif;
-}
-
-/* @group Heading Levels */
-
-h1.groupheader {
- font-size: 150%;
-}
-
-.title {
- font: 400 14px/28px Roboto,sans-serif;
- font-size: 150%;
- font-weight: bold;
- margin: 10px 2px;
-}
-
-h2.groupheader {
- border-bottom: 1px solid #879ECB;
- color: #354C7B;
- font-size: 150%;
- font-weight: normal;
- margin-top: 1.75em;
- padding-top: 8px;
- padding-bottom: 4px;
- width: 100%;
-}
-
-h3.groupheader {
- font-size: 100%;
-}
-
-h1, h2, h3, h4, h5, h6 {
- -webkit-transition: text-shadow 0.5s linear;
- -moz-transition: text-shadow 0.5s linear;
- -ms-transition: text-shadow 0.5s linear;
- -o-transition: text-shadow 0.5s linear;
- transition: text-shadow 0.5s linear;
- margin-right: 15px;
-}
-
-h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
- text-shadow: 0 0 15px cyan;
-}
-
-dt {
- font-weight: bold;
-}
-
-div.multicol {
- -moz-column-gap: 1em;
- -webkit-column-gap: 1em;
- -moz-column-count: 3;
- -webkit-column-count: 3;
-}
-
-p.startli, p.startdd {
- margin-top: 2px;
-}
-
-p.starttd {
- margin-top: 0px;
-}
-
-p.endli {
- margin-bottom: 0px;
-}
-
-p.enddd {
- margin-bottom: 4px;
-}
-
-p.endtd {
- margin-bottom: 2px;
-}
-
-/* @end */
-
-caption {
- font-weight: bold;
-}
-
-span.legend {
- font-size: 70%;
- text-align: center;
-}
-
-h3.version {
- font-size: 90%;
- text-align: center;
-}
-
-div.qindex, div.navtab{
- background-color: #EBEFF6;
- border: 1px solid #A3B4D7;
- text-align: center;
-}
-
-div.qindex, div.navpath {
- width: 100%;
- line-height: 140%;
-}
-
-div.navtab {
- margin-right: 15px;
-}
-
-/* @group Link Styling */
-
-a {
- color: #3D578C;
- font-weight: normal;
- text-decoration: none;
-}
-
-.contents a:visited {
- color: #4665A2;
-}
-
-a:hover {
- text-decoration: underline;
-}
-
-a.qindex {
- font-weight: bold;
-}
-
-a.qindexHL {
- font-weight: bold;
- background-color: #9CAFD4;
- color: #ffffff;
- border: 1px double #869DCA;
-}
-
-.contents a.qindexHL:visited {
- color: #ffffff;
-}
-
-a.el {
- font-weight: bold;
-}
-
-a.elRef {
-}
-
-a.code, a.code:visited, a.line, a.line:visited {
- color: #4665A2;
-}
-
-a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited {
- color: #4665A2;
-}
-
-/* @end */
-
-dl.el {
- margin-left: -1cm;
-}
-
-pre.fragment {
- border: 1px solid #C4CFE5;
- background-color: #FBFCFD;
- padding: 4px 6px;
- margin: 4px 8px 4px 2px;
- overflow: auto;
- word-wrap: break-word;
- font-size: 9pt;
- line-height: 125%;
- font-family: monospace, fixed;
- font-size: 105%;
-}
-
-div.fragment {
- padding: 4px 6px;
- margin: 4px 8px 4px 2px;
- background-color: #FBFCFD;
- border: 1px solid #C4CFE5;
-}
-
-div.line {
- font-family: monospace, fixed;
- font-size: 13px;
- min-height: 13px;
- line-height: 1.0;
- text-wrap: unrestricted;
- white-space: -moz-pre-wrap; /* Moz */
- white-space: -pre-wrap; /* Opera 4-6 */
- white-space: -o-pre-wrap; /* Opera 7 */
- white-space: pre-wrap; /* CSS3 */
- word-wrap: break-word; /* IE 5.5+ */
- text-indent: -53px;
- padding-left: 53px;
- padding-bottom: 0px;
- margin: 0px;
- -webkit-transition-property: background-color, box-shadow;
- -webkit-transition-duration: 0.5s;
- -moz-transition-property: background-color, box-shadow;
- -moz-transition-duration: 0.5s;
- -ms-transition-property: background-color, box-shadow;
- -ms-transition-duration: 0.5s;
- -o-transition-property: background-color, box-shadow;
- -o-transition-duration: 0.5s;
- transition-property: background-color, box-shadow;
- transition-duration: 0.5s;
-}
-
-div.line.glow {
- background-color: cyan;
- box-shadow: 0 0 10px cyan;
-}
-
-
-span.lineno {
- padding-right: 4px;
- text-align: right;
- border-right: 2px solid #0F0;
- background-color: #E8E8E8;
- white-space: pre;
-}
-span.lineno a {
- background-color: #D8D8D8;
-}
-
-span.lineno a:hover {
- background-color: #C8C8C8;
-}
-
-div.ah {
- background-color: black;
- font-weight: bold;
- color: #ffffff;
- margin-bottom: 3px;
- margin-top: 3px;
- padding: 0.2em;
- border: solid thin #333;
- border-radius: 0.5em;
- -webkit-border-radius: .5em;
- -moz-border-radius: .5em;
- box-shadow: 2px 2px 3px #999;
- -webkit-box-shadow: 2px 2px 3px #999;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
- background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444));
- background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000);
-}
-
-div.groupHeader {
- margin-left: 16px;
- margin-top: 12px;
- font-weight: bold;
-}
-
-div.groupText {
- margin-left: 16px;
- font-style: italic;
-}
-
-body {
- background-color: white;
- color: black;
- margin: 0;
-}
-
-div.contents {
- margin-top: 10px;
- margin-left: 12px;
- margin-right: 8px;
-}
-
-td.indexkey {
- background-color: #EBEFF6;
- font-weight: bold;
- border: 1px solid #C4CFE5;
- margin: 2px 0px 2px 0;
- padding: 2px 10px;
- white-space: nowrap;
- vertical-align: top;
-}
-
-td.indexvalue {
- background-color: #EBEFF6;
- border: 1px solid #C4CFE5;
- padding: 2px 10px;
- margin: 2px 0px;
-}
-
-tr.memlist {
- background-color: #EEF1F7;
-}
-
-p.formulaDsp {
- text-align: center;
-}
-
-img.formulaDsp {
-
-}
-
-img.formulaInl {
- vertical-align: middle;
-}
-
-div.center {
- text-align: center;
- margin-top: 0px;
- margin-bottom: 0px;
- padding: 0px;
-}
-
-div.center img {
- border: 0px;
-}
-
-address.footer {
- text-align: right;
- padding-right: 12px;
-}
-
-img.footer {
- border: 0px;
- vertical-align: middle;
-}
-
-/* @group Code Colorization */
-
-span.keyword {
- color: #008000
-}
-
-span.keywordtype {
- color: #604020
-}
-
-span.keywordflow {
- color: #e08000
-}
-
-span.comment {
- color: #800000
-}
-
-span.preprocessor {
- color: #806020
-}
-
-span.stringliteral {
- color: #002080
-}
-
-span.charliteral {
- color: #008080
-}
-
-span.vhdldigit {
- color: #ff00ff
-}
-
-span.vhdlchar {
- color: #000000
-}
-
-span.vhdlkeyword {
- color: #700070
-}
-
-span.vhdllogic {
- color: #ff0000
-}
-
-blockquote {
- background-color: #F7F8FB;
- border-left: 2px solid #9CAFD4;
- margin: 0 24px 0 4px;
- padding: 0 12px 0 16px;
-}
-
-/* @end */
-
-/*
-.search {
- color: #003399;
- font-weight: bold;
-}
-
-form.search {
- margin-bottom: 0px;
- margin-top: 0px;
-}
-
-input.search {
- font-size: 75%;
- color: #000080;
- font-weight: normal;
- background-color: #e8eef2;
-}
-*/
-
-td.tiny {
- font-size: 75%;
-}
-
-.dirtab {
- padding: 4px;
- border-collapse: collapse;
- border: 1px solid #A3B4D7;
-}
-
-th.dirtab {
- background: #EBEFF6;
- font-weight: bold;
-}
-
-hr {
- height: 0px;
- border: none;
- border-top: 1px solid #4A6AAA;
-}
-
-hr.footer {
- height: 1px;
-}
-
-/* @group Member Descriptions */
-
-table.memberdecls {
- border-spacing: 0px;
- padding: 0px;
-}
-
-.memberdecls td, .fieldtable tr {
- -webkit-transition-property: background-color, box-shadow;
- -webkit-transition-duration: 0.5s;
- -moz-transition-property: background-color, box-shadow;
- -moz-transition-duration: 0.5s;
- -ms-transition-property: background-color, box-shadow;
- -ms-transition-duration: 0.5s;
- -o-transition-property: background-color, box-shadow;
- -o-transition-duration: 0.5s;
- transition-property: background-color, box-shadow;
- transition-duration: 0.5s;
-}
-
-.memberdecls td.glow, .fieldtable tr.glow {
- background-color: cyan;
- box-shadow: 0 0 15px cyan;
-}
-
-.mdescLeft, .mdescRight,
-.memItemLeft, .memItemRight,
-.memTemplItemLeft, .memTemplItemRight, .memTemplParams {
- background-color: #F9FAFC;
- border: none;
- margin: 4px;
- padding: 1px 0 0 8px;
-}
-
-.mdescLeft, .mdescRight {
- padding: 0px 8px 4px 8px;
- color: #555;
-}
-
-.memSeparator {
- border-bottom: 1px solid #DEE4F0;
- line-height: 1px;
- margin: 0px;
- padding: 0px;
-}
-
-.memItemLeft, .memTemplItemLeft {
- white-space: nowrap;
-}
-
-.memItemRight {
- width: 100%;
-}
-
-.memTemplParams {
- color: #4665A2;
- white-space: nowrap;
- font-size: 80%;
-}
-
-/* @end */
-
-/* @group Member Details */
-
-/* Styles for detailed member documentation */
-
-.memtemplate {
- font-size: 80%;
- color: #4665A2;
- font-weight: normal;
- margin-left: 9px;
-}
-
-.memnav {
- background-color: #EBEFF6;
- border: 1px solid #A3B4D7;
- text-align: center;
- margin: 2px;
- margin-right: 15px;
- padding: 2px;
-}
-
-.mempage {
- width: 100%;
-}
-
-.memitem {
- padding: 0;
- margin-bottom: 10px;
- margin-right: 5px;
- -webkit-transition: box-shadow 0.5s linear;
- -moz-transition: box-shadow 0.5s linear;
- -ms-transition: box-shadow 0.5s linear;
- -o-transition: box-shadow 0.5s linear;
- transition: box-shadow 0.5s linear;
- display: table !important;
- width: 100%;
-}
-
-.memitem.glow {
- box-shadow: 0 0 15px cyan;
-}
-
-.memname {
- font-weight: bold;
- margin-left: 6px;
-}
-
-.memname td {
- vertical-align: bottom;
-}
-
-.memproto, dl.reflist dt {
- border-top: 1px solid #A8B8D9;
- border-left: 1px solid #A8B8D9;
- border-right: 1px solid #A8B8D9;
- padding: 6px 0px 6px 0px;
- color: #253555;
- font-weight: bold;
- text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
- background-image:url('nav_f.png');
- background-repeat:repeat-x;
- background-color: #E2E8F2;
- /* opera specific markup */
- box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- border-top-right-radius: 4px;
- border-top-left-radius: 4px;
- /* firefox specific markup */
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
- -moz-border-radius-topright: 4px;
- -moz-border-radius-topleft: 4px;
- /* webkit specific markup */
- -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- -webkit-border-top-right-radius: 4px;
- -webkit-border-top-left-radius: 4px;
-
-}
-
-.memdoc, dl.reflist dd {
- border-bottom: 1px solid #A8B8D9;
- border-left: 1px solid #A8B8D9;
- border-right: 1px solid #A8B8D9;
- padding: 6px 10px 2px 10px;
- background-color: #FBFCFD;
- border-top-width: 0;
- background-image:url('nav_g.png');
- background-repeat:repeat-x;
- background-color: #FFFFFF;
- /* opera specific markup */
- border-bottom-left-radius: 4px;
- border-bottom-right-radius: 4px;
- box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
- /* firefox specific markup */
- -moz-border-radius-bottomleft: 4px;
- -moz-border-radius-bottomright: 4px;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
- /* webkit specific markup */
- -webkit-border-bottom-left-radius: 4px;
- -webkit-border-bottom-right-radius: 4px;
- -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
-}
-
-dl.reflist dt {
- padding: 5px;
-}
-
-dl.reflist dd {
- margin: 0px 0px 10px 0px;
- padding: 5px;
-}
-
-.paramkey {
- text-align: right;
-}
-
-.paramtype {
- white-space: nowrap;
-}
-
-.paramname {
- color: #602020;
- white-space: nowrap;
-}
-.paramname em {
- font-style: normal;
-}
-.paramname code {
- line-height: 14px;
-}
-
-.params, .retval, .exception, .tparams {
- margin-left: 0px;
- padding-left: 0px;
-}
-
-.params .paramname, .retval .paramname {
- font-weight: bold;
- vertical-align: top;
-}
-
-.params .paramtype {
- font-style: italic;
- vertical-align: top;
-}
-
-.params .paramdir {
- font-family: "courier new",courier,monospace;
- vertical-align: top;
-}
-
-table.mlabels {
- border-spacing: 0px;
-}
-
-td.mlabels-left {
- width: 100%;
- padding: 0px;
-}
-
-td.mlabels-right {
- vertical-align: bottom;
- padding: 0px;
- white-space: nowrap;
-}
-
-span.mlabels {
- margin-left: 8px;
-}
-
-span.mlabel {
- background-color: #728DC1;
- border-top:1px solid #5373B4;
- border-left:1px solid #5373B4;
- border-right:1px solid #C4CFE5;
- border-bottom:1px solid #C4CFE5;
- text-shadow: none;
- color: white;
- margin-right: 4px;
- padding: 2px 3px;
- border-radius: 3px;
- font-size: 7pt;
- white-space: nowrap;
- vertical-align: middle;
-}
-
-
-
-/* @end */
-
-/* these are for tree view when not used as main index */
-
-div.directory {
- margin: 10px 0px;
- border-top: 1px solid #A8B8D9;
- border-bottom: 1px solid #A8B8D9;
- width: 100%;
-}
-
-.directory table {
- border-collapse:collapse;
-}
-
-.directory td {
- margin: 0px;
- padding: 0px;
- vertical-align: top;
-}
-
-.directory td.entry {
- white-space: nowrap;
- padding-right: 6px;
- padding-top: 3px;
-}
-
-.directory td.entry a {
- outline:none;
-}
-
-.directory td.entry a img {
- border: none;
-}
-
-.directory td.desc {
- width: 100%;
- padding-left: 6px;
- padding-right: 6px;
- padding-top: 3px;
- border-left: 1px solid rgba(0,0,0,0.05);
-}
-
-.directory tr.even {
- padding-left: 6px;
- background-color: #F7F8FB;
-}
-
-.directory img {
- vertical-align: -30%;
-}
-
-.directory .levels {
- white-space: nowrap;
- width: 100%;
- text-align: right;
- font-size: 9pt;
-}
-
-.directory .levels span {
- cursor: pointer;
- padding-left: 2px;
- padding-right: 2px;
- color: #3D578C;
-}
-
-div.dynheader {
- margin-top: 8px;
- -webkit-touch-callout: none;
- -webkit-user-select: none;
- -khtml-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
-}
-
-address {
- font-style: normal;
- color: #2A3D61;
-}
-
-table.doxtable {
- border-collapse:collapse;
- margin-top: 4px;
- margin-bottom: 4px;
-}
-
-table.doxtable td, table.doxtable th {
- border: 1px solid #2D4068;
- padding: 3px 7px 2px;
-}
-
-table.doxtable th {
- background-color: #374F7F;
- color: #FFFFFF;
- font-size: 110%;
- padding-bottom: 4px;
- padding-top: 5px;
-}
-
-table.fieldtable {
- /*width: 100%;*/
- margin-bottom: 10px;
- border: 1px solid #A8B8D9;
- border-spacing: 0px;
- -moz-border-radius: 4px;
- -webkit-border-radius: 4px;
- border-radius: 4px;
- -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
- -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
- box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
-}
-
-.fieldtable td, .fieldtable th {
- padding: 3px 7px 2px;
-}
-
-.fieldtable td.fieldtype, .fieldtable td.fieldname {
- white-space: nowrap;
- border-right: 1px solid #A8B8D9;
- border-bottom: 1px solid #A8B8D9;
- vertical-align: top;
-}
-
-.fieldtable td.fieldname {
- padding-top: 3px;
-}
-
-.fieldtable td.fielddoc {
- border-bottom: 1px solid #A8B8D9;
- /*width: 100%;*/
-}
-
-.fieldtable td.fielddoc p:first-child {
- margin-top: 0px;
-}
-
-.fieldtable td.fielddoc p:last-child {
- margin-bottom: 2px;
-}
-
-.fieldtable tr:last-child td {
- border-bottom: none;
-}
-
-.fieldtable th {
- background-image:url('nav_f.png');
- background-repeat:repeat-x;
- background-color: #E2E8F2;
- font-size: 90%;
- color: #253555;
- padding-bottom: 4px;
- padding-top: 5px;
- text-align:left;
- -moz-border-radius-topleft: 4px;
- -moz-border-radius-topright: 4px;
- -webkit-border-top-left-radius: 4px;
- -webkit-border-top-right-radius: 4px;
- border-top-left-radius: 4px;
- border-top-right-radius: 4px;
- border-bottom: 1px solid #A8B8D9;
-}
-
-
-.tabsearch {
- top: 0px;
- left: 10px;
- height: 36px;
- background-image: url('tab_b.png');
- z-index: 101;
- overflow: hidden;
- font-size: 13px;
-}
-
-.navpath ul
-{
- font-size: 11px;
- background-image:url('tab_b.png');
- background-repeat:repeat-x;
- background-position: 0 -5px;
- height:30px;
- line-height:30px;
- color:#8AA0CC;
- border:solid 1px #C2CDE4;
- overflow:hidden;
- margin:0px;
- padding:0px;
-}
-
-.navpath li
-{
- list-style-type:none;
- float:left;
- padding-left:10px;
- padding-right:15px;
- background-image:url('bc_s.png');
- background-repeat:no-repeat;
- background-position:right;
- color:#364D7C;
-}
-
-.navpath li.navelem a
-{
- height:32px;
- display:block;
- text-decoration: none;
- outline: none;
- color: #283A5D;
- font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
- text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
- text-decoration: none;
-}
-
-.navpath li.navelem a:hover
-{
- color:#6884BD;
-}
-
-.navpath li.footer
-{
- list-style-type:none;
- float:right;
- padding-left:10px;
- padding-right:15px;
- background-image:none;
- background-repeat:no-repeat;
- background-position:right;
- color:#364D7C;
- font-size: 8pt;
-}
-
-
-div.summary
-{
- float: right;
- font-size: 8pt;
- padding-right: 5px;
- width: 50%;
- text-align: right;
-}
-
-div.summary a
-{
- white-space: nowrap;
-}
-
-div.ingroups
-{
- font-size: 8pt;
- width: 50%;
- text-align: left;
-}
-
-div.ingroups a
-{
- white-space: nowrap;
-}
-
-div.header
-{
- background-image:url('nav_h.png');
- background-repeat:repeat-x;
- background-color: #F9FAFC;
- margin: 0px;
- border-bottom: 1px solid #C4CFE5;
-}
-
-div.headertitle
-{
- padding: 5px 5px 5px 10px;
-}
-
-dl
-{
- padding: 0 0 0 10px;
-}
-
-/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */
-dl.section
-{
- margin-left: 0px;
- padding-left: 0px;
-}
-
-dl.note
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #D0C000;
-}
-
-dl.warning, dl.attention
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #FF0000;
-}
-
-dl.pre, dl.post, dl.invariant
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #00D000;
-}
-
-dl.deprecated
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #505050;
-}
-
-dl.todo
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #00C0E0;
-}
-
-dl.test
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #3030E0;
-}
-
-dl.bug
-{
- margin-left:-7px;
- padding-left: 3px;
- border-left:4px solid;
- border-color: #C08050;
-}
-
-dl.section dd {
- margin-bottom: 6px;
-}
-
-
-#projectlogo
-{
- text-align: center;
- vertical-align: bottom;
- border-collapse: separate;
-}
-
-#projectlogo img
-{
- border: 0px none;
-}
-
#projectname
{
- border: 0px none;
- font: 300% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 2px 0px;
+ border: 0px none;
}
-
#projectbrief
{
- font: 60% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 0px;
+ font: 60% Tahoma, Arial,sans-serif;
}
-
#projectnumber
{
- font: 80% Tahoma, Arial,sans-serif;
- margin: 0px;
- padding: 0px;
-}
-
-#titlearea
-{
- padding: 0px;
- margin: 0px;
- width: 100%;
- border-bottom: 1px solid #5373B4;
-}
-
-.image
-{
- text-align: center;
-}
-
-.dotgraph
-{
- text-align: center;
+ font: 80% Tahoma, Arial,sans-serif;
}
-
-.mscgraph
+.arrow
{
- text-align: center;
-}
-
-.diagraph
-{
- text-align: center;
-}
-
-.caption
-{
- font-weight: bold;
-}
-
-div.zoom
-{
- border: 1px solid #90A5CE;
-}
-
-dl.citelist {
- margin-bottom:50px;
-}
-
-dl.citelist dt {
- color:#334975;
- float:left;
- font-weight:bold;
- margin-right:10px;
- padding:5px;
-}
-
-dl.citelist dd {
- margin:2px 0;
- padding:5px 0;
-}
-
-div.toc {
- padding: 14px 25px;
- background-color: #F4F6FA;
- border: 1px solid #D8DFEE;
- border-radius: 7px 7px 7px 7px;
- float: right;
- height: auto;
- margin: 0 20px 10px 10px;
- width: 200px;
-}
-
-div.toc li {
- background: url("bdwn.png") no-repeat scroll 0 5px transparent;
- font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif;
- margin-top: 5px;
- padding-left: 10px;
- padding-top: 2px;
-}
-
-div.toc h3 {
- font: bold 12px/1.2 Arial,FreeSans,sans-serif;
- color: #4665A2;
- border-bottom: 0 none;
- margin: 0;
-}
-
-div.toc ul {
- list-style: none outside none;
- border: medium none;
- padding: 0px;
-}
-
-div.toc li.level1 {
- margin-left: 0px;
-}
-
-div.toc li.level2 {
- margin-left: 15px;
-}
-
-div.toc li.level3 {
- margin-left: 30px;
-}
-
-div.toc li.level4 {
- margin-left: 45px;
-}
-
-.inherit_header {
- font-weight: bold;
- color: gray;
- cursor: pointer;
- -webkit-touch-callout: none;
- -webkit-user-select: none;
- -khtml-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
-}
-
-.inherit_header td {
- padding: 6px 0px 2px 5px;
-}
-
-.inherit {
- display: none;
-}
-
-tr.heading h2 {
- margin-top: 12px;
- margin-bottom: 4px;
-}
-
-/* tooltip related style info */
-
-.ttc {
- position: absolute;
- display: none;
-}
-
-#powerTip {
- cursor: default;
- white-space: nowrap;
- background-color: white;
- border: 1px solid gray;
- border-radius: 4px 4px 4px 4px;
- box-shadow: 1px 1px 7px gray;
- display: none;
- font-size: smaller;
- max-width: 80%;
- opacity: 0.9;
- padding: 1ex 1em 1em;
- position: absolute;
- z-index: 2147483647;
+ width: auto;
+ height: auto;
+ padding-left: 16px;
}
-
-#powerTip div.ttdoc {
- color: grey;
- font-style: italic;
-}
-
-#powerTip div.ttname a {
- font-weight: bold;
-}
-
-#powerTip div.ttname {
- font-weight: bold;
-}
-
-#powerTip div.ttdeci {
- color: #006318;
-}
-
-#powerTip div {
- margin: 0px;
- padding: 0px;
- font: 12px/16px Roboto,sans-serif;
-}
-
-#powerTip:before, #powerTip:after {
- content: "";
- position: absolute;
- margin: 0px;
-}
-
-#powerTip.n:after, #powerTip.n:before,
-#powerTip.s:after, #powerTip.s:before,
-#powerTip.w:after, #powerTip.w:before,
-#powerTip.e:after, #powerTip.e:before,
-#powerTip.ne:after, #powerTip.ne:before,
-#powerTip.se:after, #powerTip.se:before,
-#powerTip.nw:after, #powerTip.nw:before,
-#powerTip.sw:after, #powerTip.sw:before {
- border: solid transparent;
- content: " ";
- height: 0;
- width: 0;
- position: absolute;
-}
-
-#powerTip.n:after, #powerTip.s:after,
-#powerTip.w:after, #powerTip.e:after,
-#powerTip.nw:after, #powerTip.ne:after,
-#powerTip.sw:after, #powerTip.se:after {
- border-color: rgba(255, 255, 255, 0);
-}
-
-#powerTip.n:before, #powerTip.s:before,
-#powerTip.w:before, #powerTip.e:before,
-#powerTip.nw:before, #powerTip.ne:before,
-#powerTip.sw:before, #powerTip.se:before {
- border-color: rgba(128, 128, 128, 0);
-}
-
-#powerTip.n:after, #powerTip.n:before,
-#powerTip.ne:after, #powerTip.ne:before,
-#powerTip.nw:after, #powerTip.nw:before {
- top: 100%;
-}
-
-#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after {
- border-top-color: #ffffff;
- border-width: 10px;
- margin: 0px -10px;
-}
-#powerTip.n:before {
- border-top-color: #808080;
- border-width: 11px;
- margin: 0px -11px;
-}
-#powerTip.n:after, #powerTip.n:before {
- left: 50%;
-}
-
-#powerTip.nw:after, #powerTip.nw:before {
- right: 14px;
-}
-
-#powerTip.ne:after, #powerTip.ne:before {
- left: 14px;
-}
-
-#powerTip.s:after, #powerTip.s:before,
-#powerTip.se:after, #powerTip.se:before,
-#powerTip.sw:after, #powerTip.sw:before {
- bottom: 100%;
-}
-
-#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after {
- border-bottom-color: #ffffff;
- border-width: 10px;
- margin: 0px -10px;
-}
-
-#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before {
- border-bottom-color: #808080;
- border-width: 11px;
- margin: 0px -11px;
-}
-
-#powerTip.s:after, #powerTip.s:before {
- left: 50%;
-}
-
-#powerTip.sw:after, #powerTip.sw:before {
- right: 14px;
-}
-
-#powerTip.se:after, #powerTip.se:before {
- left: 14px;
-}
-
-#powerTip.e:after, #powerTip.e:before {
- left: 100%;
-}
-#powerTip.e:after {
- border-left-color: #ffffff;
- border-width: 10px;
- top: 50%;
- margin-top: -10px;
-}
-#powerTip.e:before {
- border-left-color: #808080;
- border-width: 11px;
- top: 50%;
- margin-top: -11px;
-}
-
-#powerTip.w:after, #powerTip.w:before {
- right: 100%;
-}
-#powerTip.w:after {
- border-right-color: #ffffff;
- border-width: 10px;
- top: 50%;
- margin-top: -10px;
-}
-#powerTip.w:before {
- border-right-color: #808080;
- border-width: 11px;
- top: 50%;
- margin-top: -11px;
-}
-
-@media print
-{
- #top { display: none; }
- #side-nav { display: none; }
- #nav-path { display: none; }
- body { overflow:visible; }
- h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
- .summary { display: none; }
- .memitem { page-break-inside: avoid; }
- #doc-content
- {
- margin-left:0 !important;
- height:auto !important;
- width:auto !important;
- overflow:inherit;
- display:inline;
- }
+/* With doxygen versions <= 1.9.2, the default setting 'overflow: hidden;'
+ * causes problems. Doxygen 1.9.3 already fixes this with commit
+ * 590198b416cd53313d150428d2f912586065ea0d (December 1, 2021,
+ * issue #8924: "Horizontal scroll bar missing in HTML for wide
+ * class="dotgraph" objects"), but this setting is required to run
+ * properly with versions <= 1.9.2. */
+ul {
+ overflow: visible;
+}
-
diff --git a/src/common/include/gudhi/distance_functions.h b/src/common/include/gudhi/distance_functions.h
index 9bbc62b7..5e5a1e31 100644
--- a/src/common/include/gudhi/distance_functions.h
+++ b/src/common/include/gudhi/distance_functions.h
@@ -13,8 +13,6 @@
#include <gudhi/Debug_utils.h>
-#include <gudhi/Miniball.hpp>
-
#include <boost/range/metafunctions.hpp>
#include <boost/range/size.hpp>
@@ -59,53 +57,6 @@ class Euclidean_distance {
}
};
-/** @brief Compute the radius of the minimal enclosing ball between Points given by a range of coordinates.
- * The points are assumed to have the same dimension. */
-class Minimal_enclosing_ball_radius {
- public:
- /** \brief Minimal_enclosing_ball_radius from two points.
- *
- * @param[in] point_1 First point.
- * @param[in] point_2 second point.
- * @return The minimal enclosing ball radius for the two points (aka. Euclidean distance / 2.).
- *
- * \tparam Point must be a range of Cartesian coordinates.
- *
- */
- template< typename Point >
- typename std::iterator_traits<typename boost::range_iterator<Point>::type>::value_type
- operator()(const Point& point_1, const Point& point_2) const {
- return Euclidean_distance()(point_1, point_2) / 2.;
- }
- /** \brief Minimal_enclosing_ball_radius from a point cloud.
- *
- * @param[in] point_cloud The points.
- * @return The minimal enclosing ball radius for the points.
- *
- * \tparam Point_cloud must be a range of points with Cartesian coordinates.
- * Point_cloud is a range over a range of Coordinate.
- *
- */
- template< typename Point_cloud,
- typename Point_iterator = typename boost::range_const_iterator<Point_cloud>::type,
- typename Point = typename std::iterator_traits<Point_iterator>::value_type,
- typename Coordinate_iterator = typename boost::range_const_iterator<Point>::type,
- typename Coordinate = typename std::iterator_traits<Coordinate_iterator>::value_type>
- Coordinate
- operator()(const Point_cloud& point_cloud) const {
- using Min_sphere = Miniball::Miniball<Miniball::CoordAccessor<Point_iterator, Coordinate_iterator>>;
-
- Min_sphere ms(boost::size(*point_cloud.begin()), point_cloud.begin(), point_cloud.end());
-#ifdef DEBUG_TRACES
- std::clog << "Minimal_enclosing_ball_radius = " << std::sqrt(ms.squared_radius()) << " | nb points = "
- << boost::size(point_cloud) << " | dimension = "
- << boost::size(*point_cloud.begin()) << std::endl;
-#endif // DEBUG_TRACES
-
- return std::sqrt(ms.squared_radius());
- }
-};
-
} // namespace Gudhi
#endif // DISTANCE_FUNCTIONS_H_
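
The removed helper's two-point case reduces to half the Euclidean distance between the points. A minimal plain-Python sketch of that rule, for illustration only (not a GUDHI API; the general point-cloud case needs a miniball solver such as the removed Miniball code):

    import math

    def minimal_enclosing_ball_radius(p1, p2):
        # For exactly two points, the smallest enclosing ball is centered at
        # their midpoint, so its radius is half the Euclidean distance.
        return math.dist(p1, p2) / 2.0

    print(minimal_enclosing_ball_radius([0.0, 0.0], [3.0, 4.0]))  # 2.5
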
diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h
index 29d5423d..a7d82541 100644
--- a/src/common/include/gudhi/reader_utils.h
+++ b/src/common/include/gudhi/reader_utils.h
@@ -231,7 +231,7 @@ std::vector<std::vector<Filtration_value>> read_lower_triangular_matrix_from_csv
std::string line;
- // the first line is emtpy, so we ignore it:
+ // the first line is empty, so we ignore it:
std::getline(in, line);
std::vector<Filtration_value> values_in_this_line;
result.push_back(values_in_this_line);
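
The reader above skips an empty first line before parsing the lower-triangular rows. A short sketch of the expected file layout, assuming the Python binding gudhi.read_lower_triangular_matrix_from_csv_file with csv_file and separator arguments as exposed in the Python package:

    import gudhi

    # Expected layout: the first line is empty (point 0 has no predecessors),
    # then line i lists the i distances from point i to points 0..i-1.
    with open("lower_triangular.csv", "w") as f:
        f.write("\n0.5\n0.3;0.7\n")

    matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
        csv_file="lower_triangular.csv", separator=";")
    print(matrix)  # expected: [[], [0.5], [0.3, 0.7]]
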
diff --git a/src/common/include/gudhi/writing_persistence_to_file.h b/src/common/include/gudhi/writing_persistence_to_file.h
index 2e36b831..3a0df1a8 100644
--- a/src/common/include/gudhi/writing_persistence_to_file.h
+++ b/src/common/include/gudhi/writing_persistence_to_file.h
@@ -48,7 +48,7 @@ class Persistence_interval_common {
: birth_(birth), death_(death), dimension_(dim), arith_element_(field) {}
/**
- * Operator to compare two persistence pairs. During the comparision all the
+ * Operator to compare two persistence pairs. During the comparison all the
 * fields: birth, death, dimension and arith_element_ are taken into account
* and they all have to be equal for two pairs to be equal.
**/
@@ -65,7 +65,7 @@ class Persistence_interval_common {
/**
* Operator to compare objects of a type Persistence_interval_common.
 * One interval is smaller than the other if it has lower persistence.
- * Note that this operator do not take Arith_element into account when doing comparisions.
+ * Note that this operator does not take Arith_element into account when doing comparisons.
**/
bool operator<(const Persistence_interval_common& i2) const {
return fabs(this->death_ - this->birth_) < fabs(i2.death_ - i2.birth_);
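
The ordering documented above compares intervals by persistence only; a plain-Python sketch of the same rule (illustrative, not the C++ API):

    # Intervals compare by persistence |death - birth| only; the field
    # element is ignored, exactly as the operator< above does.
    intervals = [(0.0, 2.0), (1.0, 1.5), (0.5, 3.0)]
    print(sorted(intervals, key=lambda i: abs(i[1] - i[0])))
    # [(1.0, 1.5), (0.0, 2.0), (0.5, 3.0)]
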
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index fbfb0d1f..5f323935 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -70,6 +70,7 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_strong_witness_complex', ")
# Modules that should not be auto-imported in __init__.py
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'tensorflow', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ")
@@ -148,10 +149,6 @@ if(PYTHONINTERP_FOUND)
add_gudhi_debug_info("Eigen3 version ${EIGEN3_VERSION}")
# No problem, even if no CGAL found
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
- set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DGUDHI_USE_EIGEN3', ")
- set(GUDHI_USE_EIGEN3 "True")
- else (EIGEN3_FOUND)
- set(GUDHI_USE_EIGEN3 "False")
endif (EIGEN3_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ")
@@ -180,6 +177,15 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
endif ()
+ # With windows vcpkg eigen 3.4.0#2, the build fails with:
+ # error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+ # cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+ # The workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+ if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int', ")
+ endif()
+
+
add_gudhi_debug_info("Boost version ${Boost_VERSION}")
if(CGAL_FOUND)
if(NOT CGAL_VERSION VERSION_LESS 5.3.0)
@@ -215,13 +221,14 @@ if(PYTHONINTERP_FOUND)
endif(NOT GMP_LIBRARIES_DIR)
add_GUDHI_PYTHON_lib_dir(${GMP_LIBRARIES_DIR})
message("** Add gmp ${GMP_LIBRARIES_DIR}")
+ # When FORCE_CGAL_NOT_TO_BUILD_WITH_GMPXX is set, not defining CGAL_USE_GMPXX is sufficient
if(GMPXX_FOUND)
add_gudhi_debug_info("GMPXX_LIBRARIES = ${GMPXX_LIBRARIES}")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMPXX', ")
add_GUDHI_PYTHON_lib("${GMPXX_LIBRARIES}")
add_GUDHI_PYTHON_lib_dir(${GMPXX_LIBRARIES_DIR})
message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
- endif(GMPXX_FOUND)
+ endif()
endif(GMP_FOUND)
if(MPFR_FOUND)
add_gudhi_debug_info("MPFR_LIBRARIES = ${MPFR_LIBRARIES}")
@@ -283,13 +290,15 @@ if(PYTHONINTERP_FOUND)
# Other .py files
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
- file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera")
file(COPY "gudhi/datasets" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
+ file(COPY "gudhi/sklearn" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
# Some files for pip package
@@ -323,9 +332,9 @@ if(PYTHONINTERP_FOUND)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
# User warning - Sphinx is a static pages generator, and configured to work fine with user_version
- # Images and biblio warnings because not found on developper version
+ # Images and biblio warnings because they are not found on the developer version
if (GUDHI_PYTHON_PATH STREQUAL "src/python")
- set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
+ set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for the user version, but you are running it on the developer version. Images and biblio will be missing")
endif()
# sphinx target requires gudhi.so, because conf.py reads gudhi version from it
add_custom_target(sphinx
@@ -478,7 +487,7 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_euclidean_witness_complex)
# Datasets generators
- add_gudhi_py_test(test_datasets_generators) # TODO separate full python datasets generators in another test file independant from CGAL ?
+ add_gudhi_py_test(test_datasets_generators) # TODO: separate full python datasets generators into another test file independent of CGAL?
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
@@ -556,11 +565,21 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_representations)
endif()
+ # Differentiation
+ if(TENSORFLOW_FOUND)
+ add_gudhi_py_test(test_diff)
+ endif()
+
# Betti curves
if(SKLEARN_FOUND AND SCIPY_FOUND)
add_gudhi_py_test(test_betti_curve_representations)
endif()
+ # Representations preprocessing
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_representations_preprocessing)
+ endif()
+
# Time Delay
add_gudhi_py_test(test_time_delay)
@@ -585,6 +604,20 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_dtm_rips_complex)
endif()
+ # Fetch remote datasets
+ if(WITH_GUDHI_REMOTE_TEST)
+ add_gudhi_py_test(test_remote_datasets)
+ endif()
+
+ # sklearn
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_sklearn_cubical_persistence)
+ endif()
+
+ # persistence graphical tools
+ if(MATPLOTLIB_FOUND)
+ add_gudhi_py_test(test_persistence_graphical_tools)
+ endif()
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
@@ -595,4 +628,4 @@ if(PYTHONINTERP_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
-endif(PYTHONINTERP_FOUND) \ No newline at end of file
+endif(PYTHONINTERP_FOUND)
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index cfd22742..9e67d38a 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -27,7 +27,8 @@ Remarks
If you pass :code:`precision = 'exact'` to :func:`~gudhi.AlphaComplex.__init__`, the filtration values are the exact
ones converted to float. This can be very slow.
If you pass :code:`precision = 'safe'` (the default), the filtration values are only
- guaranteed to have a small multiplicative error compared to the exact value.
+ guaranteed to have a small multiplicative error compared to the exact value, see
+ :func:`~gudhi.AlphaComplex.set_float_relative_precision` to modify the precision.
A drawback, when computing persistence, is that an empty exact interval [10^12,10^12] may become a
non-empty approximate interval [10^12,10^12+10^6].
Using :code:`precision = 'fast'` makes the computations slightly faster, and the combinatorics are still exact, but
@@ -177,11 +178,11 @@ Weighted version
^^^^^^^^^^^^^^^^
A weighted version for Alpha complex is available. It is like a usual Alpha complex, but based on a
-`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#title20>`_.
+`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT>`_.
This example builds the weighted alpha-complex of a small molecule, where atoms have different sizes.
It is taken from
-`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13>`_.
+`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes>`_.
Then, it is asked to display information about the alpha complex.
diff --git a/src/python/doc/cubical_complex_sklearn_itf_ref.rst b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
new file mode 100644
index 00000000..90ae9ccd
--- /dev/null
+++ b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
@@ -0,0 +1,102 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Cubical complex persistence scikit-learn like interface
+#######################################################
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.6.0
+ - :License: MIT
+ - :Requires: `Scikit-learn <installation.html#scikit-learn>`_
+
+Cubical complex persistence scikit-learn like interface example
+---------------------------------------------------------------
+
+In this example, handwritten digits are used as the input.
+A TDA scikit-learn pipeline is constructed, composed of:
+
+#. :class:`~gudhi.sklearn.cubical_persistence.CubicalPersistence` that builds a cubical complex from the inputs and
+ returns its persistence diagrams
+#. :class:`~gudhi.representations.preprocessing.DiagramSelector` that removes non-finite persistence diagrams values
+#. :class:`~gudhi.representations.vector_methods.PersistenceImage` that builds the persistence images from persistence diagrams
+#. `SVC <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ which is a scikit-learn support
+ vector classifier.
+
+This ML pipeline is trained to detect if the handwritten digit is an '8' or not, thanks to the fact that an '8' has
+two holes in :math:`\mathbf{H}_1`, or, like in this example, three connected components in :math:`\mathbf{H}_0`.
+
+.. code-block:: python
+
+ # Standard scientific Python imports
+ import numpy as np
+
+ # Standard scikit-learn imports
+ from sklearn.datasets import fetch_openml
+ from sklearn.pipeline import Pipeline
+ from sklearn.model_selection import train_test_split
+ from sklearn.svm import SVC
+ from sklearn import metrics
+
+ # Import TDA pipeline requirements
+ from gudhi.sklearn.cubical_persistence import CubicalPersistence
+ from gudhi.representations import PersistenceImage, DiagramSelector
+
+ X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
+
+ # Target is: "is an eight?"
+ y = (y == "8") * 1
+ print("There are", np.sum(y), "eights out of", len(y), "numbers.")
+
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
+ pipe = Pipeline(
+ [
+ ("cub_pers", CubicalPersistence(homology_dimensions=0, newshape=[-1, 28, 28], n_jobs=-2)),
+ # Or for multiple persistence dimension computation
+ # ("cub_pers", CubicalPersistence(homology_dimensions=[0, 1], newshape=[-1, 28, 28])),
+ # ("H0_diags", DimensionSelector(index=0), # where index is the index in homology_dimensions array
+ ("finite_diags", DiagramSelector(use=True, point_type="finite")),
+ (
+ "pers_img",
+ PersistenceImage(bandwidth=50, weight=lambda x: x[1] ** 2, im_range=[0, 256, 0, 256], resolution=[20, 20]),
+ ),
+ ("svc", SVC()),
+ ]
+ )
+
+ # Learn from the train subset
+ pipe.fit(X_train, y_train)
+ # Predict from the test subset
+ predicted = pipe.predict(X_test)
+
+ print(f"Classification report for TDA pipeline {pipe}:\n" f"{metrics.classification_report(y_test, predicted)}\n")
+
+.. code-block:: none
+
+ There are 6825 eights out of 70000 numbers.
+ Classification report for TDA pipeline Pipeline(steps=[('cub_pers',
+ CubicalPersistence(newshape=[28, 28], n_jobs=-2)),
+ ('finite_diags', DiagramSelector(use=True)),
+ ('pers_img',
+ PersistenceImage(bandwidth=50, im_range=[0, 256, 0, 256],
+ weight=<function <lambda> at 0x7f3e54137ae8>)),
+ ('svc', SVC())]):
+ precision recall f1-score support
+
+ 0 0.97 0.99 0.98 25284
+ 1 0.92 0.68 0.78 2716
+
+ accuracy 0.96 28000
+ macro avg 0.94 0.84 0.88 28000
+ weighted avg 0.96 0.96 0.96 28000
+
+Cubical complex persistence scikit-learn like interface reference
+-----------------------------------------------------------------
+
+.. autoclass:: gudhi.sklearn.cubical_persistence.CubicalPersistence
+ :members:
+ :special-members: __init__
+ :show-inheritance: \ No newline at end of file
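
The pipeline above computes a single homology dimension. For several dimensions at once, a selection step is needed between the persistence computation and the diagram processing; a hedged sketch following the commented-out pipeline step above, assuming DimensionSelector lives in gudhi.representations.preprocessing and takes the index within the homology_dimensions array:

    from sklearn.pipeline import Pipeline

    from gudhi.sklearn.cubical_persistence import CubicalPersistence
    from gudhi.representations import DiagramSelector
    # Assumed import path, following the commented pipeline step above:
    from gudhi.representations.preprocessing import DimensionSelector

    pipe = Pipeline(
        [
            # One diagram per entry of homology_dimensions for each image
            ("cub_pers", CubicalPersistence(homology_dimensions=[0, 1], newshape=[-1, 28, 28], n_jobs=-2)),
            # Keep only the H0 diagrams (index 0 in the homology_dimensions array)
            ("H0_diags", DimensionSelector(index=0)),
            ("finite_diags", DiagramSelector(use=True, point_type="finite")),
            # A classifier such as SVC() can be appended here as in the example above.
        ]
    )
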
diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc
index 87db184d..b27843e5 100644
--- a/src/python/doc/cubical_complex_sum.inc
+++ b/src/python/doc/cubical_complex_sum.inc
@@ -1,14 +1,22 @@
.. table::
:widths: 30 40 30
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
- | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | |
- | :alt: Cubical complex representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | | |
- | | | :License: MIT |
- | | | |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
- | | * :doc:`periodic_cubical_complex_ref` |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
+ | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | :Since: GUDHI 2.0.0 |
+ | :alt: Cubical complex representation | | :License: MIT |
+ | :figclass: align-center | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
+ | | * :doc:`periodic_cubical_complex_ref` |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_sklearn_itf_ref` | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ | img/sklearn.png | | |
+ | :target: https://scikit-learn.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
diff --git a/src/python/doc/cubical_complex_tflow_itf_ref.rst b/src/python/doc/cubical_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..b32f5e47
--- /dev/null
+++ b/src/python/doc/cubical_complex_tflow_itf_ref.rst
@@ -0,0 +1,40 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for cubical persistence
+########################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from cubical persistence
+-----------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import CubicalLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=tf.float32, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testoutput::
+
+ [[ 0. 0. 0. ]
+ [ 0. 0.5 0. ]
+ [ 0. 0. -0.5]]
+
+Documentation for CubicalLayer
+------------------------------
+
+.. autoclass:: gudhi.tensorflow.CubicalLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst
index 6a211347..42a23875 100644
--- a/src/python/doc/cubical_complex_user.rst
+++ b/src/python/doc/cubical_complex_user.rst
@@ -7,14 +7,7 @@ Cubical complex user manual
Definition
----------
-===================================== ===================================== =====================================
-:Author: Pawel Dlotko :Since: GUDHI PYTHON 2.0.0 :License: GPL v3
-===================================== ===================================== =====================================
-
-+---------------------------------------------+----------------------------------------------------------------------+
-| :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+---------------------------------------------+----------------------------------------------------------------------+
+.. include:: cubical_complex_sum.inc
The cubical complex is an example of a structured complex useful in computational mathematics (especially rigorous
numerics) and image analysis.
@@ -163,4 +156,4 @@ Tutorial
--------
This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-cubical-complexes.ipynb>`_
-explains how to represent sublevels sets of functions using cubical complexes. \ No newline at end of file
+explains how to represent sublevel sets of functions using cubical complexes.
diff --git a/src/python/doc/datasets_generators.inc b/src/python/doc/datasets.inc
index 8d169275..95a87678 100644
--- a/src/python/doc/datasets_generators.inc
+++ b/src/python/doc/datasets.inc
@@ -2,7 +2,7 @@
:widths: 30 40 30
+-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
- | .. figure:: | Datasets generators (points). | :Authors: Hind Montassif |
+ | .. figure:: | Datasets either generated or fetched. | :Authors: Hind Montassif |
| img/sphere_3d.png | | |
| | | :Since: GUDHI 3.5.0 |
| | | |
@@ -10,5 +10,5 @@
| | | |
| | | :Requires: `CGAL <installation.html#cgal>`_ |
+-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
- | * :doc:`datasets_generators` |
+ | * :doc:`datasets` |
+-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/datasets_generators.rst b/src/python/doc/datasets.rst
index 260c3882..2d11a19d 100644
--- a/src/python/doc/datasets_generators.rst
+++ b/src/python/doc/datasets.rst
@@ -3,12 +3,14 @@
.. To get rid of WARNING: document isn't included in any toctree
-===========================
-Datasets generators manual
-===========================
+================
+Datasets manual
+================
-We provide the generation of different customizable datasets to use as inputs for Gudhi complexes and data structures.
+Datasets generators
+===================
+We provide the generation of different customizable datasets to use as inputs for Gudhi complexes and data structures.
Points generators
------------------
@@ -103,3 +105,29 @@ Example
.. autofunction:: gudhi.datasets.generators.points.torus
+
+
+Fetching datasets
+=================
+
+We provide some ready-to-use datasets that are not shipped with GUDHI by default and need to be fetched explicitly.
+
+By **default**, the fetched datasets directory is set to a folder named **'gudhi_data'** in the **user home folder**.
+Alternatively, it can be set using the **'GUDHI_DATA'** environment variable.
+
+.. autofunction:: gudhi.datasets.remote.fetch_bunny
+
+.. figure:: ./img/bunny.png
+ :figclass: align-center
+
+ 3D Stanford bunny with 35947 vertices.
+
+
+.. autofunction:: gudhi.datasets.remote.fetch_spiral_2d
+
+.. figure:: ./img/spiral_2d.png
+ :figclass: align-center
+
+ 2D spiral with 114562 vertices.
+
+.. autofunction:: gudhi.datasets.remote.clear_data_home
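
A short usage sketch for the fetchers documented above; the returned shapes follow the figure captions, and GUDHI_DATA overrides the default 'gudhi_data' folder in the user home as stated:

    import os

    # Optional: override the default '~/gudhi_data' cache before fetching.
    os.environ["GUDHI_DATA"] = "/tmp/gudhi_data"

    from gudhi.datasets import remote

    bunny = remote.fetch_bunny()       # expected shape (35947, 3), per the caption above
    spiral = remote.fetch_spiral_2d()  # expected shape (114562, 2)
    print(bunny.shape, spiral.shape)

    # Remove everything cached under the data home.
    remote.clear_data_home()
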
diff --git a/src/python/doc/differentiation_sum.inc b/src/python/doc/differentiation_sum.inc
new file mode 100644
index 00000000..140cf180
--- /dev/null
+++ b/src/python/doc/differentiation_sum.inc
@@ -0,0 +1,12 @@
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.6.0
+ - :License: MIT
+ - :Requires: `TensorFlow <installation.html#tensorflow>`_
+
+We provide TensorFlow 2 models that can handle automatic differentiation for the computation of persistence diagrams from complexes available in the Gudhi library.
+This includes simplex trees, cubical complexes and Vietoris-Rips complexes. Detailed examples of how to use these layers in practice are available
+in the following `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-optimization.ipynb>`_. Note that even if TensorFlow GPU is enabled, all
+internal computations using Gudhi will be done on CPU.
diff --git a/src/python/doc/img/bunny.png b/src/python/doc/img/bunny.png
new file mode 100644
index 00000000..769aa530
--- /dev/null
+++ b/src/python/doc/img/bunny.png
Binary files differ
diff --git a/src/python/doc/img/sklearn.png b/src/python/doc/img/sklearn.png
new file mode 100644
index 00000000..d1fecbbf
--- /dev/null
+++ b/src/python/doc/img/sklearn.png
Binary files differ
diff --git a/src/python/doc/img/spiral_2d.png b/src/python/doc/img/spiral_2d.png
new file mode 100644
index 00000000..abd247cd
--- /dev/null
+++ b/src/python/doc/img/spiral_2d.png
Binary files differ
diff --git a/src/python/doc/img/tensorflow.png b/src/python/doc/img/tensorflow.png
new file mode 100644
index 00000000..a75f3f5b
--- /dev/null
+++ b/src/python/doc/img/tensorflow.png
Binary files differ
diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst
index 2d7921ae..35f4ba46 100644
--- a/src/python/doc/index.rst
+++ b/src/python/doc/index.rst
@@ -92,7 +92,7 @@ Clustering
.. include:: clustering.inc
-Datasets generators
-*******************
+Datasets
+********
-.. include:: datasets_generators.inc
+.. include:: datasets.inc
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index cff84691..4eefd415 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -175,7 +175,7 @@ A complete configuration would be :
Scikit-learn version 1.0.1
POT version 0.8.0
HNSWlib found
- PyKeOps version [pyKeOps]: 1.5
+ PyKeOps version [pyKeOps]: 2.1
EagerPy version 0.30.0
TensorFlow version 2.7.0
Sphinx version 4.3.0
@@ -396,7 +396,11 @@ mathematics, science, and engineering.
TensorFlow
----------
-`TensorFlow <https://www.tensorflow.org>`_ is currently only used in some automatic differentiation tests.
+The :doc:`cubical complex </cubical_complex_tflow_itf_ref>`, :doc:`simplex tree </ls_simplex_tree_tflow_itf_ref>`
+and :doc:`Rips complex </rips_complex_tflow_itf_ref>` modules require `TensorFlow <https://www.tensorflow.org>`_
+for incorporating them in neural nets.
+
+`TensorFlow <https://www.tensorflow.org>`_ is also used in some automatic differentiation tests.
Bug reports and contributions
*****************************
diff --git a/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
new file mode 100644
index 00000000..9d7d633f
--- /dev/null
+++ b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
@@ -0,0 +1,53 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for lower-star persistence on simplex trees
+############################################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from lower-star filtration of a simplex tree
+-------------------------------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import LowerStarSimplexTreeLayer
+ import tensorflow as tf
+ import gudhi as gd
+
+ st = gd.SimplexTree()
+ st.insert([0, 1])
+ st.insert([1, 2])
+ st.insert([2, 3])
+ st.insert([3, 4])
+ st.insert([4, 5])
+ st.insert([5, 6])
+ st.insert([6, 7])
+ st.insert([7, 8])
+ st.insert([8, 9])
+ st.insert([9, 10])
+
+ F = tf.Variable([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=tf.float32, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [F])
+ print(grads[0].indices.numpy())
+ print(grads[0].values.numpy())
+
+.. testoutput::
+
+ [2 4]
+ [-1. 1.]
+
+Documentation for LowerStarSimplexTreeLayer
+-------------------------------------------
+
+.. autoclass:: gudhi.tensorflow.LowerStarSimplexTreeLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst
index 0b820abf..8633cadb 100644
--- a/src/python/doc/nerve_gic_complex_user.rst
+++ b/src/python/doc/nerve_gic_complex_user.rst
@@ -12,7 +12,7 @@ Definition
Visualizations of the simplicial complexes can be done with either
neato (from `graphviz <http://www.graphviz.org/>`_),
`geomview <http://www.geomview.org/>`_,
-`KeplerMapper <https://github.com/MLWave/kepler-mapper>`_.
+`KeplerMapper <https://github.com/scikit-tda/kepler-mapper>`_.
Input point clouds are assumed to be OFF files (cf. `OFF file format <fileformats.html#off-file-format>`_).
Covers
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index d95b9d2b..e1d28c71 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -60,7 +60,7 @@ of shape (N x 2) encoding a persistence diagram (in a given dimension).
import matplotlib.pyplot as plt
import gudhi
import numpy as np
- d = np.array([[0, 1], [1, 2], [1, np.inf]])
+ d = np.array([[0., 1.], [1., 2.], [1., np.inf]])
gudhi.plot_persistence_diagram(d)
plt.show()
diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst
index a3f294b2..39744b95 100644
--- a/src/python/doc/persistent_cohomology_user.rst
+++ b/src/python/doc/persistent_cohomology_user.rst
@@ -6,19 +6,24 @@ Persistent cohomology user manual
=================================
Definition
----------
-===================================== ===================================== =====================================
-:Author: Clément Maria :Since: GUDHI PYTHON 2.0.0 :License: GPL v3
-===================================== ===================================== =====================================
-
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
-| :doc:`persistent_cohomology_user` | Please refer to each data structure that contains persistence |
-| | feature for reference: |
-| | |
-| | * :doc:`simplex_tree_ref` |
-| | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Author: Clément Maria
+ - :Since: GUDHI 2.0.0
+ - :License: MIT
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :doc:`persistent_cohomology_user`
+ - Please refer to each data structure that contains a persistence feature for reference:
+ * :doc:`simplex_tree_ref`
+ * :doc:`cubical_complex_ref`
+ * :doc:`periodic_cubical_complex_ref`
Computation of persistent cohomology using the algorithm of :cite:`DBLP:journals/dcg/SilvaMV11` and
:cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed Annotation Matrix implementation of
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index 2cb24990..2b125e54 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -11,4 +11,9 @@
| | | |
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
| * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
- +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | .. image:: | * :doc:`rips_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_tflow_itf_ref.rst b/src/python/doc/rips_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..3ce75868
--- /dev/null
+++ b/src/python/doc/rips_complex_tflow_itf_ref.rst
@@ -0,0 +1,48 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for Vietoris-Rips persistence
+##############################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from Vietoris-Rips persistence
+-----------------------------------------------------------
+
+.. testsetup::
+
+ import numpy
+ numpy.set_printoptions(precision=4)
+
+.. testcode::
+
+ from gudhi.tensorflow import RipsLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[1.,1.],[2.,2.]], dtype=tf.float32, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testcleanup::
+
+ numpy.set_printoptions(precision=8)
+
+.. testoutput::
+
+ [[-0.5 -0.5]
+ [ 0.5 0.5]]
+
+Documentation for RipsLayer
+---------------------------
+
+.. autoclass:: gudhi.tensorflow.RipsLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index 27d218d4..c41a7803 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -7,13 +7,7 @@ Rips complex user manual
Definition
----------
-================================================================================ ================================ ======================
-:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse, Yuichi Ike :Since: GUDHI 2.0.0 :License: GPL v3
-================================================================================ ================================ ======================
-
-+-------------------------------------------+----------------------------------------------------------------------+
-| :doc:`rips_complex_user` | :doc:`rips_complex_ref` |
-+-------------------------------------------+----------------------------------------------------------------------+
+.. include:: rips_complex_sum.inc
The `Rips complex <https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex>`_ is a simplicial complex that
generalizes proximity (:math:`\varepsilon`-ball) graphs to higher dimensions. The vertices correspond to the input
diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc
index a8858f16..6b534c9e 100644
--- a/src/python/doc/simplex_tree_sum.inc
+++ b/src/python/doc/simplex_tree_sum.inc
@@ -1,13 +1,18 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
- | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
- | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | The data structure is described in | |
- | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
+ | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
+ | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The data structure is described in | |
+ | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | .. image:: | * :doc:`ls_simplex_tree_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
diff --git a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
index ea2eb7e1..0b35dbc5 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
@@ -40,7 +40,7 @@ parser.add_argument(
args = parser.parse_args()
if not (-1.0 < args.min_edge_correlation < 1.0):
- print("Wrong value of the treshold corelation (should be between -1 and 1).")
+ print("Wrong value of the threshold corelation (should be between -1 and 1).")
sys.exit(1)
print("#####################################################################")
diff --git a/src/python/gudhi/__init__.py.in b/src/python/gudhi/__init__.py.in
index 3043201a..79e12fbc 100644
--- a/src/python/gudhi/__init__.py.in
+++ b/src/python/gudhi/__init__.py.in
@@ -23,10 +23,6 @@ __all__ = [@GUDHI_PYTHON_MODULES@ @GUDHI_PYTHON_MODULES_EXTRA@]
__available_modules = ''
__missing_modules = ''
-# For unitary tests purpose
-# could use "if 'collapse_edges' in gudhi.__all__" when collapse edges will have a python module
-__GUDHI_USE_EIGEN3 = @GUDHI_USE_EIGEN3@
-
# Try to import * from gudhi.__module_name for default modules.
# Extra modules require an explicit import by the user (mostly because of
# unusual dependencies, but also to avoid cluttering namespace gudhi and
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index a4888914..375e1561 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -31,6 +31,10 @@ cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
Alpha_complex_interface(vector[vector[double]] points, vector[double] weights, bool fast_version, bool exact_version) nogil except +
vector[double] get_point(int vertex) nogil except +
void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square, bool default_filtration_value) nogil except +
+ @staticmethod
+ void set_float_relative_precision(double precision) nogil
+ @staticmethod
+ double get_float_relative_precision() nogil
# AlphaComplex python interface
cdef class AlphaComplex:
@@ -133,3 +137,28 @@ cdef class AlphaComplex:
self.this_ptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
mas, compute_filtration)
return stree
+
+ @staticmethod
+ def set_float_relative_precision(precision):
+ """
+ :param precision: When the AlphaComplex is constructed with :code:`precision = 'safe'` (the default),
+ one can set the float relative precision of filtration values computed in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree`.
+ Default is :code:`1e-5` (cf. :func:`~gudhi.AlphaComplex.get_float_relative_precision`).
+ For more details, please refer to
+ `CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double <https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html>`_
+ :type precision: float
+ """
+        if precision <= 0. or precision >= 1.:
+ raise ValueError("Relative precision value must be strictly greater than 0 and strictly lower than 1")
+ Alpha_complex_interface.set_float_relative_precision(precision)
+
+ @staticmethod
+ def get_float_relative_precision():
+ """
+ :returns: The float relative precision of filtration values computation in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree` when the AlphaComplex is constructed with
+ :code:`precision = 'safe'` (the default).
+ :rtype: float
+ """
+ return Alpha_complex_interface.get_float_relative_precision()
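A minimal sketch of the two precision accessors added above (the point set is arbitrary):

import gudhi

gudhi.AlphaComplex.set_float_relative_precision(1e-8)  # tighter than the 1e-5 default
ac = gudhi.AlphaComplex(points=[[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
stree = ac.create_simplex_tree()
print(gudhi.AlphaComplex.get_float_relative_precision())  # 1e-08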
diff --git a/src/python/gudhi/datasets/remote.py b/src/python/gudhi/datasets/remote.py
new file mode 100644
index 00000000..f6d3fe56
--- /dev/null
+++ b/src/python/gudhi/datasets/remote.py
@@ -0,0 +1,223 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from os.path import join, split, exists, expanduser
+from os import makedirs, remove, environ
+
+from urllib.request import urlretrieve
+import hashlib
+import shutil
+
+import numpy as np
+
+def _get_data_home(data_home = None):
+ """
+ Return the path of the remote datasets directory.
+ This folder is used to store remotely fetched datasets.
+ By default the datasets directory is set to a folder named 'gudhi_data' in the user home folder.
+ Alternatively, it can be set by the 'GUDHI_DATA' environment variable.
+ The '~' symbol is expanded to the user home folder.
+ If the folder does not already exist, it is automatically created.
+
+ Parameters
+ ----------
+ data_home : string
+        The path to the remote datasets directory.
+ Default is `None`, meaning that the data home directory will be set to "~/gudhi_data",
+ if the 'GUDHI_DATA' environment variable does not exist.
+
+ Returns
+ -------
+ data_home: string
+        The path to the remote datasets directory.
+ """
+ if data_home is None:
+ data_home = environ.get("GUDHI_DATA", join("~", "gudhi_data"))
+ data_home = expanduser(data_home)
+ makedirs(data_home, exist_ok=True)
+ return data_home
+
+
+def clear_data_home(data_home = None):
+ """
+ Delete the data home cache directory and all its content.
+
+ Parameters
+ ----------
+ data_home : string, default is None.
+        The path to the remote datasets directory.
+ If `None` and the 'GUDHI_DATA' environment variable does not exist,
+ the default directory to be removed is set to "~/gudhi_data".
+ """
+ data_home = _get_data_home(data_home)
+ shutil.rmtree(data_home)
+
+def _checksum_sha256(file_path):
+ """
+ Compute the file checksum using sha256.
+
+ Parameters
+ ----------
+ file_path: string
+ Full path of the created file including filename.
+
+ Returns
+ -------
+ The hex digest of file_path.
+ """
+ sha256_hash = hashlib.sha256()
+ chunk_size = 4096
+ with open(file_path,"rb") as f:
+ # Read and update hash string value in blocks of 4K
+ while True:
+ buffer = f.read(chunk_size)
+ if not buffer:
+ break
+ sha256_hash.update(buffer)
+ return sha256_hash.hexdigest()
+
+def _fetch_remote(url, file_path, file_checksum = None):
+ """
+ Fetch the wanted dataset from the given url and save it in file_path.
+
+ Parameters
+ ----------
+ url : string
+ The url to fetch the dataset from.
+ file_path : string
+ Full path of the downloaded file including filename.
+ file_checksum : string
+        The expected sha256 checksum of the file, checked against the one computed on the downloaded file.
+ Default is 'None', which means the checksum is not checked.
+
+ Raises
+ ------
+ IOError
+ If the computed SHA256 checksum of file does not match the one given by the user.
+ """
+
+ # Get the file
+ urlretrieve(url, file_path)
+
+ if file_checksum is not None:
+ checksum = _checksum_sha256(file_path)
+ if file_checksum != checksum:
+ # Remove file and raise error
+ remove(file_path)
+ raise IOError("{} has a SHA256 checksum : {}, "
+ "different from expected : {}."
+ "The file may be corrupted or the given url may be wrong !".format(file_path, checksum, file_checksum))
+
+def _get_archive_path(file_path, label):
+ """
+ Get archive path based on file_path given by user and label.
+
+ Parameters
+ ----------
+ file_path: string
+ Full path of the file to get including filename, or None.
+ label: string
+ Label used along with 'data_home' to get archive path, in case 'file_path' is None.
+
+ Returns
+ -------
+ Full path of archive including filename.
+ """
+ if file_path is None:
+ archive_path = join(_get_data_home(), label)
+ dirname = split(archive_path)[0]
+ makedirs(dirname, exist_ok=True)
+ else:
+ archive_path = file_path
+ dirname = split(archive_path)[0]
+ makedirs(dirname, exist_ok=True)
+
+ return archive_path
+
+def fetch_spiral_2d(file_path = None):
+ """
+ Load the spiral_2d dataset.
+
+ Note that if the dataset already exists in the target location, it is not downloaded again,
+ and the corresponding array is returned from cache.
+
+ Parameters
+ ----------
+ file_path : string
+ Full path of the downloaded file including filename.
+
+ Default is None, meaning that it's set to "data_home/points/spiral_2d/spiral_2d.npy".
+
+ The "data_home" directory is set by default to "~/gudhi_data",
+ unless the 'GUDHI_DATA' environment variable is set.
+
+ Returns
+ -------
+ points: numpy array
+ Array of shape (114562, 2).
+ """
+ file_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/spiral_2d/spiral_2d.npy"
+ file_checksum = '2226024da76c073dd2f24b884baefbfd14928b52296df41ad2d9b9dc170f2401'
+
+ archive_path = _get_archive_path(file_path, "points/spiral_2d/spiral_2d.npy")
+
+ if not exists(archive_path):
+ _fetch_remote(file_url, archive_path, file_checksum)
+
+ return np.load(archive_path, mmap_mode='r')
+
+def fetch_bunny(file_path = None, accept_license = False):
+ """
+ Load the Stanford bunny dataset.
+
+ This dataset contains 35947 vertices.
+
+ Note that if the dataset already exists in the target location, it is not downloaded again,
+ and the corresponding array is returned from cache.
+
+ Parameters
+ ----------
+ file_path : string
+ Full path of the downloaded file including filename.
+
+ Default is None, meaning that it's set to "data_home/points/bunny/bunny.npy".
+ In this case, the LICENSE file would be downloaded as "data_home/points/bunny/bunny.LICENSE".
+
+ The "data_home" directory is set by default to "~/gudhi_data",
+ unless the 'GUDHI_DATA' environment variable is set.
+
+ accept_license : boolean
+        Flag to specify that the user accepts the file LICENSE; when set, the corresponding license terms are not printed.
+
+ Default is False.
+
+ Returns
+ -------
+ points: numpy array
+ Array of shape (35947, 3).
+ """
+
+ file_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/bunny/bunny.npy"
+ file_checksum = 'f382482fd89df8d6444152dc8fd454444fe597581b193fd139725a85af4a6c6e'
+ license_url = "https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/bunny/bunny.LICENSE"
+ license_checksum = 'b763dbe1b2fc6015d05cbf7bcc686412a2eb100a1f2220296e3b4a644c69633a'
+
+ archive_path = _get_archive_path(file_path, "points/bunny/bunny.npy")
+
+ if not exists(archive_path):
+ _fetch_remote(file_url, archive_path, file_checksum)
+ license_path = join(split(archive_path)[0], "bunny.LICENSE")
+ _fetch_remote(license_url, license_path, license_checksum)
+ # Print license terms unless accept_license is set to True
+ if not accept_license:
+ if exists(license_path):
+ with open(license_path, 'r') as f:
+ print(f.read())
+
+ return np.load(archive_path, mmap_mode='r')
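A minimal usage sketch of the fetchers added above (the shapes are the ones documented in the docstrings; the download only happens on the first call):

from gudhi.datasets import remote

points = remote.fetch_spiral_2d()                # cached under ~/gudhi_data by default
print(points.shape)                              # (114562, 2)
bunny = remote.fetch_bunny(accept_license=True)  # skip printing the LICENSE terms
print(bunny.shape)                               # (35947, 3)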
diff --git a/src/python/gudhi/hera/wasserstein.cc b/src/python/gudhi/hera/wasserstein.cc
index 1a21f02f..fa0cf8aa 100644
--- a/src/python/gudhi/hera/wasserstein.cc
+++ b/src/python/gudhi/hera/wasserstein.cc
@@ -29,7 +29,7 @@ double wasserstein_distance(
if(std::isinf(internal_p)) internal_p = hera::get_infinity<double>();
params.internal_p = internal_p;
params.delta = delta;
- // The extra parameters are purposedly not exposed for now.
+ // The extra parameters are purposely not exposed for now.
return hera::wasserstein_dist(diag1, diag2, params);
}
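A usage sketch of the Python side of this binding, assuming the keyword names order, internal_p and delta exposed by the wrapper (internal_p and delta appear in the C++ above); the toy diagrams are arbitrary:

import numpy as np
from gudhi.hera import wasserstein_distance

dgm1 = np.array([[0., 1.], [0.5, 2.]])
dgm2 = np.array([[0., 1.2]])
# 1-Wasserstein distance with the sup-norm as ground metric, within 1% relative error
print(wasserstein_distance(dgm1, dgm2, order=1., internal_p=np.inf, delta=0.01))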
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 848dc03e..21275cdd 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -12,6 +12,9 @@ from os import path
from math import isfinite
import numpy as np
from functools import lru_cache
+import warnings
+import errno
+import os
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
@@ -22,6 +25,7 @@ __license__ = "MIT"
_gudhi_matplotlib_use_tex = True
+
def __min_birth_max_death(persistence, band=0.0):
"""This function returns (min_birth, max_death) from the persistence.
@@ -44,20 +48,46 @@ def __min_birth_max_death(persistence, band=0.0):
min_birth = float(interval[1][0])
if band > 0.0:
max_death += band
+    # can happen if there are only points with infinite death
+ if min_birth == max_death:
+ max_death = max_death + 1.0
return (min_birth, max_death)
def _array_handler(a):
- '''
+ """
    :param a: if array, assumes it is a (n x 2) np.array and returns a
persistence-compatible list (padding with 0), so that the
plot can be performed seamlessly.
- '''
- if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float):
+ """
+ if isinstance(a[0][1], (np.floating, float)):
return [[0, x] for x in a]
else:
return a
+
+def _limit_to_max_intervals(persistence, max_intervals, key):
+ """This function returns truncated persistence if length is bigger than max_intervals.
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+    :param max_intervals: maximal number of intervals to keep.
+        Selected intervals are those with the longest life time. Set it
+        to 0 to keep them all.
+ :type max_intervals: int.
+ :param key: key function for sort algorithm.
+ :type key: function or lambda.
+ """
+ if max_intervals > 0 and max_intervals < len(persistence):
+ warnings.warn(
+ "There are %s intervals given as input, whereas max_intervals is set to %s."
+ % (len(persistence), max_intervals)
+ )
+        # Sort by life time, then keep only the max_intervals longest elements
+ return sorted(persistence, key=key, reverse=True)[:max_intervals]
+ else:
+ return persistence
+
+
@lru_cache(maxsize=1)
def _matplotlib_can_use_tex():
"""This function returns True if matplotlib can deal with LaTeX, False otherwise.
@@ -65,17 +95,17 @@ def _matplotlib_can_use_tex():
"""
try:
from matplotlib import checkdep_usetex
+
return checkdep_usetex(True)
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_barcode(
persistence=[],
persistence_file="",
alpha=0.6,
- max_intervals=1000,
- max_barcodes=1000,
+ max_intervals=20000,
inf_delta=0.1,
legend=False,
colormap=None,
@@ -97,7 +127,7 @@ def plot_persistence_barcode(
:type alpha: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 20000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -119,99 +149,68 @@ def plot_persistence_barcode(
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- persistence = _array_handler(persistence)
-
- if max_barcodes != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_barcodes
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
-
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ (min_birth, max_death) = __min_birth_max_death(persistence)
+ persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ except IndexError:
+ min_birth, max_death = 0.0, 1.0
+ pass
- (min_birth, max_death) = __min_birth_max_death(persistence)
- ind = 0
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for bar code to be more
# readable
infinity = max_death + delta
axis_start = min_birth - delta
- # Draw horizontal bars in loop
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.barh(
- ind,
- (interval[1][1] - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- else:
- # Infinite death case for diagram to be nicer
- axes.barh(
- ind,
- (infinity - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- ind = ind + 1
+
+    if axes is None:
+        _, axes = plt.subplots(1, 1)
+    if colormap is None:
+        colormap = plt.cm.Set1.colors
+
+    x = [birth for (dim, (birth, death)) in persistence]
+    y = [(death - birth) if death != float("inf") else (infinity - birth) for (dim, (birth, death)) in persistence]
+    c = [colormap[dim] for (dim, (birth, death)) in persistence]
+
+ axes.barh(list(reversed(range(len(x)))), y, height=0.8, left=x, alpha=alpha, color=c, linewidth=0)
if legend:
dimensions = list(set(item[0] for item in persistence))
axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ],
- loc="lower right",
+ handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions], loc="lower right",
)
axes.set_title("Persistence barcode", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, 0, ind])
+ if len(x) != 0:
+ axes.axis([axis_start, infinity, 0, len(x)])
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_diagram(
@@ -219,14 +218,13 @@ def plot_persistence_diagram(
persistence_file="",
alpha=0.6,
band=0.0,
- max_intervals=1000,
- max_plots=1000,
+ max_intervals=1000000,
inf_delta=0.1,
legend=False,
colormap=None,
axes=None,
fontsize=16,
- greyblock=True
+ greyblock=True,
):
"""This function plots the persistence diagram from persistence values
list, a np.array of shape (N x 2) representing a diagram in a single
@@ -244,7 +242,7 @@ def plot_persistence_diagram(
:type band: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 1000000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -268,47 +266,35 @@ def plot_persistence_diagram(
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- persistence = _array_handler(persistence)
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- if max_plots != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_plots
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
-
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ min_birth, max_death = __min_birth_max_death(persistence, band)
+ except IndexError:
+ min_birth, max_death = 0.0, 1.0
+ pass
- (min_birth, max_death) = __min_birth_max_death(persistence, band)
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for diagram to be more
# readable
@@ -316,61 +302,56 @@ def plot_persistence_diagram(
axis_end = max_death + delta / 2
axis_start = min_birth - delta
+    if axes is None:
+        _, axes = plt.subplots(1, 1)
+    if colormap is None:
+        colormap = plt.cm.Set1.colors
# bootstrap band
if band > 0.0:
x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
# lower diag patch
if greyblock:
- axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey'))
- # Draw points in loop
- pts_at_infty = False # Records presence of pts at infty
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.scatter(
- interval[1][0],
- interval[1][1],
- alpha=alpha,
- color=colormap[interval[0]],
+ axes.add_patch(
+ mpatches.Polygon(
+ [[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]],
+ fill=True,
+ color="lightgrey",
)
- else:
- pts_at_infty = True
- # Infinite death case for diagram to be nicer
- axes.scatter(
- interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
- )
- if pts_at_infty:
+ )
+ # line display of equation : birth = death
+ axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
+
+    x = [birth for (dim, (birth, death)) in persistence]
+    y = [death if death != float("inf") else infinity for (dim, (birth, death)) in persistence]
+    c = [colormap[dim] for (dim, (birth, death)) in persistence]
+
+    axes.scatter(x, y, alpha=alpha, color=c)
+ if float("inf") in (death for (dim,(birth,death)) in persistence):
# infinity line and text
- axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha)
# Infinity label
yt = axes.get_yticks()
- yt = yt[np.where(yt < axis_end)] # to avoid ploting ticklabel higher than infinity
+ yt = yt[np.where(yt < axis_end)] # to avoid plotting ticklabel higher than infinity
yt = np.append(yt, infinity)
ytl = ["%.3f" % e for e in yt] # to avoid float precision error
- ytl[-1] = r'$+\infty$'
+ ytl[-1] = r"$+\infty$"
axes.set_yticks(yt)
axes.set_yticklabels(ytl)
if legend:
dimensions = list(set(item[0] for item in persistence))
- axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ]
- )
+ axes.legend(handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions])
axes.set_xlabel("Birth", fontsize=fontsize)
axes.set_ylabel("Death", fontsize=fontsize)
axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
+ axes.axis([axis_start, axis_end, axis_start, infinity + delta / 2])
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_density(
@@ -384,7 +365,7 @@ def plot_persistence_density(
legend=False,
axes=None,
fontsize=16,
- greyblock=False
+ greyblock=False,
):
"""This function plots the persistence density from persistence
values list, np.array of shape (N x 2) representing a diagram
@@ -444,12 +425,13 @@ def plot_persistence_density(
import matplotlib.patches as mpatches
from scipy.stats import kde
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if dimension is None:
@@ -460,10 +442,16 @@ def plot_persistence_density(
persistence_file=persistence_file, only_this_dim=dimension
)
else:
- print("file " + persistence_file + " not found.")
- return None
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
+
+ # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
+ if cmap is None:
+ cmap = plt.cm.hot_r
+    if axes is None:
+ _, axes = plt.subplots(1, 1)
- if len(persistence) > 0:
+ try:
+ # if not read from file but given by an argument
persistence = _array_handler(persistence)
persistence_dim = np.array(
[
@@ -472,47 +460,54 @@ def plot_persistence_density(
if (dim_interval[0] == dimension) or (dimension is None)
]
)
-
- persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
- if max_intervals > 0 and max_intervals < len(persistence_dim):
- # Sort by life time, then takes only the max_intervals elements
+ persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
persistence_dim = np.array(
- sorted(
- persistence_dim,
- key=lambda life_time: life_time[1] - life_time[0],
- reverse=True,
- )[:max_intervals]
+ _limit_to_max_intervals(
+ persistence_dim, max_intervals, key=lambda life_time: life_time[1] - life_time[0]
+ )
)
- # Set as numpy array birth and death (remove undefined values - inf and NaN)
- birth = persistence_dim[:, 0]
- death = persistence_dim[:, 1]
-
- # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
- if cmap is None:
- cmap = plt.cm.hot_r
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ # Set as numpy array birth and death (remove undefined values - inf and NaN)
+ birth = persistence_dim[:, 0]
+ death = persistence_dim[:, 1]
+ birth_min = birth.min()
+ birth_max = birth.max()
+ death_min = death.min()
+ death_max = death.max()
+
+ # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
+ k = kde.gaussian_kde([birth, death], bw_method=bw_method)
+ xi, yi = np.mgrid[
+ birth_min : birth_max : nbins * 1j, death_min : death_max : nbins * 1j,
+ ]
+ zi = k(np.vstack([xi.flatten(), yi.flatten()]))
+ # Make the plot
+ img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap, shading="auto")
+ plot_success = True
+
+ # IndexError on empty diagrams, ValueError on only inf death values
+ except (IndexError, ValueError):
+ birth_min = 0.0
+ birth_max = 1.0
+ death_min = 0.0
+ death_max = 1.0
+ plot_success = False
+ pass
# line display of equation : birth = death
- x = np.linspace(death.min(), birth.max(), 1000)
+ x = np.linspace(death_min, birth_max, 1000)
axes.plot(x, x, color="k", linewidth=1.0)
- # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
- k = kde.gaussian_kde([birth, death], bw_method=bw_method)
- xi, yi = np.mgrid[
- birth.min() : birth.max() : nbins * 1j,
- death.min() : death.max() : nbins * 1j,
- ]
- zi = k(np.vstack([xi.flatten(), yi.flatten()]))
-
- # Make the plot
- img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
-
if greyblock:
- axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey'))
+ axes.add_patch(
+ mpatches.Polygon(
+ [[birth_min, birth_min], [death_max, birth_min], [death_max, death_max]],
+ fill=True,
+ color="lightgrey",
+ )
+ )
- if legend:
+ if plot_success and legend:
plt.colorbar(img, ax=axes)
axes.set_xlabel("Birth", fontsize=fontsize)
@@ -521,7 +516,5 @@ def plot_persistence_density(
return axes
- except ImportError:
- print(
- "This function is not available, you may be missing matplotlib and/or scipy."
- )
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
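A minimal end-to-end sketch of these plotting helpers on a toy Rips filtration (the points are random, so the diagram content is arbitrary):

import numpy as np
import matplotlib.pyplot as plt
import gudhi

points = np.random.default_rng(0).random((100, 2))
st = gudhi.RipsComplex(points=points, max_edge_length=0.5).create_simplex_tree(max_dimension=2)
diag = st.persistence()
gudhi.plot_persistence_diagram(diag, legend=True)
plt.show()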
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a8545349..8722e162 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -1,10 +1,11 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Vincent Rouvreau
#
# Copyright (C) 2018-2019 Inria
#
# Modification(s):
+# - 2021/10 Vincent Rouvreau: Add DimensionSelector
# - YYYY/MM Author: Description of the modification
import numpy as np
@@ -75,7 +76,7 @@ class Clamping(BaseEstimator, TransformerMixin):
Constructor for the Clamping class.
Parameters:
- limit (double): clamping value (default np.inf).
+ limit (float): clamping value (default np.inf).
"""
self.minimum = minimum
self.maximum = maximum
@@ -234,7 +235,7 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
use (bool): whether to use the class or not (default False).
location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
- threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ threshold (float): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
"""
self.num_pts = num_pts
self.threshold = threshold
@@ -317,7 +318,7 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
Parameters:
use (bool): whether to use the class or not (default False).
- limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ limit (float): second coordinate value that is the criterion for being an essential point (default numpy.inf).
point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
"""
self.use, self.limit, self.point_type = use, limit, point_type
@@ -363,3 +364,51 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
n x 2 numpy array: extracted persistence diagram.
"""
return self.fit_transform([diag])[0]
+
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>DimensionSelector: fit_transform(<br/>[[array( Hi(X0) ), array( Hj(X0) ), ...],<br/> [array( Hi(X1) ), array( Hj(X1) ), ...],<br/> ...])
+# DimensionSelector->>thread1: _transform([array( Hi(X0) ), array( Hj(X0) )], ...)
+# DimensionSelector->>thread2: _transform([array( Hi(X1) ), array( Hj(X1) )], ...)
+# Note right of DimensionSelector: ...
+# thread1->>DimensionSelector: array( Hn(X0) )
+# thread2->>DimensionSelector: array( Hn(X1) )
+# Note right of DimensionSelector: ...
+# DimensionSelector->>USER: [array( Hn(X0) ), <br/> array( Hn(X1) ), <br/> ...]
+
+class DimensionSelector(BaseEstimator, TransformerMixin):
+ """
+    This is a class to select persistence diagrams in a specific dimension, given its index.
+ """
+
+ def __init__(self, index=0):
+ """
+ Constructor for the DimensionSelector class.
+
+ Parameters:
+ index (int): The returned persistence diagrams dimension index. Default value is `0`.
+ """
+ self.index = index
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def transform(self, X, Y=None):
+ """
+        Select persistence diagrams by their dimension index.
+
+ Parameters:
+ X (list of list of tuple): List of list of persistence pairs, i.e.
+ `[[array( Hi(X0) ), array( Hj(X0) ), ...], [array( Hi(X1) ), array( Hj(X1) ), ...], ...]`
+
+ Returns:
+ list of tuple:
+            Persistence diagrams in a specific dimension, i.e. if `index` was set to `m` and `Hn` is at index `m` of
+            the input, it returns `[array( Hn(X0) ), array( Hn(X1) ), ...]`
+ """
+
+ return [persistence[self.index] for persistence in X]
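A minimal usage sketch of DimensionSelector (the toy diagrams are arbitrary; index 1 keeps the H1 part of each sample):

import numpy as np
from gudhi.representations.preprocessing import DimensionSelector

pdiags = [
    [np.array([[0., 1.]]), np.array([[0.2, 0.5]])],   # [H0, H1] for sample 0
    [np.array([[0., 2.]]), np.array([[0.1, 0.4]])],   # [H0, H1] for sample 1
]
h1_only = DimensionSelector(index=1).fit_transform(pdiags)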
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index f8078d03..69ff5e1e 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -508,26 +508,20 @@ class Entropy(BaseEstimator, TransformerMixin):
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
- orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
- try:
- new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
- except ValueError:
- # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
- assert len(diagram) == 0
- new_diagram = np.empty(shape = [0, 2])
-
+ orig_diagram, new_diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
+
+ p = new_diagram[:,1]
+            p = p / np.sum(p)
if self.mode == "scalar":
- ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
+ ent = -np.dot(p, np.log(p))
Xfit.append(np.array([[ent]]))
-
else:
ent = np.zeros(self.resolution)
for j in range(num_pts_in_diag):
[px,py] = orig_diagram[j,:2]
min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- for k in range(min_idx, max_idx):
- ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
+                    ent[min_idx:max_idx] -= p[j] * np.log(p[j])
if self.normalized:
ent = ent / np.linalg.norm(ent, ord=1)
Xfit.append(np.reshape(ent,[1,-1]))
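The scalar branch above computes the usual persistence entropy; a minimal numpy sketch of the same quantity on a toy diagram:

import numpy as np

diag = np.array([[0.0, 1.0], [0.2, 0.7], [0.5, 0.6]])  # (birth, death) pairs
p = diag[:, 1] - diag[:, 0]   # life times
p = p / np.sum(p)
print(-np.dot(p, np.log(p)))  # persistence entropy: -sum_i p_i * log(p_i)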
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 284daa96..f86f1232 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -64,7 +64,6 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
bool prune_above_filtration(double filtration) nogil
bool make_filtration_non_decreasing() nogil
void compute_extended_filtration() nogil
- vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil except +
void reset_filtration(double filtration, int dimension) nogil
bint operator==(Simplex_tree_interface_full_featured) nogil
@@ -82,7 +81,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data)
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
- cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
+ cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>>":
Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
vector[pair[int, pair[double, double]]] get_persistence() nogil
@@ -93,3 +92,4 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
vector[pair[vector[int], vector[int]]] persistence_pairs() nogil
pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() nogil
pair[vector[vector[int]], vector[vector[int]]] flag_generators() nogil
+ vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(double min_persistence) nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 1ac03afa..6b1b5c00 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -558,8 +558,7 @@ cdef class SimplexTree:
del self.pcohptr
self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False)
self.pcohptr.compute_persistence(homology_coeff_field, -1.)
- persistence_result = self.pcohptr.get_persistence()
- return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence)
+ return self.pcohptr.compute_extended_persistence_subdiagrams(min_persistence)
def expansion_with_blocker(self, max_dim, blocker_func):
"""Expands the Simplex_tree containing only a graph. Simplices corresponding to cliques in the graph are added
@@ -572,9 +571,9 @@ cdef class SimplexTree:
otherwise it is kept. The algorithm then proceeds with the next candidate.
.. warning::
- Several candidates of the same dimension may be inserted simultaneously before calling `block_simplex`, so
- if you examine the complex in `block_simplex`, you may hit a few simplices of the same dimension that have
- not been vetted by `block_simplex` yet, or have already been rejected but not yet removed.
+ Several candidates of the same dimension may be inserted simultaneously before calling `blocker_func`, so
+ if you examine the complex in `blocker_func`, you may hit a few simplices of the same dimension that have
+ not been vetted by `blocker_func` yet, or have already been rejected but not yet removed.
:param max_dim: Expansion maximal dimension value.
:type max_dim: int
@@ -760,18 +759,17 @@ cdef class SimplexTree:
return (normal0, normals, infinite0, infinites)
def collapse_edges(self, nb_iterations = 1):
- """Assuming the simplex tree is a 1-skeleton graph, this method collapse edges (simplices of higher dimension
- are ignored) and resets the simplex tree from the remaining edges.
- A good candidate is to build a simplex tree on top of a :class:`~gudhi.RipsComplex` of dimension 1 before
- collapsing edges
+ """Assuming the complex is a graph (simplices of higher dimension are ignored), this method implicitly
+ interprets it as the 1-skeleton of a flag complex, and replaces it with another (smaller) graph whose
+ expansion has the same persistent homology, using a technique known as edge collapses
+ (see :cite:`edgecollapsearxiv`).
+
+ A natural application is to get a simplex tree of dimension 1 from :class:`~gudhi.RipsComplex`,
+ then collapse edges, perform :meth:`expansion()` and finally compute persistence
(cf. :download:`rips_complex_edge_collapse_example.py <../example/rips_complex_edge_collapse_example.py>`).
- For implementation details, please refer to :cite:`edgecollapsesocg2020`.
:param nb_iterations: The number of edge collapse iterations to perform. Default is 1.
:type nb_iterations: int
-
- :note: collapse_edges method requires `Eigen <installation.html#eigen>`_ >= 3.1.0 and an exception is thrown
- if this method is not available.
"""
# Backup old pointer
cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
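A minimal sketch of the workflow the docstring above recommends (the points and parameters are arbitrary):

import numpy as np
import gudhi

points = np.random.default_rng(0).random((50, 3))
st = gudhi.RipsComplex(points=points, max_edge_length=0.6).create_simplex_tree(max_dimension=1)
st.collapse_edges(nb_iterations=2)  # shrink the graph while preserving flag persistence
st.expansion(3)                     # rebuild the flag complex on the collapsed graph
diag = st.persistence()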
diff --git a/src/python/gudhi/sklearn/__init__.py b/src/python/gudhi/sklearn/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/sklearn/__init__.py
diff --git a/src/python/gudhi/sklearn/cubical_persistence.py b/src/python/gudhi/sklearn/cubical_persistence.py
new file mode 100644
index 00000000..672af278
--- /dev/null
+++ b/src/python/gudhi/sklearn/cubical_persistence.py
@@ -0,0 +1,110 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .. import CubicalComplex
+from sklearn.base import BaseEstimator, TransformerMixin
+
+import numpy as np
+# joblib is required by scikit-learn
+from joblib import Parallel, delayed
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>CubicalPersistence: fit_transform(X)
+# CubicalPersistence->>thread1: _transform(X[0])
+# CubicalPersistence->>thread2: _transform(X[1])
+# Note right of CubicalPersistence: ...
+# thread1->>CubicalPersistence: [array( H0(X[0]) ), array( H1(X[0]) )]
+# thread2->>CubicalPersistence: [array( H0(X[1]) ), array( H1(X[1]) )]
+# Note right of CubicalPersistence: ...
+# CubicalPersistence->>USER: [[array( H0(X[0]) ), array( H1(X[0]) )],<br/> [array( H0(X[1]) ), array( H1(X[1]) )],<br/> ...]
+
+
+class CubicalPersistence(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence diagrams from a cubical complex.
+ """
+
+ def __init__(
+ self,
+ homology_dimensions,
+ newshape=None,
+ homology_coeff_field=11,
+ min_persistence=0.0,
+ n_jobs=None,
+ ):
+ """
+ Constructor for the CubicalPersistence class.
+
+ Parameters:
+            homology_dimensions (int or list of int): The dimension(s) of the returned persistence diagrams.
+                Short-circuits the use of :class:`~gudhi.representations.preprocessing.DimensionSelector` when only one
+                dimension matters (in other words, when `homology_dimensions` is an int).
+            newshape (tuple of ints): If the cells' filtration values need to be reshaped
+                (cf. :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform`), set `newshape`
+                so that `numpy.reshape(X, newshape, order='C')` is performed in the
+                :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform` method.
+ homology_coeff_field (int): The homology coefficient field. Must be a prime number. Default value is 11.
+ min_persistence (float): The minimum persistence value to take into account (strictly greater than
+ `min_persistence`). Default value is `0.0`. Set `min_persistence` to `-1.0` to see all values.
+ n_jobs (int): cf. https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html
+ """
+ self.homology_dimensions = homology_dimensions
+ self.newshape = newshape
+ self.homology_coeff_field = homology_coeff_field
+ self.min_persistence = min_persistence
+ self.n_jobs = n_jobs
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def __transform(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return [
+ cubical_complex.persistence_intervals_in_dimension(dim) for dim in self.homology_dimensions
+ ]
+
+ def __transform_only_this_dim(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return cubical_complex.persistence_intervals_in_dimension(self.homology_dimensions)
+
+ def transform(self, X, Y=None):
+ """Compute all the cubical complexes and their associated persistence diagrams.
+
+        :param X: List of cells filtration values (`numpy.reshape(X, newshape, order='C')` is applied if `newshape` is set with a tuple of ints).
+ :type X: list of list of float OR list of numpy.ndarray
+
+ :return: Persistence diagrams in the format:
+
+ - If `homology_dimensions` was set to `n`: `[array( Hn(X[0]) ), array( Hn(X[1]) ), ...]`
+ - If `homology_dimensions` was set to `[i, j]`: `[[array( Hi(X[0]) ), array( Hj(X[0]) )], [array( Hi(X[1]) ), array( Hj(X[1]) )], ...]`
+ :rtype: list of (,2) array_like or list of list of (,2) array_like
+ """
+ if self.newshape is not None:
+ X = np.reshape(X, self.newshape, order='C')
+
+        # Depending on whether homology_dimensions is an integer (this case) or a list of integers (else case)
+ if isinstance(self.homology_dimensions, int):
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(
+ delayed(self.__transform_only_this_dim)(cells) for cells in X
+ )
+ else:
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(delayed(self.__transform)(cells) for cells in X)
+
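A minimal usage sketch of this estimator (random images, so the diagrams are arbitrary; one H0 diagram per image):

import numpy as np
from gudhi.sklearn.cubical_persistence import CubicalPersistence

X = np.random.default_rng(0).random((10, 28, 28))        # ten 28x28 "images"
cp = CubicalPersistence(homology_dimensions=0, n_jobs=-1)
diagrams = cp.fit_transform(X)                           # [array( H0(X[0]) ), array( H0(X[1]) ), ...]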
diff --git a/src/python/gudhi/tensorflow/__init__.py b/src/python/gudhi/tensorflow/__init__.py
new file mode 100644
index 00000000..1599cf52
--- /dev/null
+++ b/src/python/gudhi/tensorflow/__init__.py
@@ -0,0 +1,5 @@
+from .cubical_layer import CubicalLayer
+from .lower_star_simplex_tree_layer import LowerStarSimplexTreeLayer
+from .rips_layer import RipsLayer
+
+__all__ = ["LowerStarSimplexTreeLayer", "RipsLayer", "CubicalLayer"]
diff --git a/src/python/gudhi/tensorflow/cubical_layer.py b/src/python/gudhi/tensorflow/cubical_layer.py
new file mode 100644
index 00000000..5df2c370
--- /dev/null
+++ b/src/python/gudhi/tensorflow/cubical_layer.py
@@ -0,0 +1,82 @@
+import numpy as np
+import tensorflow as tf
+from ..cubical_complex import CubicalComplex
+
+######################
+# Cubical filtration #
+######################
+
+# The parameters of the model are the pixel values.
+
+def _Cubical(Xflat, Xdim, dimensions, homology_coeff_field):
+ # Parameters: Xflat (flattened image),
+ # Xdim (shape of non-flattened image)
+ # dimensions (homology dimensions)
+
+ # Compute the persistence pairs with Gudhi
+ # We reverse the dimensions because CubicalComplex uses Fortran ordering
+ cc = CubicalComplex(dimensions=Xdim[::-1], top_dimensional_cells=Xflat)
+ cc.compute_persistence(homology_coeff_field=homology_coeff_field)
+
+ # Retrieve and output image indices/pixels corresponding to positive and negative simplices
+ cof_pp = cc.cofaces_of_persistence_pairs()
+
+ L_cofs = []
+ for dim in dimensions:
+
+ try:
+ cof = cof_pp[0][dim]
+ except IndexError:
+ cof = np.array([])
+
+ L_cofs.append(np.array(cof, dtype=np.int32))
+
+ return L_cofs
+
+class CubicalLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing the persistent homology of a cubical complex
+ """
+ def __init__(self, homology_dimensions, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the CubicalLayer class
+
+ Parameters:
+ homology_dimensions (List[int]): list of homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = homology_dimensions
+        self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute persistence diagram associated to a cubical complex filtered by some pixel values
+
+ Parameters:
+ X (TensorFlow variable): pixel values of the cubical complex
+
+ Returns:
+            List[Tuple[tf.Tensor,tf.Tensor]]: List of cubical persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively. Note that the essential part is always empty in cubical persistence diagrams, except in homology dimension zero, where the essential part always contains a single point, with abscissa equal to the smallest value in the complex, and infinite ordinate
+ """
+ # Compute pixels associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ Xflat = tf.reshape(X, [-1])
+ Xdim, Xflat_numpy = X.shape, Xflat.numpy()
+ indices_list = _Cubical(Xflat_numpy, Xdim, self.dimensions, self.hcf)
+ index_essential = np.argmin(Xflat_numpy) # index of minimum pixel value for essential persistence diagram
+ # Get persistence diagram by simply picking the corresponding entries in the image
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ finite_dgm = tf.reshape(tf.gather(Xflat, indices_list[idx_dim]), [-1,2])
+ essential_dgm = tf.reshape(tf.gather(Xflat, index_essential), [-1,1]) if dimension == 0 else tf.zeros([0, 1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices), [-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
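A minimal differentiation sketch with this layer (random pixel values; the loss, total finite H0 persistence, is an arbitrary choice):

import numpy as np
import tensorflow as tf
from gudhi.tensorflow import CubicalLayer

X = tf.Variable(np.random.default_rng(0).random((8, 8)), dtype=tf.float32)
layer = CubicalLayer(homology_dimensions=[0])
with tf.GradientTape() as tape:
    finite_dgm = layer(X)[0][0]                              # finite part of the H0 diagram
    loss = tf.math.reduce_sum(finite_dgm[:, 1] - finite_dgm[:, 0])
gradient = tape.gradient(loss, [X])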
diff --git a/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
new file mode 100644
index 00000000..5a8e5b75
--- /dev/null
+++ b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
@@ -0,0 +1,87 @@
+import numpy as np
+import tensorflow as tf
+
+#########################################
+# Lower star filtration on simplex tree #
+#########################################
+
+# The parameters of the model are the vertex function values of the simplex tree.
+
+def _LowerStarSimplexTree(simplextree, filtration, dimensions, homology_coeff_field):
+ # Parameters: simplextree (simplex tree on which to compute persistence)
+ # filtration (function values on the vertices of st),
+ # dimensions (homology dimensions),
+ # homology_coeff_field (homology field coefficient)
+
+ simplextree.reset_filtration(-np.inf, 0)
+
+ # Assign new filtration values
+ for i in range(simplextree.num_vertices()):
+ simplextree.assign_filtration([i], filtration[i])
+ simplextree.make_filtration_non_decreasing()
+
+ # Compute persistence diagram
+ simplextree.compute_persistence(homology_coeff_field=homology_coeff_field)
+
+ # Get vertex pairs for optimization. First, get all simplex pairs
+ pairs = simplextree.lower_star_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ finite_pairs = pairs[0][dimension] if len(pairs[0]) >= dimension+1 else np.empty(shape=[0,2])
+ essential_pairs = pairs[1][dimension] if len(pairs[1]) >= dimension+1 else np.empty(shape=[0,1])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class LowerStarSimplexTreeLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing lower-star persistence out of a simplex tree
+ """
+ def __init__(self, simplextree, homology_dimensions, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the LowerStarSimplexTreeLayer class
+
+ Parameters:
+ simplextree (gudhi.SimplexTree): underlying simplex tree. Its vertices MUST be named with integers from 0 to n-1, where n is its number of vertices. Note that its filtration values are modified in each call of the class.
+ homology_dimensions (List[int]): list of homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = homology_dimensions
+ self.simplextree = simplextree
+        self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, filtration):
+ """
+ Compute lower-star persistence diagram associated to a function defined on the vertices of the simplex tree
+
+ Parameters:
+            filtration (TensorFlow variable): filter function values over the vertices of the simplex tree. The ith entry of filtration corresponds to vertex i in self.simplextree
+
+ Returns:
+            List[Tuple[tf.Tensor,tf.Tensor]]: List of lower-star persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ """
+ # Don't try to compute gradients for the vertex pairs
+ indices = _LowerStarSimplexTree(self.simplextree, filtration.numpy(), self.dimensions, self.hcf)
+ # Get persistence diagrams
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ finite_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][0]), [-1,2])
+ essential_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][1]), [-1,1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
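A minimal differentiation sketch with this layer (a path graph with arbitrary vertex values; vertices are named 0..3 as required):

import tensorflow as tf
import gudhi
from gudhi.tensorflow import LowerStarSimplexTreeLayer

st = gudhi.SimplexTree()
for edge in [[0, 1], [1, 2], [2, 3]]:
    st.insert(edge)
F = tf.Variable([1.0, 0.0, 2.0, 0.5], dtype=tf.float32)
layer = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
with tf.GradientTape() as tape:
    finite_dgm = layer(F)[0][0]                              # finite part of the H0 diagram
    loss = tf.math.reduce_sum(finite_dgm[:, 1] - finite_dgm[:, 0])
gradient = tape.gradient(loss, [F])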
diff --git a/src/python/gudhi/tensorflow/rips_layer.py b/src/python/gudhi/tensorflow/rips_layer.py
new file mode 100644
index 00000000..2a73472c
--- /dev/null
+++ b/src/python/gudhi/tensorflow/rips_layer.py
@@ -0,0 +1,93 @@
+import numpy as np
+import tensorflow as tf
+from ..rips_complex import RipsComplex
+
+############################
+# Vietoris-Rips filtration #
+############################
+
+# The parameters of the model are the point coordinates.
+
+def _Rips(DX, max_edge, dimensions, homology_coeff_field):
+ # Parameters: DX (distance matrix),
+ # max_edge (maximum edge length for Rips filtration),
+ # dimensions (homology dimensions)
+
+ # Compute the persistence pairs with Gudhi
+ rc = RipsComplex(distance_matrix=DX, max_edge_length=max_edge)
+ st = rc.create_simplex_tree(max_dimension=max(dimensions)+1)
+ st.compute_persistence(homology_coeff_field=homology_coeff_field)
+ pairs = st.flag_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ if dimension == 0:
+ finite_pairs = pairs[0]
+ essential_pairs = pairs[2]
+ else:
+ finite_pairs = pairs[1][dimension-1] if len(pairs[1]) >= dimension else np.empty(shape=[0,4])
+ essential_pairs = pairs[3][dimension-1] if len(pairs[3]) >= dimension else np.empty(shape=[0,2])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class RipsLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing Rips persistence out of a point cloud
+ """
+ def __init__(self, homology_dimensions, maximum_edge_length=np.inf, min_persistence=None, homology_coeff_field=11, **kwargs):
+ """
+ Constructor for the RipsLayer class
+
+ Parameters:
+ maximum_edge_length (float): maximum edge length for the Rips complex
+ homology_dimensions (List[int]): list of homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ homology_coeff_field (int): homology field coefficient. Must be a prime number. Default value is 11. Max is 46337.
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.max_edge = maximum_edge_length
+ self.dimensions = homology_dimensions
+ self.min_persistence = min_persistence if min_persistence is not None else [0. for _ in range(len(self.dimensions))]
+ self.hcf = homology_coeff_field
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute Rips persistence diagram associated to a point cloud
+
+ Parameters:
+ X (TensorFlow variable): point cloud of shape [number of points, number of dimensions]
+
+ Returns:
+ List[Tuple[tf.Tensor,tf.Tensor]]: List of Rips persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively.
+ """
+ # Compute distance matrix
+ DX = tf.norm(tf.expand_dims(X, 1)-tf.expand_dims(X, 0), axis=2)
+ # Compute vertices associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ indices = _Rips(DX.numpy(), self.max_edge, self.dimensions, self.hcf)
+ # Get persistence diagrams by simply picking the corresponding entries in the distance matrix
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ cur_idx = indices[idx_dim]
+ if dimension > 0:
+ finite_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[0], [-1,2])), [-1,2])
+ essential_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[1], [-1,2])), [-1,1])
+ else:
+ reshaped_cur_idx = tf.reshape(cur_idx[0], [-1,3])
+ finite_dgm = tf.concat([tf.zeros([reshaped_cur_idx.shape[0],1]), tf.reshape(tf.gather_nd(DX, reshaped_cur_idx[:,1:]), [-1,1])], axis=1)
+ essential_dgm = tf.zeros([cur_idx[1].shape[0],1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
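Similarly, a hedged sketch of differentiating through the Rips layer above: gradients with respect to the point coordinates flow through the distance-matrix entries selected by `tf.gather_nd` (the point cloud and learning rate are illustrative):

```python
import numpy as np
import tensorflow as tf
from gudhi.tensorflow import RipsLayer

X = tf.Variable(np.array([[1., 1.], [2., 2.], [3., 1.]], dtype=np.float32), trainable=True)
layer = RipsLayer(homology_dimensions=[0], maximum_edge_length=10.)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

with tf.GradientTape() as tape:
    finite_dgm = layer.call(X)[0][0]  # finite H0 pairs; births are all 0 for Rips
    loss = tf.math.reduce_sum(finite_dgm[:, 1] - finite_dgm[:, 0])
grads = tape.gradient(loss, [X])
optimizer.apply_gradients(zip(grads, [X]))  # one step shrinking total H0 persistence
```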
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
index d67bcde7..bb6e641e 100644
--- a/src/python/gudhi/wasserstein/barycenter.py
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -37,7 +37,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
:param init: The initial value for barycenter estimate.
If ``None``, init is made on a random diagram from the dataset.
Otherwise, it can be an ``int`` (then initialization is made on ``pdiagset[init]``)
- or a `(n x 2)` ``numpy.array`` enconding a persistence diagram with `n` points.
+ or a `(n x 2)` ``numpy.array`` encoding a persistence diagram with `n` points.
:type init: ``int``, or (n x 2) ``np.array``
:param verbose: if ``True``, returns additional information about the barycenter.
:type verbose: boolean
@@ -45,7 +45,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
(local minimum of the energy function).
If ``pdiagset`` is empty, returns ``None``.
If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate,
- and ``log`` is a ``dict`` that contains additional informations:
+ and ``log`` is a ``dict`` that contains additional information:
- `"groupings"`, a list of list of pairs ``(i,j)``. Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that `pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal.
@@ -73,7 +73,7 @@ def lagrangian_barycenter(pdiagset, init=None, verbose=False):
nb_iter = 0
- converged = False # stoping criterion
+ converged = False # stopping criterion
while not converged:
nb_iter += 1
K = len(Y) # current nb of points in Y (some might be on diagonal)
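For context, a short usage sketch of the function whose docstring is fixed above (the diagrams are illustrative; only the `"groupings"` key documented above is relied upon):

```python
import numpy as np
from gudhi.wasserstein.barycenter import lagrangian_barycenter

dg1 = np.array([[0.2, 0.5]])
dg2 = np.array([[0.2, 0.7]])
dg3 = np.array([[0.3, 0.6], [0.7, 0.8]])

# verbose=True returns the barycenter estimate together with a log dict.
Y, log = lagrangian_barycenter(pdiagset=[dg1, dg2, dg3], init=0, verbose=True)
print(Y)                 # (m x 2) barycenter estimate
print(log["groupings"])  # matching of each input diagram onto Y
```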
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 671af4a4..469b91ce 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -57,6 +57,16 @@ class Alpha_complex_interface {
alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value);
}
+ static void set_float_relative_precision(double precision) {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::set_relative_precision_of_to_double(precision);
+ }
+
+ static double get_float_relative_precision() {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ return CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::get_relative_precision_of_to_double();
+ }
+
private:
std::unique_ptr<Abstract_alpha_complex> alpha_ptr_;
};
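On the Python side these two statics are exposed on `AlphaComplex`, as exercised by the new `test_float_relative_precision` below; a sketch:

```python
from gudhi import AlphaComplex

print(AlphaComplex.get_float_relative_precision())  # 1e-5 by default
AlphaComplex.set_float_relative_precision(1e-12)    # must lie strictly between 0 and 1

points = [[1, 1], [7, 0], [4, 6]]
st = AlphaComplex(points=points).create_simplex_tree()  # filtrations computed with the tighter precision
```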
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h
index e5a3dfba..945378a0 100644
--- a/src/python/include/Persistent_cohomology_interface.h
+++ b/src/python/include/Persistent_cohomology_interface.h
@@ -12,6 +12,8 @@
#define INCLUDE_PERSISTENT_COHOMOLOGY_INTERFACE_H_
#include <gudhi/Persistent_cohomology.h>
+#include <gudhi/Simplex_tree.h> // for Extended_simplex_type
+
#include <cstdlib>
#include <vector>
@@ -223,6 +225,44 @@ persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomol
return out;
}
+ using Filtration_value = typename FilteredComplex::Filtration_value;
+ using Birth_death = std::pair<Filtration_value, Filtration_value>;
+ using Persistence_subdiagrams = std::vector<std::vector<std::pair<int, Birth_death>>>;
+
+ Persistence_subdiagrams compute_extended_persistence_subdiagrams(Filtration_value min_persistence){
+ Persistence_subdiagrams pers_subs(4);
+ auto const& persistent_pairs = Base::get_persistent_pairs();
+ for (auto pair : persistent_pairs) {
+ std::pair<Filtration_value, Extended_simplex_type> px = stptr_->decode_extended_filtration(stptr_->filtration(get<0>(pair)),
+ stptr_->efd);
+ std::pair<Filtration_value, Extended_simplex_type> py = stptr_->decode_extended_filtration(stptr_->filtration(get<1>(pair)),
+ stptr_->efd);
+ std::pair<int, Birth_death> pd_point = std::make_pair(stptr_->dimension(get<0>(pair)),
+ std::make_pair(px.first, py.first));
+ if(std::abs(px.first - py.first) > min_persistence){
+ //Ordinary
+ if (px.second == Extended_simplex_type::UP && py.second == Extended_simplex_type::UP){
+ pers_subs[0].push_back(pd_point);
+ }
+ // Relative
+ else if (px.second == Extended_simplex_type::DOWN && py.second == Extended_simplex_type::DOWN){
+ pers_subs[1].push_back(pd_point);
+ }
+ else{
+ // Extended+
+ if (px.first < py.first){
+ pers_subs[2].push_back(pd_point);
+ }
+ //Extended-
+ else{
+ pers_subs[3].push_back(pd_point);
+ }
+ }
+ }
+ }
+ return pers_subs;
+ }
+
private:
// A copy
FilteredComplex* stptr_;
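This C++ routine backs `SimplexTree.extended_persistence()` on the Python side (replacing the equivalent code removed from `Simplex_tree_interface.h` below). A minimal sketch of the four subdiagrams it splits out, with an illustrative filtration:

```python
import gudhi as gd

st = gd.SimplexTree()
st.insert([0], 1.0)
st.insert([1], 2.0)
st.insert([2], 3.0)
st.insert([0, 1], 2.0)
st.insert([1, 2], 3.0)

st.extend_filtration()  # required before calling extended_persistence
dgms = st.extended_persistence(min_persistence=-1.)
ordinary, relative, extended_plus, extended_minus = dgms  # the four cases above
```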
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index b93ccfff..0317ea39 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -15,9 +15,7 @@
#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Points_off_io.h>
-#ifdef GUDHI_USE_EIGEN3
#include <gudhi/Flag_complex_edge_collapser.h>
-#endif
#include <iostream>
#include <vector>
@@ -160,38 +158,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
return;
}
- std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> compute_extended_persistence_subdiagrams(const std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>& dgm, Filtration_value min_persistence){
- std::vector<std::vector<std::pair<int, std::pair<Filtration_value, Filtration_value>>>> new_dgm(4);
- for (unsigned int i = 0; i < dgm.size(); i++){
- std::pair<Filtration_value, Extended_simplex_type> px = this->decode_extended_filtration(dgm[i].second.first, this->efd);
- std::pair<Filtration_value, Extended_simplex_type> py = this->decode_extended_filtration(dgm[i].second.second, this->efd);
- std::pair<int, std::pair<Filtration_value, Filtration_value>> pd_point = std::make_pair(dgm[i].first, std::make_pair(px.first, py.first));
- if(std::abs(px.first - py.first) > min_persistence){
- //Ordinary
- if (px.second == Extended_simplex_type::UP && py.second == Extended_simplex_type::UP){
- new_dgm[0].push_back(pd_point);
- }
- // Relative
- else if (px.second == Extended_simplex_type::DOWN && py.second == Extended_simplex_type::DOWN){
- new_dgm[1].push_back(pd_point);
- }
- else{
- // Extended+
- if (px.first < py.first){
- new_dgm[2].push_back(pd_point);
- }
- //Extended-
- else{
- new_dgm[3].push_back(pd_point);
- }
- }
- }
- }
- return new_dgm;
- }
-
Simplex_tree_interface* collapse_edges(int nb_collapse_iteration) {
-#ifdef GUDHI_USE_EIGEN3
using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
std::vector<Filtered_edge> edges;
for (Simplex_handle sh : Base::skeleton_simplex_range(1)) {
@@ -205,7 +172,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
}
for (int iteration = 0; iteration < nb_collapse_iteration; iteration++) {
- edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
+ edges = Gudhi::collapse::flag_complex_collapse_edges(std::move(edges));
}
Simplex_tree_interface* collapsed_stree_ptr = new Simplex_tree_interface();
// Copy the original 0-skeleton
@@ -217,9 +184,6 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
collapsed_stree_ptr->insert({std::get<0>(remaining_edge), std::get<1>(remaining_edge)}, std::get<2>(remaining_edge));
}
return collapsed_stree_ptr;
-#else
- throw std::runtime_error("Unable to collapse edges as it requires Eigen3 >= 3.1.0.");
-#endif
}
void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data) {
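With the Eigen3 guards gone, `collapse_edges` is always available from Python; a minimal sketch matching the updated `test_collapse_edges` below:

```python
import gudhi as gd

st = gd.SimplexTree()
for edge in [[0, 1], [1, 2], [2, 3], [0, 3], [0, 2], [1, 3]]:
    st.insert(edge, 1.0)
assert st.num_simplices() == 10  # 4 vertices + 6 edges

st.collapse_edges()              # removes one dominated diagonal, preserving persistence
assert st.num_simplices() == 9
```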
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index f15284f3..f81e6137 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -286,3 +286,30 @@ def _weighted_doc_example(precision):
def test_weighted_doc_example():
for precision in ['fast', 'safe', 'exact']:
_weighted_doc_example(precision)
+
+def test_float_relative_precision():
+ assert AlphaComplex.get_float_relative_precision() == 1e-5
+ # Must be > 0.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(0.)
+ # Must be < 1.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(1.)
+
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations = list(st.get_filtration())
+
+ # Get a better precision
+ AlphaComplex.set_float_relative_precision(1e-15)
+ assert AlphaComplex.get_float_relative_precision() == 1e-15
+
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations_better_resolution = list(st.get_filtration())
+
+ assert len(filtrations) == len(filtrations_better_resolution)
+ for idx in range(len(filtrations)):
+ # check simplex is the same
+ assert filtrations[idx][0] == filtrations_better_resolution[idx][0]
+ # check the filtration values agree within the worst-case relative precision (1e-5)
+ assert filtrations[idx][1] == pytest.approx(filtrations_better_resolution[idx][1], rel=1e-5)
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
new file mode 100644
index 00000000..dca001a9
--- /dev/null
+++ b/src/python/test/test_diff.py
@@ -0,0 +1,78 @@
+from gudhi.tensorflow import *
+import numpy as np
+import tensorflow as tf
+import gudhi as gd
+
+def test_rips_diff():
+
+ Xinit = np.array([[1.,1.],[2.,2.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[-.5,-.5],[.5,.5]]),1) <= 1e-6
+
+def test_cubical_diff():
+
+ Xinit = np.array([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]]),1) <= 1e-6
+
+def test_nonsquare_cubical_diff():
+
+ Xinit = np.array([[-1.,1.,0.],[1.,1.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert tf.norm(grads[0]-tf.constant([[0.,0.5,-0.5],[0.,0.,0.]]),1) <= 1e-6
+
+def test_st_diff():
+
+ st = gd.SimplexTree()
+ st.insert([0])
+ st.insert([1])
+ st.insert([2])
+ st.insert([3])
+ st.insert([4])
+ st.insert([5])
+ st.insert([6])
+ st.insert([7])
+ st.insert([8])
+ st.insert([9])
+ st.insert([10])
+ st.insert([0, 1])
+ st.insert([1, 2])
+ st.insert([2, 3])
+ st.insert([3, 4])
+ st.insert([4, 5])
+ st.insert([5, 6])
+ st.insert([6, 7])
+ st.insert([7, 8])
+ st.insert([8, 9])
+ st.insert([9, 10])
+
+ Finit = np.array([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=np.float32)
+ F = tf.Variable(initial_value=Finit, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [F])
+
+ assert tf.math.reduce_all(tf.math.equal(grads[0].indices, tf.constant([2,4])))
+ assert tf.math.reduce_all(tf.math.equal(grads[0].values, tf.constant([-1.,1.])))
+
diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py
index e46d616c..b276f041 100755
--- a/src/python/test/test_dtm.py
+++ b/src/python/test/test_dtm.py
@@ -91,11 +91,11 @@ def test_density():
def test_dtm_overflow_warnings():
pts = numpy.array([[10., 100000000000000000000000000000.], [1000., 100000000000000000000000000.]])
-
- with warnings.catch_warnings(record=True) as w:
- # TODO Test "keops" implementation as well when next version of pykeops (current is 1.5) is released (should fix the problem (cf. issue #543))
- dtm = DistanceToMeasure(2, implementation="hnsw")
- r = dtm.fit_transform(pts)
- assert len(w) == 1
- assert issubclass(w[0].category, RuntimeWarning)
- assert "Overflow" in str(w[0].message)
+ impl_warn = ["keops", "hnsw"]
+ for impl in impl_warn:
+ with warnings.catch_warnings(record=True) as w:
+ dtm = DistanceToMeasure(2, implementation=impl)
+ r = dtm.fit_transform(pts)
+ assert len(w) == 1
+ assert issubclass(w[0].category, RuntimeWarning)
+ assert "Overflow" in str(w[0].message)
diff --git a/src/python/test/test_persistence_graphical_tools.py b/src/python/test/test_persistence_graphical_tools.py
new file mode 100644
index 00000000..c19836b7
--- /dev/null
+++ b/src/python/test/test_persistence_graphical_tools.py
@@ -0,0 +1,121 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi as gd
+import numpy as np
+import matplotlib as plt
+import pytest
+
+
+def test_array_handler():
+ diags = np.array([[1, 2], [3, 4], [5, 6]], float)
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ np.testing.assert_array_equal(arr_diags[idx][1], diags[idx])
+
+ diags = [(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ assert arr_diags[idx][1] == diags[idx]
+
+ diags = [(0, (1.0, 2.0)), (0, (3.0, 4.0)), (0, (5.0, 6.0))]
+ assert gd.persistence_graphical_tools._array_handler(diags) == diags
+
+
+def test_min_birth_max_death():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (0.0, 1.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (0.0, 5.0)
+
+
+def test_limit_min_birth_max_death():
+ diags = [
+ (0, (2.0, float("inf"))),
+ (0, (2.0, float("inf"))),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (2.0, 3.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (2.0, 6.0)
+
+
+def test_limit_to_max_intervals():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ # check that no warning is emitted when max_intervals equals the number of intervals
+ with pytest.warns(None) as record:
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 10, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check diagrams are not sorted
+ assert truncated_diags == diags
+ assert len(record) == 0
+
+ # check that a warning is emitted when max_intervals is lower than the number of intervals
+ with pytest.warns(UserWarning) as record:
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 5, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check diagrams are truncated and sorted by life time
+ assert truncated_diags == [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ ]
+ assert len(record) == 1
+
+
+def _limit_plot_persistence(function):
+ pplot = function(persistence=[])
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[], legend=True)
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))])
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))], legend=True)
+ assert isinstance(pplot, plt.axes.SubplotBase)
+
+
+def test_limit_plot_persistence():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _limit_plot_persistence(function)
+
+
+def _non_existing_persistence_file(function):
+ with pytest.raises(FileNotFoundError):
+ function(persistence_file="pouetpouettralala.toubiloubabdou")
+
+
+def test_non_existing_persistence_file():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _non_existing_persistence_file(function)
diff --git a/src/python/test/test_remote_datasets.py b/src/python/test/test_remote_datasets.py
new file mode 100644
index 00000000..e5d2de82
--- /dev/null
+++ b/src/python/test/test_remote_datasets.py
@@ -0,0 +1,87 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from gudhi.datasets import remote
+
+import shutil
+import io
+import sys
+import pytest
+
+from os.path import isdir, expanduser, exists
+from os import remove, environ
+
+def test_data_home():
+ # Test _get_data_home and clear_data_home on new empty folder
+ empty_data_home = remote._get_data_home(data_home="empty_folder_for_test")
+ assert isdir(empty_data_home)
+
+ remote.clear_data_home(data_home=empty_data_home)
+ assert not isdir(empty_data_home)
+
+def test_fetch_remote():
+ # Test fetch with a wrong checksum
+ with pytest.raises(OSError):
+ remote._fetch_remote("https://raw.githubusercontent.com/GUDHI/gudhi-data/main/points/spiral_2d/spiral_2d.npy", "tmp_spiral_2d.npy", file_checksum = 'XXXXXXXXXX')
+ assert not exists("tmp_spiral_2d.npy")
+
+def _get_bunny_license_print(accept_license = False):
+ capturedOutput = io.StringIO()
+ # Redirect stdout
+ sys.stdout = capturedOutput
+
+ bunny_arr = remote.fetch_bunny("./tmp_for_test/bunny.npy", accept_license)
+ assert bunny_arr.shape == (35947, 3)
+ del bunny_arr
+ remove("./tmp_for_test/bunny.npy")
+
+ # Reset redirect
+ sys.stdout = sys.__stdout__
+ return capturedOutput
+
+def test_print_bunny_license():
+ # Test not printing bunny.npy LICENSE when accept_license = True
+ assert "" == _get_bunny_license_print(accept_license = True).getvalue()
+ # Test printing bunny.LICENSE file when fetching bunny.npy with accept_license = False (default)
+ with open("./tmp_for_test/bunny.LICENSE") as f:
+ assert f.read().rstrip("\n") == _get_bunny_license_print().getvalue().rstrip("\n")
+ shutil.rmtree("./tmp_for_test")
+
+def test_fetch_remote_datasets_wrapped():
+ # Test the fetch_spiral_2d and fetch_bunny wrapping functions with a data directory different from the default (run twice to cover the case of already fetched files)
+ # Default case is not tested because it would fail in case the user sets the 'GUDHI_DATA' environment variable locally
+ for i in range(2):
+ spiral_2d_arr = remote.fetch_spiral_2d("./another_fetch_folder_for_test/spiral_2d.npy")
+ assert spiral_2d_arr.shape == (114562, 2)
+
+ bunny_arr = remote.fetch_bunny("./another_fetch_folder_for_test/bunny.npy")
+ assert bunny_arr.shape == (35947, 3)
+
+ # Check that the directory was created
+ assert isdir("./another_fetch_folder_for_test")
+ # Check downloaded files
+ assert exists("./another_fetch_folder_for_test/spiral_2d.npy")
+ assert exists("./another_fetch_folder_for_test/bunny.npy")
+ assert exists("./another_fetch_folder_for_test/bunny.LICENSE")
+
+ # Remove test folders
+ del spiral_2d_arr
+ del bunny_arr
+ shutil.rmtree("./another_fetch_folder_for_test")
+
+def test_gudhi_data_env():
+ # Set environment variable "GUDHI_DATA"
+ environ["GUDHI_DATA"] = "./test_folder_from_env_var"
+ bunny_arr = remote.fetch_bunny()
+ assert bunny_arr.shape == (35947, 3)
+ assert exists("./test_folder_from_env_var/points/bunny/bunny.npy")
+ assert exists("./test_folder_from_env_var/points/bunny/bunny.LICENSE")
+ # Remove test folder
+ del bunny_arr
+ shutil.rmtree("./test_folder_from_env_var")
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index d219ce7a..4a455bb6 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -152,7 +152,26 @@ def test_vectorization_empty_diagrams():
scv = Entropy(mode="vector", normalized=False, resolution=random_resolution)(empty_diag)
assert not np.any(scv)
assert scv.shape[0] == random_resolution
-
+
+def test_entropy_miscalculation():
+ diag_ex = np.array([[0.0,1.0], [0.0,1.0], [0.0,2.0]])
+ def pe(pd):
+ l = pd[:,1] - pd[:,0]
+ l = l/sum(l)
+ return -np.dot(l, np.log(l))
+ sce = Entropy(mode="scalar")
+ assert [[pe(diag_ex)]] == sce.fit_transform([diag_ex])
+ sce = Entropy(mode="vector", resolution=4, normalized=False)
+ pef = [-1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
+ -1/4*np.log(1/4)-1/4*np.log(1/4)-1/2*np.log(1/2),
+ -1/2*np.log(1/2),
+ 0.0]
+ assert all(([pef] == sce.fit_transform([diag_ex]))[0])
+ sce = Entropy(mode="vector", resolution=4, normalized=True)
+ pefN = (sce.fit_transform([diag_ex]))[0]
+ area = np.linalg.norm(pefN, ord=1)
+ assert area==1
+
def test_kernel_empty_diagrams():
empty_diag = np.empty(shape = [0, 2])
assert SlicedWassersteinDistance(num_directions=100)(empty_diag, empty_diag) == 0.
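For reference, the values asserted in `test_entropy_miscalculation` above follow the usual persistent entropy definition: the lifetimes are normalized into a probability vector and fed to the Shannon entropy,

```latex
p_i = \frac{d_i - b_i}{\sum_j (d_j - b_j)}, \qquad
E(D) = -\sum_i p_i \log p_i .
```

For the diagram `{(0,1), (0,1), (0,2)}` this gives `p = (1/4, 1/4, 1/2)`, matching the helper `pe` in the test.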
diff --git a/src/python/test/test_representations_preprocessing.py b/src/python/test/test_representations_preprocessing.py
new file mode 100644
index 00000000..838cf30c
--- /dev/null
+++ b/src/python/test/test_representations_preprocessing.py
@@ -0,0 +1,39 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.representations.preprocessing import DimensionSelector
+import numpy as np
+import pytest
+
+H0_0 = np.array([0.0, 0.0])
+H1_0 = np.array([1.0, 0.0])
+H0_1 = np.array([0.0, 1.0])
+H1_1 = np.array([1.0, 1.0])
+H0_2 = np.array([0.0, 2.0])
+H1_2 = np.array([1.0, 2.0])
+
+
+def test_dimension_selector():
+ X = [[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]]
+ ds = DimensionSelector(index=0)
+ h0 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h0[0], H0_0)
+ np.testing.assert_array_equal(h0[1], H0_1)
+ np.testing.assert_array_equal(h0[2], H0_2)
+
+ ds = DimensionSelector(index=1)
+ h1 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h1[0], H1_0)
+ np.testing.assert_array_equal(h1[1], H1_1)
+ np.testing.assert_array_equal(h1[2], H1_2)
+
+ ds = DimensionSelector(index=2)
+ with pytest.raises(IndexError):
+ h2 = ds.fit_transform([[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]])
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 15279c28..59fd889a 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -8,10 +8,9 @@
- YYYY/MM Author: Description of the modification
"""
-from gudhi import SimplexTree, __GUDHI_USE_EIGEN3
+from gudhi import SimplexTree
import numpy as np
import pytest
-import numpy as np
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -322,6 +321,10 @@ def test_extend_filtration():
]
dgms = st.extended_persistence(min_persistence=-1.0)
+ assert len(dgms) == 4
+ # Sort by (death-birth) descending - we are only interested in those with the longest life span
+ for idx in range(4):
+ dgms[idx] = sorted(dgms[idx], key=lambda x: (-abs(x[1][0] - x[1][1])))
assert dgms[0][0][1][0] == pytest.approx(2.0)
assert dgms[0][0][1][1] == pytest.approx(3.0)
@@ -358,16 +361,11 @@ def test_collapse_edges():
assert st.num_simplices() == 10
- if __GUDHI_USE_EIGEN3:
- st.collapse_edges()
- assert st.num_simplices() == 9
- assert st.find([1, 3]) == False
- for simplex in st.get_skeleton(0):
- assert simplex[1] == 1.0
- else:
- # If no Eigen3, collapse_edges throws an exception
- with pytest.raises(RuntimeError):
- st.collapse_edges()
+ st.collapse_edges()
+ assert st.num_simplices() == 9
+ assert st.find([0, 2]) == False # [1, 3] would be fine as well
+ for simplex in st.get_skeleton(0):
+ assert simplex[1] == 1.0
def test_reset_filtration():
@@ -619,7 +617,7 @@ def test_expansion_with_blocker():
def blocker(simplex):
try:
- # Block all simplices that countains vertex 6
+ # Block all simplices that contain vertex 6
simplex.index(6)
print(simplex, " is blocked")
return True
diff --git a/src/python/test/test_sklearn_cubical_persistence.py b/src/python/test/test_sklearn_cubical_persistence.py
new file mode 100644
index 00000000..1c05a215
--- /dev/null
+++ b/src/python/test/test_sklearn_cubical_persistence.py
@@ -0,0 +1,59 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.sklearn.cubical_persistence import CubicalPersistence
+import numpy as np
+from sklearn import datasets
+
+CUBICAL_PERSISTENCE_H0_IMG0 = np.array([[0.0, 6.0], [0.0, 8.0], [0.0, np.inf]])
+
+
+def test_simple_constructor_from_top_cells():
+ cells = datasets.load_digits().images[0]
+ cp = CubicalPersistence(homology_dimensions=0)
+ np.testing.assert_array_equal(cp._CubicalPersistence__transform_only_this_dim(cells), CUBICAL_PERSISTENCE_H0_IMG0)
+ cp = CubicalPersistence(homology_dimensions=[0, 2])
+ diags = cp._CubicalPersistence__transform(cells)
+ assert len(diags) == 2
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+
+def test_simple_constructor_from_top_cells_list():
+ digits = datasets.load_digits().images[:10]
+ cp = CubicalPersistence(homology_dimensions=0, n_jobs=-2)
+
+ diags = cp.fit_transform(digits)
+ assert len(diags) == 10
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+ cp = CubicalPersistence(homology_dimensions=[0, 1], n_jobs=-1)
+ diagsH0H1 = cp.fit_transform(digits)
+ assert len(diagsH0H1) == 10
+ for idx in range(10):
+ np.testing.assert_array_equal(diags[idx], diagsH0H1[idx][0])
+
+def test_simple_constructor_from_flattened_cells():
+ cells = datasets.load_digits().images[0]
+ # Non-square (extended) flattened cells
+ flat_cells = np.hstack((cells, np.zeros((cells.shape[0], 2)))).flatten()
+
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([flat_cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+ # Non-square (extended) non-flattened cells
+ cells = np.hstack((cells, np.zeros((cells.shape[0], 2))))
+
+ # The aim of this second part of the test is to exercise the reshape even when it is not mandatory
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
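Since `CubicalPersistence` follows the scikit-learn estimator API, it can also be chained with a vectorization step; a hedged sketch (the representation classes come from `gudhi.representations`, and the parameters are illustrative):

```python
from sklearn.pipeline import Pipeline
from gudhi.sklearn.cubical_persistence import CubicalPersistence
from gudhi.representations import DiagramSelector, PersistenceImage

pipe = Pipeline(
    [
        ("persistence", CubicalPersistence(homology_dimensions=0, n_jobs=-2)),
        ("finite", DiagramSelector(use=True, point_type="finite")),  # drop infinite points
        ("vectorize", PersistenceImage(resolution=[20, 20])),
    ]
)
```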
diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py
index 4019852e..3431f372 100755
--- a/src/python/test/test_subsampling.py
+++ b/src/python/test/test_subsampling.py
@@ -91,7 +91,7 @@ def test_simple_choose_n_farthest_points_randomed():
assert gudhi.choose_n_farthest_points(points=[], nb_points=1) == []
assert gudhi.choose_n_farthest_points(points=point_set, nb_points=0) == []
- # Go furter than point set on purpose
+ # Go further than point set on purpose
for iter in range(1, 10):
sub_set = gudhi.choose_n_farthest_points(points=point_set, nb_points=iter)
for sub in sub_set:
@@ -117,7 +117,7 @@ def test_simple_pick_n_random_points():
assert gudhi.pick_n_random_points(points=[], nb_points=1) == []
assert gudhi.pick_n_random_points(points=point_set, nb_points=0) == []
- # Go furter than point set on purpose
+ # Go further than point set on purpose
for iter in range(1, 10):
sub_set = gudhi.pick_n_random_points(points=point_set, nb_points=iter)
for sub in sub_set: