summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGard Spreemann <gspr@nonempty.org>2022-01-14 09:15:35 +0100
committerGard Spreemann <gspr@nonempty.org>2022-01-14 09:15:35 +0100
commitdbc404626955aee632fa47ee7a4d4c3add7d6188 (patch)
treeb7acfc83c9ba316216a93e9a7d14c68c11d92804
parent2c221bfcf8effff9b010de8b2e13a22f6bc15201 (diff)
parent17c3c6a07cdb1b4d4f735f3bc996af30e216dfbe (diff)
Merge tag 'tags/gudhi-release-3.5.0' into dfsg/latest
-rw-r--r--.appveyor.yml55
-rw-r--r--.circleci/config.yml20
-rw-r--r--.github/build-requirements.txt5
-rw-r--r--.github/for_maintainers/new_gudhi_version_creation.md66
-rw-r--r--.github/how_to_use_github_to_contribute_to_gudhi.md3
-rw-r--r--.github/next_release.md15
-rw-r--r--.github/test-requirements.txt15
-rw-r--r--.github/workflows/pip-build-linux.yml20
-rw-r--r--.github/workflows/pip-build-osx.yml9
-rw-r--r--.github/workflows/pip-build-windows.yml36
-rw-r--r--.github/workflows/pip-packaging-linux.yml62
-rw-r--r--.github/workflows/pip-packaging-osx.yml9
-rw-r--r--.github/workflows/pip-packaging-windows.yml32
-rw-r--r--.gitmodules3
-rw-r--r--CMakeGUDHIVersion.txt4
-rw-r--r--CMakeLists.txt1
-rw-r--r--Dockerfile_for_circleci_image69
-rw-r--r--Dockerfile_for_circleci_image_without_cgal55
-rw-r--r--Dockerfile_for_pip52
-rw-r--r--Dockerfile_gudhi_installation80
-rw-r--r--azure-pipelines.yml7
-rw-r--r--biblio/bibliography.bib16
-rw-r--r--data/persistence_diagram/PD1.pers3
-rw-r--r--data/persistence_diagram/PD2.pers2
m---------ext/gudhi-deploy0
m---------ext/hera0
-rw-r--r--scripts/cpp_examples_for_doxygen.py16
-rwxr-xr-xscripts/create_gudhi_version.sh66
-rw-r--r--src/Alpha_complex/doc/Intro_alpha_complex.h16
-rw-r--r--src/Alpha_complex/include/gudhi/Alpha_complex.h6
-rw-r--r--src/Alpha_complex/include/gudhi/Alpha_complex_3d.h6
-rw-r--r--src/Bottleneck_distance/doc/Intro_bottleneck_distance.h2
-rw-r--r--src/Bottleneck_distance/doc/perturb_pd.pngbin20864 -> 15532 bytes
-rw-r--r--src/Bottleneck_distance/utilities/bottleneckdistance.md4
-rw-r--r--src/CMakeLists.txt1
-rw-r--r--src/Cech_complex/benchmark/cech_complex_benchmark.cpp2
-rw-r--r--src/Cech_complex/doc/Intro_cech_complex.h6
-rw-r--r--src/Collapse/doc/intro_edge_collapse.h4
-rw-r--r--src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h46
-rw-r--r--src/Coxeter_triangulation/concept/IntersectionOracle.h104
-rw-r--r--src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h81
-rw-r--r--src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h56
-rw-r--r--src/Coxeter_triangulation/doc/custom_function.pngbin0 -> 256301 bytes
-rw-r--r--src/Coxeter_triangulation/doc/flat_torus_with_boundary.pngbin0 -> 222900 bytes
-rw-r--r--src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h240
-rw-r--r--src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.pngbin0 -> 589120 bytes
-rw-r--r--src/Coxeter_triangulation/doc/two_triangulations.pngbin0 -> 39507 bytes
-rw-r--r--src/Coxeter_triangulation/example/CMakeLists.txt19
-rw-r--r--src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp55
-rw-r--r--src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt26
-rw-r--r--src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp87
-rw-r--r--src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp72
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h77
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h340
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h285
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h40
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h219
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h157
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h64
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h93
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h110
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h91
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h80
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h69
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h85
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h79
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h71
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h78
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h88
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Negation.h84
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h111
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/Translate.h89
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h72
-rw-r--r--src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h60
-rw-r--r--src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h171
-rw-r--r--src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h550
-rw-r--r--src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h154
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h261
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h270
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h216
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h83
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h114
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h93
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h254
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h120
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h111
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h54
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h73
-rw-r--r--src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h66
-rw-r--r--src/Coxeter_triangulation/test/CMakeLists.txt30
-rw-r--r--src/Coxeter_triangulation/test/cell_complex_test.cpp59
-rw-r--r--src/Coxeter_triangulation/test/freud_triang_test.cpp114
-rw-r--r--src/Coxeter_triangulation/test/function_test.cpp158
-rw-r--r--src/Coxeter_triangulation/test/manifold_tracing_test.cpp62
-rw-r--r--src/Coxeter_triangulation/test/oracle_test.cpp56
-rw-r--r--src/Coxeter_triangulation/test/perm_rep_test.cpp61
-rw-r--r--src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp36
-rw-r--r--src/Doxyfile.in491
-rw-r--r--src/Nerve_GIC/doc/Intro_graph_induced_complex.h10
-rw-r--r--src/Persistent_cohomology/doc/Intro_persistent_cohomology.h14
-rw-r--r--src/Persistent_cohomology/example/CMakeLists.txt6
-rw-r--r--src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h2
-rw-r--r--src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h18
-rw-r--r--src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp176
-rw-r--r--src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp84
-rw-r--r--src/Rips_complex/doc/Intro_rips_complex.h24
-rw-r--r--src/Rips_complex/include/gudhi/Sparse_rips_complex.h115
-rw-r--r--src/Simplex_tree/doc/Intro_simplex_tree.h8
-rw-r--r--src/Simplex_tree/example/CMakeLists.txt2
-rw-r--r--src/Simplex_tree/example/README73
-rw-r--r--src/Simplex_tree/include/gudhi/Simplex_tree.h19
-rw-r--r--src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h9
-rw-r--r--src/Spatial_searching/doc/Intro_spatial_searching.h2
-rw-r--r--src/Spatial_searching/include/gudhi/Kd_tree_search.h2
-rw-r--r--src/Subsampling/doc/Intro_subsampling.h6
-rw-r--r--src/Subsampling/include/gudhi/choose_n_farthest_points.h55
-rw-r--r--src/Subsampling/test/test_choose_n_farthest_points.cpp5
-rw-r--r--src/Tangential_complex/benchmark/benchmark_tc.cpp2
-rw-r--r--src/Tangential_complex/doc/Intro_tangential_complex.h4
-rw-r--r--src/Toplex_map/benchmark/CMakeLists.txt4
-rw-r--r--src/Witness_complex/doc/Witness_complex_doc.h4
-rw-r--r--src/cmake/modules/GUDHI_doxygen_target.cmake47
-rw-r--r--src/cmake/modules/GUDHI_third_party_libraries.cmake3
-rw-r--r--src/cmake/modules/GUDHI_user_version_target.cmake11
-rw-r--r--src/common/benchmark/CMakeLists.txt4
-rw-r--r--src/common/doc/examples.h224
-rw-r--r--src/common/doc/header.html1
-rw-r--r--src/common/doc/installation.h177
-rw-r--r--src/common/doc/main_page.md28
-rw-r--r--src/common/include/gudhi/Points_3D_off_io.h4
-rw-r--r--src/common/include/gudhi/Points_off_io.h4
-rw-r--r--src/common/include/gudhi/random_point_generators.h65
-rw-r--r--src/common/include/gudhi/reader_utils.h6
-rw-r--r--src/common/test/test_distance_matrix_reader.cpp2
-rw-r--r--src/common/utilities/off_file_from_shape_generator.cpp2
-rw-r--r--src/python/CMakeLists.txt231
-rw-r--r--src/python/doc/_templates/layout.html1
-rw-r--r--src/python/doc/alpha_complex_user.rst5
-rwxr-xr-xsrc/python/doc/conf.py5
-rw-r--r--src/python/doc/datasets_generators.inc14
-rw-r--r--src/python/doc/datasets_generators.rst105
-rw-r--r--src/python/doc/examples.rst1
-rw-r--r--src/python/doc/img/sphere_3d.pngbin0 -> 529148 bytes
-rw-r--r--src/python/doc/index.rst5
-rw-r--r--src/python/doc/installation.rst33
-rw-r--r--src/python/doc/wasserstein_distance_user.rst29
-rwxr-xr-xsrc/python/example/alpha_complex_diagram_persistence_from_off_file_example.py23
-rw-r--r--src/python/example/alpha_complex_from_generated_points_on_sphere_example.py35
-rwxr-xr-xsrc/python/example/alpha_complex_from_points_example.py2
-rwxr-xr-xsrc/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py5
-rw-r--r--src/python/gudhi/clustering/tomato.py4
-rw-r--r--src/python/gudhi/cubical_complex.pyx12
-rw-r--r--src/python/gudhi/datasets/__init__.py0
-rw-r--r--src/python/gudhi/datasets/generators/__init__.py0
-rw-r--r--src/python/gudhi/datasets/generators/_points.cc121
-rw-r--r--src/python/gudhi/datasets/generators/points.py59
-rw-r--r--src/python/gudhi/periodic_cubical_complex.pyx12
-rw-r--r--src/python/gudhi/point_cloud/knn.py12
-rw-r--r--src/python/gudhi/representations/vector_methods.py80
-rw-r--r--src/python/gudhi/simplex_tree.pxd4
-rw-r--r--src/python/gudhi/simplex_tree.pyx33
-rw-r--r--src/python/gudhi/wasserstein/wasserstein.py222
-rw-r--r--src/python/pyproject.toml3
-rw-r--r--src/python/setup.py.in11
-rwxr-xr-xsrc/python/test/test_cubical_complex.py25
-rwxr-xr-xsrc/python/test/test_datasets_generators.py39
-rwxr-xr-xsrc/python/test/test_dtm.py12
-rwxr-xr-xsrc/python/test/test_reader_utils.py2
-rwxr-xr-xsrc/python/test/test_representations.py71
-rwxr-xr-xsrc/python/test/test_rips_complex.py21
-rwxr-xr-xsrc/python/test/test_simplex_tree.py44
-rwxr-xr-xsrc/python/test/test_tomato.py2
-rwxr-xr-xsrc/python/test/test_wasserstein_distance.py109
173 files changed, 8849 insertions, 1418 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index a257debc..33458a28 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -1,5 +1,5 @@
image:
- - Visual Studio 2017
+ - Visual Studio 2019
build:
parallel: true
@@ -10,11 +10,10 @@ configuration:
environment:
# update the vcpkg cache even if build fails
- APPVEYOR_SAVE_CACHE_ON_ERROR: true
- PYTHON: "C:\\Python37-x64"
- CMAKE_GMP_FLAGS: -DGMP_INCLUDE_DIR="c:/Tools/vcpkg/installed/x64-windows/include" -DGMP_LIBRARIES="c:/Tools/vcpkg/installed/x64-windows/lib/mpir.lib"
- CMAKE_MPFR_FLAGS: -DMPFR_INCLUDE_DIR="c:/Tools/vcpkg/installed/x64-windows/include" -DMPFR_LIBRARIES="c:/Tools/vcpkg/installed/x64-windows/lib/mpfr.lib"
- CMAKE_VCPKG_FLAGS: -DCMAKE_TOOLCHAIN_FILE=c:/Tools/vcpkg/scripts/buildsystems/vcpkg.cmake
+ # APPVEYOR_SAVE_CACHE_ON_ERROR: true
+ PYTHON: "C:\\Python39-x64"
+ PYTHONPATH: "C:\\Python39-x64\\lib\\site-packages"
+ CMAKE_VCPKG_FLAGS: -DVCPKG_TARGET_TRIPLET=x64-windows -DCMAKE_TOOLCHAIN_FILE=c:\Tools\vcpkg\scripts\buildsystems\vcpkg.cmake
matrix:
- target: Examples
@@ -30,38 +29,52 @@ environment:
CMAKE_FLAGS: -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON
-cache:
- - c:\Tools\vcpkg\installed
- - '%LOCALAPPDATA%\pip\Cache'
+#cache:
+# - c:\Tools\vcpkg\installed
+# - '%LOCALAPPDATA%\pip\Cache'
init:
- echo %target%
-# tbb:x64-windows
install:
- git submodule update --init
- - vcpkg install boost-disjoint-sets:x64-windows boost-serialization:x64-windows boost-date-time:x64-windows boost-system:x64-windows boost-filesystem:x64-windows boost-units:x64-windows boost-thread:x64-windows boost-program-options:x64-windows eigen3:x64-windows mpfr:x64-windows mpir:x64-windows cgal:x64-windows
- - SET PATH=c:\Tools\vcpkg\installed\x64-windows\bin;%PATH%
- - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PYTHON%\Library\bin;%PATH%
- - SET PYTHONPATH=%PYTHON%\\Lib\\site-packages;%PYTHONPATH%
- - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ - vcpkg update
+ - vcpkg remove --outdated
+ - vcpkg upgrade --no-dry-run
+ - vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
+ - dir "C:\Tools\vcpkg\installed\x64-windows\bin\"
+ - vcpkg integrate install
+ - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ - "set PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
- python --version
- pip --version
- - python -m pip install --user --upgrade pip
- - python -m pip install --user -r .github/build-requirements.txt
+ - python -m pip install --upgrade pip
+ - python -m pip install --upgrade setuptools
+ - python -m pip install -r ext\gudhi-deploy\build-requirements.txt
# No PyKeOps on windows, let's workaround this one.
- - for /F "tokens=*" %%A in (.github/test-requirements.txt) do python -m pip install --user %%A
+ - for /F "tokens=*" %%A in (ext\gudhi-deploy\test-requirements.txt) do python -m pip install %%A
+ - dir "c:\python39-x64\lib\site-packages"
+ - dir "%LOCALAPPDATA%\pip\Cache"
+ - python -c "from scipy import spatial; print(spatial.cKDTree)"
build_script:
- mkdir build
- cd build
- - cmake -G "Visual Studio 15 2017 Win64" %CMAKE_FLAGS% %CMAKE_GMP_FLAGS% %CMAKE_MPFR_FLAGS% %CMAKE_VCPKG_FLAGS% ..
+ - cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release %CMAKE_FLAGS% %CMAKE_VCPKG_FLAGS% ..
- if [%target%]==[Python] (
- cd src/python &
+ cd src\python &
+ dir . &
type setup.py &
- MSBuild Cython.sln /m /p:Configuration=Release /p:Platform=x64 &
+ copy "C:\Tools\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\" &
+ copy "C:\Tools\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\" &
+ copy "C:\Tools\vcpkg\installed\x64-windows\bin\tbb.dll" ".\gudhi\" &
+ copy "C:\Tools\vcpkg\installed\x64-windows\bin\tbbmalloc.dll" ".\gudhi\" &
+ python setup.py build_ext --inplace &
+ SET PYTHONPATH=%CD%;%PYTHONPATH% &
+ echo %PYTHONPATH% &
ctest -j 1 --output-on-failure -C Release
) else (
+ dir . &
MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 &
ctest -j 1 --output-on-failure -C Release -E diff_files
)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index d95b8d36..f6a875dd 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,7 +1,11 @@
version: 2.0
jobs:
+
+### With all third parties
+
examples:
docker:
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_circleci_image
- image: gudhi/ci_for_gudhi:latest
steps:
- checkout
@@ -86,17 +90,26 @@ jobs:
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version ..
+ make user_version
+ cd version
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..
make doxygen 2>&1 | tee dox.log
grep warning dox.log
- cp dox.log version/doc/html/
- cp -R version/doc/html /tmp/doxygen
+ cp dox.log html/
+ cp -R html /tmp/doxygen
- store_artifacts:
path: /tmp/doxygen
destination: doxygen
+
+### With all third parties, except CGAL and Eigen
+
examples_without_cgal_eigen:
docker:
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_circleci_image_without_cgal
- image: gudhi/ci_for_gudhi_wo_cgal:latest
steps:
- checkout
@@ -154,6 +167,9 @@ jobs:
python3 setup.py build_ext --inplace
ctest --output-on-failure
+
+### With all third parties, except CGAL
+
examples_without_cgal:
docker:
- image: gudhi/ci_for_gudhi_wo_cgal:latest
diff --git a/.github/build-requirements.txt b/.github/build-requirements.txt
deleted file mode 100644
index 7de60d23..00000000
--- a/.github/build-requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-setuptools
-wheel
-numpy
-Cython
-pybind11 \ No newline at end of file
diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md
index 4de81b8a..19ef168e 100644
--- a/.github/for_maintainers/new_gudhi_version_creation.md
+++ b/.github/for_maintainers/new_gudhi_version_creation.md
@@ -26,7 +26,7 @@ md5sum gudhi.@GUDHI_VERSION@.tar.gz > md5sum.txt
sha256sum gudhi.@GUDHI_VERSION@.tar.gz > sha256sum.txt
sha512sum gudhi.@GUDHI_VERSION@.tar.gz > sha512sum.txt
-make -j 4 all && ctest -j 4 --output-on-failure
+make && ctest --output-on-failure
```
***[Check there are no error]***
@@ -34,16 +34,21 @@ make -j 4 all && ctest -j 4 --output-on-failure
## Create the documentation
```bash
mkdir gudhi.doc.@GUDHI_VERSION@
-make doxygen 2>&1 | tee dox.log && grep warning dox.log
```
***[Check there are no error and the warnings]***
```bash
-cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp
cd gudhi.@GUDHI_VERSION@
rm -rf build; mkdir build; cd build
cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+make doxygen 2>&1 | tee dox.log && grep warning dox.log
+```
+
+***[Check there are no error and the warnings]***
+
+```bash
+cp -R html ../../gudhi.doc.@GUDHI_VERSION@/cpp
export LC_ALL=en_US.UTF-8 # cf. bug https://github.com/GUDHI/gudhi-devel/issues/111
make sphinx
```
@@ -56,27 +61,25 @@ cd ../..
tar -czvf gudhi.doc.@GUDHI_VERSION@.tar.gz gudhi.doc.@GUDHI_VERSION@
cd gudhi.@GUDHI_VERSION@/build
-make -j 4 all && ctest -j 4 --output-on-failure
+make && ctest --output-on-failure
```
***[Check there are no error]***
## Upload the documentation
-Upload by ftp the content of the directory gudhi.doc.@GUDHI_VERSION@/cpp in a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/doc/@GUDHI_VERSION@
+[GUDHI GitHub pages](https://gudhi.github.io/) is only used as a _"qualification"_ web hosting service.
+The _"production"_ web hosting service is https://files.inria.fr (cf. [this doc](https://doc-si.inria.fr/display/SU/Espace+web)
+or [this one](https://www.nextinpact.com/article/30325/109058-se-connecter-a-serveur-webdav-sous-linux-macos-ou-windows)).
-Upload by ftp the content of the directory gudhi.doc.@GUDHI_VERSION@/python in a new directory on ForgeLogin@scm.gforge.inria.fr:/home/groups/gudhi/htdocs/python/@GUDHI_VERSION@
+Upload the content of the directory gudhi.doc.@GUDHI_VERSION@/cpp in a new directory on gudhi WebDAV in doc/@GUDHI_VERSION@
+Delete the directory doc/latest on gudhi WebDAV.
+Copy gudhi WebDAV doc/@GUDHI_VERSION@ as doc/latest (no symbolic link with WebDAV).
+
+Upload the content of the directory gudhi.doc.@GUDHI_VERSION@/python in a new directory on gudhi WebDAV in python/@GUDHI_VERSION@
+Delete the directory python/latest on gudhi WebDAV.
+Copy gudhi WebDAV python/@GUDHI_VERSION@ as python/latest (no symbolic link with WebDAV).
-Through ssh, make the **latest** link to your new version of the documentation:
-```bash
-ssh ForgeLogin@scm.gforge.inria.fr
-cd /home/groups/gudhi/htdocs/doc
-rm latest
-ln -s @GUDHI_VERSION@ latest
-cd /home/groups/gudhi/htdocs/python
-rm latest
-ln -s @GUDHI_VERSION@ latest
-```
## Put a version label on files
@@ -90,7 +93,8 @@ ln -s @GUDHI_VERSION@ latest
## Pip package
-The pip package construction shall be started on release creation, you just have to check [gudhi github actions](https://github.com/GUDHI/gudhi-devel/actions) results.
+The pip package construction shall be started on release creation, you just have to check
+[gudhi github actions](https://github.com/GUDHI/gudhi-devel/actions) results.
The version number must be conform to [pep440](https://www.python.org/dev/peps/pep-0440/#pre-releases)
## Conda package
@@ -105,30 +109,22 @@ If you need to update conda tools (conda-build, conda-smithy, ...), add a commen
## Docker image
-You have to modify the `Dockerfile_gudhi_installation` at the root of this repository in order to use the last release, cf. lines:
+You have to modify the
+[Dockerfile_gudhi_installation](https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_gudhi_installation)
+in gudhi-deploy repository in order to use the last release, cf. lines:
```
...
-RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-@GUDHI_VERSION@/gudhi.@GUDHI_VERSION@.tar.gz" \
-&& tar xf gudhi.@GUDHI_VERSION@.tar.gz \
-&& cd gudhi.@GUDHI_VERSION@ \
+ARG GUDHI_VERSION="3.X.X"
...
```
-Build and push images to docker hub:
-```
-docker build -f Dockerfile_gudhi_installation -t gudhi/latest_gudhi_version:@GUDHI_VERSION@ .
-docker run --rm -it gudhi/latest_gudhi_version:@GUDHI_VERSION@
-```
-
-***[Check there are no error with utils and python version]***
+After pushing the changes the docker image build will be automatically performed for
+[latest_gudhi_version](https://hub.docker.com/repository/docker/gudhi/latest_gudhi_version)
+docker image on docker hub.
-```
-docker tag gudhi/latest_gudhi_version:@GUDHI_VERSION@ gudhi/latest_gudhi_version:latest
-docker push gudhi/latest_gudhi_version:latest
-docker push gudhi/latest_gudhi_version:@GUDHI_VERSION@
-```
+***[Check there are no error]***
## Mail sending
Send version mail to the following lists :
-* gudhi-devel@lists.gforge.inria.fr
-* gudhi-users@lists.gforge.inria.fr (not for release candidate)
+* gudhi-devel@inria.fr
+* gudhi-users@inria.fr (not for release candidate)
diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md
index 747ca39b..738c1ce9 100644
--- a/.github/how_to_use_github_to_contribute_to_gudhi.md
+++ b/.github/how_to_use_github_to_contribute_to_gudhi.md
@@ -33,6 +33,9 @@ Hera, used for Wasserstein distance, is available on an external git repository.
git submodule update --init
```
+[gudhi-deploy](https://github.com/GUDHI/gudhi-deploy) is used for Continuous Integration python
+requirements and will also be downloaded by the above command.
+
## Configuring a remote for a fork
```bash
git remote add upstream https://github.com/GUDHI/gudhi-devel.git
diff --git a/.github/next_release.md b/.github/next_release.md
index 26143b0e..50207638 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,19 +1,20 @@
We are pleased to announce the release 3.5.0 of the GUDHI library.
-As a major new feature, the GUDHI library now offers ...
+As a major new feature, the GUDHI library now offers Coxeter triangulations and points generators.
+The support for python 3.10 is available.
We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.4.0:
+Below is a list of changes made since GUDHI 3.4.1:
-- [Module](link)
- - ...
+- [Coxeter triangulation](https://gudhi.inria.fr/doc/latest/group__coxeter__triangulation.html)
+ - constructs a piecewise-linear approximation of an m-dimensional smooth manifold embedded in R^d using an ambient triangulation.
-- [Module](link)
- - ...
+- [Datasets generators](https://gudhi.inria.fr/python/latest/datasets_generators.html)
+ - the python module `points` enables the generation of points on a sphere or a flat torus.
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.4.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.5.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.4.1](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.5.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt
deleted file mode 100644
index d0803574..00000000
--- a/.github/test-requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-pytest
-pytest-cov
-sphinx
-sphinxcontrib-bibtex==1.0.0
-sphinx-paramlinks
-matplotlib
-scipy
-scikit-learn
-POT
-tensorflow
-tensorflow-addons
-torch<1.5
-pykeops
-hnswlib
-eagerpy
diff --git a/.github/workflows/pip-build-linux.yml b/.github/workflows/pip-build-linux.yml
index cf8ddadf..a2b4f085 100644
--- a/.github/workflows/pip-build-linux.yml
+++ b/.github/workflows/pip-build-linux.yml
@@ -4,18 +4,24 @@ on: [push, pull_request]
jobs:
build:
- name: build pip wheels
+ name: build pip wheel
runs-on: ubuntu-latest
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_pip
container: gudhi/pip_for_gudhi
steps:
- uses: actions/checkout@v1
with:
submodules: true
- - name: Build wheels for Python 3.9
+ - name: Build wheel for Python 3.10
run: |
- mkdir build_39
- cd build_39
- cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON39/bin/python ..
+ mkdir build_310
+ cd build_310
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON310/bin/python ..
cd src/python
- $PYTHON39/bin/python setup.py bdist_wheel
- auditwheel repair dist/*.whl \ No newline at end of file
+ $PYTHON310/bin/python setup.py bdist_wheel
+ auditwheel repair dist/*.whl
+ - name: Install and test wheel for Python 3.10
+ run: |
+ $PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
+ $PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
diff --git a/.github/workflows/pip-build-osx.yml b/.github/workflows/pip-build-osx.yml
index 50b8b09c..99d515ff 100644
--- a/.github/workflows/pip-build-osx.yml
+++ b/.github/workflows/pip-build-osx.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.9']
+ python-version: ['3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -22,7 +22,7 @@ jobs:
run: |
brew update || true
brew install boost eigen gmp mpfr cgal || true
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
- name: Build python wheel
run: |
@@ -32,3 +32,8 @@ jobs:
cmake -DCMAKE_BUILD_TYPE=Release -DPython_ADDITIONAL_VERSIONS=3 ..
cd src/python
python setup.py bdist_wheel
+ - name: Install and test python wheel
+ run: |
+ python -m pip install --user pytest build/src/python/dist/*.whl
+ python -c "import gudhi; print(gudhi.__version__)"
+ python -m pytest src/python/test/test_alpha_complex.py
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index aacbbc52..954b59d5 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.9']
+ python-version: ['3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -20,18 +20,30 @@ jobs:
architecture: x64
- name: Install dependencies
run: |
- vcpkg update
- vcpkg upgrade --no-dry-run
- type c:/vcpkg/ports/cgal/portfile.cmake
+ set VCPKG_BUILD_TYPE=release
vcpkg install eigen3 cgal --triplet x64-windows
- python -m pip install --user -r .github/build-requirements.txt
+ vcpkg version
+ ls "C:\vcpkg\installed\x64-windows\bin\"
+ python -m pip install --user -r .\ext\gudhi-deploy\build-requirements.txt
python -m pip list
- - name: Build python wheel
+ - name: Build python wheel and install it
run: |
mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DGMP_INCLUDE_DIR="c:/vcpkg/installed/x64-windows/include" -DGMP_LIBRARIES="c:/vcpkg/installed/x64-windows/lib/mpir.lib" -DGMP_LIBRARIES_DIR="c:/vcpkg/installed/x64-windows/lib" -DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows -DPython_ADDITIONAL_VERSIONS=3 ..
- cd src/python
- cp c:/vcpkg/installed/x64-windows/bin/mpfr.dll gudhi/
- cp c:/vcpkg/installed/x64-windows/bin/mpir.dll gudhi/
- python setup.py bdist_wheel \ No newline at end of file
+ cd ".\build\"
+ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ Get-Location
+ dir
+ cd ".\src\python\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ python setup.py bdist_wheel
+ ls dist
+ cd ".\dist\"
+ Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
+ - name: Test python wheel
+ run: |
+ Get-Location
+ dir
+ python -m pip install --user pytest
+ python -c "import gudhi; print(gudhi.__version__)"
+ python -m pytest ".\src\python\test\test_alpha_complex.py"
diff --git a/.github/workflows/pip-packaging-linux.yml b/.github/workflows/pip-packaging-linux.yml
index 469c3b3b..98173ed3 100644
--- a/.github/workflows/pip-packaging-linux.yml
+++ b/.github/workflows/pip-packaging-linux.yml
@@ -6,22 +6,15 @@ on:
jobs:
build:
- name: build pip wheels
+ name: build pip wheel
runs-on: ubuntu-latest
+ # cf. https://github.com/GUDHI/gudhi-deploy/blob/main/Dockerfile_for_pip
container: gudhi/pip_for_gudhi
steps:
- uses: actions/checkout@v1
with:
submodules: true
- - name: Build wheels for Python 3.5
- run: |
- mkdir build_35
- cd build_35
- cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON35/bin/python ..
- cd src/python
- $PYTHON35/bin/python setup.py bdist_wheel
- auditwheel repair dist/*.whl
- - name: Build wheels for Python 3.6
+ - name: Build wheel for Python 3.6
run: |
mkdir build_36
cd build_36
@@ -29,7 +22,12 @@ jobs:
cd src/python
$PYTHON36/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
- - name: Build wheels for Python 3.7
+ - name: Install and test wheel for Python 3.6
+ run: |
+ $PYTHON36/bin/python -m pip install --user pytest build_36/src/python/dist/*.whl
+ $PYTHON36/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON36/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.7
run: |
mkdir build_37
cd build_37
@@ -37,7 +35,12 @@ jobs:
cd src/python
$PYTHON37/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
- - name: Build wheels for Python 3.8
+ - name: Install and test wheel for Python 3.7
+ run: |
+ $PYTHON37/bin/python -m pip install --user pytest build_37/src/python/dist/*.whl
+ $PYTHON37/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON37/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.8
run: |
mkdir build_38
cd build_38
@@ -45,7 +48,12 @@ jobs:
cd src/python
$PYTHON38/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
- - name: Build wheels for Python 3.9
+ - name: Install and test wheel for Python 3.8
+ run: |
+ $PYTHON38/bin/python -m pip install --user pytest build_38/src/python/dist/*.whl
+ $PYTHON38/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON38/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.9
run: |
mkdir build_39
cd build_39
@@ -53,13 +61,31 @@ jobs:
cd src/python
$PYTHON39/bin/python setup.py bdist_wheel
auditwheel repair dist/*.whl
+ - name: Install and test wheel for Python 3.9
+ run: |
+ $PYTHON39/bin/python -m pip install --user pytest build_39/src/python/dist/*.whl
+ $PYTHON39/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON39/bin/python -m pytest src/python/test/test_alpha_complex.py
+ - name: Build wheel for Python 3.10
+ run: |
+ mkdir build_310
+ cd build_310
+ cmake -DCMAKE_BUILD_TYPE=Release -DPYTHON_EXECUTABLE=$PYTHON310/bin/python ..
+ cd src/python
+ $PYTHON310/bin/python setup.py bdist_wheel
+ auditwheel repair dist/*.whl
+ - name: Install and test wheel for Python 3.10
+ run: |
+ $PYTHON310/bin/python -m pip install --user pytest build_310/src/python/dist/*.whl
+ $PYTHON310/bin/python -c "import gudhi; print(gudhi.__version__)"
+ $PYTHON310/bin/python -m pytest src/python/test/test_alpha_complex.py
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
run: |
- $PYTHON39/bin/python -m twine upload build_35/src/python/wheelhouse/*
- $PYTHON39/bin/python -m twine upload build_36/src/python/wheelhouse/*
- $PYTHON39/bin/python -m twine upload build_37/src/python/wheelhouse/*
- $PYTHON39/bin/python -m twine upload build_38/src/python/wheelhouse/*
- $PYTHON39/bin/python -m twine upload build_39/src/python/wheelhouse/* \ No newline at end of file
+ $PYTHON36/bin/python -m twine upload build_36/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_37/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_38/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_39/src/python/wheelhouse/*
+ $PYTHON36/bin/python -m twine upload build_310/src/python/wheelhouse/*
diff --git a/.github/workflows/pip-packaging-osx.yml b/.github/workflows/pip-packaging-osx.yml
index 46441e65..7417300a 100644
--- a/.github/workflows/pip-packaging-osx.yml
+++ b/.github/workflows/pip-packaging-osx.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
+ python-version: ['3.7', '3.8', '3.9', '3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -24,7 +24,7 @@ jobs:
run: |
brew update || true
brew install boost eigen gmp mpfr cgal || true
- python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user twine delocate
- name: Build python wheel
run: |
@@ -34,6 +34,11 @@ jobs:
cmake -DCMAKE_BUILD_TYPE=Release -DPython_ADDITIONAL_VERSIONS=3 ..
cd src/python
python setup.py bdist_wheel
+ - name: Install and test python wheel
+ run: |
+ python -m pip install --user pytest build/src/python/dist/*.whl
+ python -c "import gudhi; print(gudhi.__version__)"
+ python -m pytest src/python/test/test_alpha_complex.py
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 3a751486..962ae68a 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.5', '3.6', '3.7', '3.8', '3.9']
+ python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
@@ -22,22 +22,32 @@ jobs:
architecture: x64
- name: Install dependencies
run: |
- vcpkg update
- vcpkg upgrade --no-dry-run
- type c:/vcpkg/ports/cgal/portfile.cmake
+ set VCPKG_BUILD_TYPE=release
vcpkg install eigen3 cgal --triplet x64-windows
- python -m pip install --user -r .github/build-requirements.txt
+ vcpkg version
+ ls "C:\vcpkg\installed\x64-windows\bin\"
+ python -m pip install --user -r .\ext\gudhi-deploy\build-requirements.txt
python -m pip install --user twine
python -m pip list
- - name: Build python wheel
+ - name: Build python wheel and install it
run: |
mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DGMP_INCLUDE_DIR="c:/vcpkg/installed/x64-windows/include" -DGMP_LIBRARIES="c:/vcpkg/installed/x64-windows/lib/mpir.lib" -DGMP_LIBRARIES_DIR="c:/vcpkg/installed/x64-windows/lib" -DCMAKE_TOOLCHAIN_FILE=C:/vcpkg/scripts/buildsystems/vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows -DPython_ADDITIONAL_VERSIONS=3 ..
- cd src/python
- cp c:/vcpkg/installed/x64-windows/bin/mpfr.dll gudhi/
- cp c:/vcpkg/installed/x64-windows/bin/mpir.dll gudhi/
+ cd ".\build\"
+ cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ Get-Location
+ dir
+ cd ".\src\python\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
python setup.py bdist_wheel
+ ls dist
+ cd ".\dist\"
+ Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
+ - name: Test python wheel
+ run: |
+ python -m pip install --user pytest
+ python -c "import gudhi; print(gudhi.__version__)"
+ python -m pytest ".\src\python\test\test_alpha_complex.py"
- name: Publish on PyPi
env:
TWINE_USERNAME: __token__
diff --git a/.gitmodules b/.gitmodules
index f70c570d..2aa8ad96 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,6 @@
[submodule "ext/hera"]
path = ext/hera
url = https://github.com/grey-narn/hera.git
+[submodule "ext/gudhi-deploy"]
+ path = ext/gudhi-deploy
+ url = https://github.com/GUDHI/gudhi-deploy
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index 5b0777a6..75be0646 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,8 +1,8 @@
# Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 4)
+set (GUDHI_MINOR_VERSION 5)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
-set (GUDHI_PATCH_VERSION 1)
+set (GUDHI_PATCH_VERSION 0)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
message(STATUS "GUDHI version : ${GUDHI_VERSION}")
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4257a025..d0cf6a25 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -27,6 +27,7 @@ add_gudhi_module(Bitmap_cubical_complex)
add_gudhi_module(Bottleneck_distance)
add_gudhi_module(Collapse)
add_gudhi_module(Contraction)
+add_gudhi_module(Coxeter_triangulation)
add_gudhi_module(Cech_complex)
add_gudhi_module(Hasse_complex)
add_gudhi_module(Persistence_representations)
diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
deleted file mode 100644
index 60c98f66..00000000
--- a/Dockerfile_for_circleci_image
+++ /dev/null
@@ -1,69 +0,0 @@
-FROM ubuntu:20.04
-
-# Update and upgrade distribution
-RUN apt-get update && \
- apt-get upgrade -y
-
-# Tools necessary for installing and configuring Ubuntu
-RUN apt-get install -y \
- apt-utils \
- locales \
- tzdata
-
-# Timezone
-RUN echo "Europe/Paris" | tee /etc/timezone && \
- ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
- dpkg-reconfigure -f noninteractive tzdata
-
-# Locale with UTF-8 support
-RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
- locale-gen && \
- update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-# Update again
-RUN apt-get update
-
-# Required for Gudhi compilation
-RUN apt-get install -y make \
- git \
- g++ \
- cmake \
- graphviz \
- perl \
- texlive-full \
- biber \
- doxygen \
- libboost-all-dev \
- libeigen3-dev \
- libgmp3-dev \
- libmpfr-dev \
- libtbb-dev \
- locales \
- python3 \
- python3-pip \
- python3-tk \
- python3-grpcio \
- libfreetype6-dev \
- pkg-config \
- curl
-
-RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz" \
- && tar xf CGAL-5.2.tar.xz \
- && mkdir build \
- && cd build \
- && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.2
-
-ADD .github/build-requirements.txt /
-ADD .github/test-requirements.txt /
-
-RUN pip3 install -r build-requirements.txt
-RUN pip3 --no-cache-dir install -r test-requirements.txt
-
-# apt clean up
-RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/Dockerfile_for_circleci_image_without_cgal b/Dockerfile_for_circleci_image_without_cgal
deleted file mode 100644
index 7bf96667..00000000
--- a/Dockerfile_for_circleci_image_without_cgal
+++ /dev/null
@@ -1,55 +0,0 @@
-FROM ubuntu:20.04
-
-# Update and upgrade distribution
-RUN apt update && \
- apt upgrade -y
-
-# Tools necessary for installing and configuring Ubuntu
-RUN apt install -y \
- apt-utils \
- locales \
- tzdata
-
-# Timezone
-RUN echo "Europe/Paris" | tee /etc/timezone && \
- ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
- dpkg-reconfigure -f noninteractive tzdata
-
-# Locale with UTF-8 support
-RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
- locale-gen && \
- update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-# Update again
-RUN apt update
-
-# Required for Gudhi compilation
-RUN apt install -y make \
- git \
- g++ \
- cmake \
- perl \
- libboost-all-dev \
- locales \
- python3 \
- python3-pip \
- python3-tk \
- python3-grpcio \
- libfreetype6-dev \
- pkg-config \
- curl
-
-RUN curl -LO "https://gitlab.com/libeigen/eigen/-/archive/3.3.9/eigen-3.3.9.tar.gz" \
- && tar xf eigen-3.3.9.tar.gz
-
-ADD .github/build-requirements.txt /
-ADD .github/test-requirements.txt /
-
-RUN pip3 install -r build-requirements.txt
-RUN pip3 --no-cache-dir install -r test-requirements.txt
-
-# apt clean up
-RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/Dockerfile_for_pip b/Dockerfile_for_pip
deleted file mode 100644
index ada39647..00000000
--- a/Dockerfile_for_pip
+++ /dev/null
@@ -1,52 +0,0 @@
-FROM quay.io/pypa/manylinux2014_x86_64
-
-RUN yum -y update && yum -y install \
- wget \
- zlib-devel \
- eigen3-devel \
- mpfr-devel \
- gmp-devel \
- devtoolset-8 \
- && yum clean all
-
-RUN mkdir -p /opt/cmake \
- && wget https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-Linux-x86_64.sh \
- && sh cmake-3.16.2-Linux-x86_64.sh --skip-license --prefix=/opt/cmake \
- && rm -f cmake-3.16.2-Linux-x86_64.sh
-
-# yum install boost-devel installs boost 1.53 and copy is the only way to install headers only boost
-RUN wget https://dl.bintray.com/boostorg/release/1.73.0/source/boost_1_73_0.tar.gz \
- && tar xf boost_1_73_0.tar.gz \
- && cd boost_1_73_0 \
- && ./bootstrap.sh \
- && ls \
- && cp -r boost /usr/local/include/ \
- && cd .. \
- && rm -rf boost
-
-RUN wget https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz \
- && tar xf CGAL-5.2.tar.xz \
- && mkdir build \
- && cd build \
- && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.2
-
-ADD .github/build-requirements.txt /
-
-RUN /opt/python/cp35-cp35m/bin/pip install -r build-requirements.txt \
- && /opt/python/cp36-cp36m/bin/pip install -r build-requirements.txt\
- && /opt/python/cp37-cp37m/bin/pip install -r build-requirements.txt\
- && /opt/python/cp38-cp38/bin/pip install -r build-requirements.txt\
- && /opt/python/cp39-cp39/bin/pip install -r build-requirements.txt\
- && /opt/python/cp39-cp39/bin/pip install twine
-
-ENV PYTHON35="/opt/python/cp35-cp35m/"
-ENV PYTHON36="/opt/python/cp36-cp36m/"
-ENV PYTHON37="/opt/python/cp37-cp37m/"
-ENV PYTHON38="/opt/python/cp38-cp38/"
-ENV PYTHON39="/opt/python/cp39-cp39/"
-
-ENV PATH="/opt/cmake/bin:${PATH}"
-ENV PATH="/opt/rh/devtoolset-8/root/usr/bin:${PATH}"
diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation
deleted file mode 100644
index b0e46d72..00000000
--- a/Dockerfile_gudhi_installation
+++ /dev/null
@@ -1,80 +0,0 @@
-FROM ubuntu:20.04
-
-# Update and upgrade distribution
-RUN apt-get update && \
- apt-get upgrade -y
-
-# Tools necessary for installing and configuring Ubuntu
-RUN apt-get install -y \
- apt-utils \
- locales \
- tzdata
-
-# Timezone
-RUN echo "Europe/Paris" | tee /etc/timezone && \
- ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
- dpkg-reconfigure -f noninteractive tzdata
-
-# Locale with UTF-8 support
-RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
- locale-gen && \
- update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
-
-# Update again
-RUN apt-get update
-
-# Required for Gudhi compilation
-RUN apt-get install -y make \
- g++ \
- cmake \
- graphviz \
- perl \
- texlive-bibtex-extra \
- biber \
- libboost-all-dev \
- libeigen3-dev \
- libgmp3-dev \
- libmpfr-dev \
- libtbb-dev \
- libcgal-dev \
- locales \
- python3 \
- python3-pip \
- python3-pytest \
- python3-tk \
- python3-pybind11 \
- libfreetype6-dev \
- pkg-config \
- curl
-
-RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz" \
- && tar xf CGAL-5.1.tar.xz \
- && mkdir build \
- && cd build \
- && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
- && make install \
- && cd .. \
- && rm -rf build CGAL-5.1
-
-RUN pip3 install \
- numpy \
- matplotlib \
- scipy \
- Cython \
- POT \
- scikit-learn
-
-# apt clean up
-RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
-
-RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.3.0/gudhi.3.3.0.tar.gz" \
-&& tar xf gudhi.3.3.0.tar.gz \
-&& cd gudhi.3.3.0 \
-&& mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_PYTHON=OFF -DPython_ADDITIONAL_VERSIONS=3 .. \
-&& make all test install \
-&& cmake -DWITH_GUDHI_PYTHON=ON . \
-&& cd python \
-&& python3 setup.py install
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 8e88cab5..a96323fd 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -19,15 +19,16 @@ jobs:
- bash: |
source activate gudhi_build_env
+ git submodule update --init
sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion)
- python -m pip install --user -r .github/build-requirements.txt
- python -m pip install --user -r .github/test-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
+ python -m pip install --user -r ext/gudhi-deploy/test-requirements.txt
+ python -m pip uninstall -y pykeops
brew update || true
brew install graphviz doxygen boost eigen gmp mpfr tbb cgal || true
displayName: 'Install build dependencies'
- bash: |
source activate gudhi_build_env
- git submodule update --init
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index 16fa29d0..b5afff52 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -15,7 +15,6 @@ title = {{Statistical analysis and parameter selection for Mapper}},
volume = {19},
year = {2018},
url = {http://jmlr.org/papers/v19/17-291.html},
-doi = {10.5555/3291125.3291137}
}
@inproceedings{Dey13,
@@ -1324,3 +1323,18 @@ year = "2011"
doi = {10.4230/LIPIcs.SoCG.2020.19},
annote = {Keywords: Computational Topology, Topological Data Analysis, Edge Collapse, Simple Collapse, Persistent homology}
}
+
+@phdthesis{KachanovichThesis,
+ TITLE = {{Meshing submanifolds using Coxeter triangulations}},
+ AUTHOR = {Kachanovich, Siargey},
+ URL = {https://hal.inria.fr/tel-02419148},
+ NUMBER = {2019AZUR4072},
+ SCHOOL = {{COMUE Universit{\'e} C{\^o}te d'Azur (2015 - 2019)}},
+ YEAR = {2019},
+ MONTH = Oct,
+ KEYWORDS = {Mesh generation ; Coxeter triangulations ; Simplex quality ; Triangulations of the Euclidean space ; Freudenthal-Kuhn triangulations ; G{\'e}n{\'e}ration de maillages ; Triangulations de Coxeter ; Qualit{\'e} des simplexes ; Triangulations de l'espace euclidien ; Triangulations de Freudenthal-Kuhn},
+ TYPE = {Theses},
+ PDF = {https://hal.inria.fr/tel-02419148v2/file/2019AZUR4072.pdf},
+ HAL_ID = {tel-02419148},
+ HAL_VERSION = {v2},
+} \ No newline at end of file
diff --git a/data/persistence_diagram/PD1.pers b/data/persistence_diagram/PD1.pers
deleted file mode 100644
index 404199b4..00000000
--- a/data/persistence_diagram/PD1.pers
+++ /dev/null
@@ -1,3 +0,0 @@
-2.7 3.7
-9.6 14
-34.2 34.974 \ No newline at end of file
diff --git a/data/persistence_diagram/PD2.pers b/data/persistence_diagram/PD2.pers
deleted file mode 100644
index 125d8e4b..00000000
--- a/data/persistence_diagram/PD2.pers
+++ /dev/null
@@ -1,2 +0,0 @@
-2.8 4.45
-9.5 14.1 \ No newline at end of file
diff --git a/ext/gudhi-deploy b/ext/gudhi-deploy
new file mode 160000
+Subproject 290ade1086bedbc96a35df886cadecabbf4072e
diff --git a/ext/hera b/ext/hera
-Subproject b73ed1face2c609958556e6f2b7704bbd8aaa26
+Subproject b528c4067a8aac346eb307d3c23b82d5953cfe2
diff --git a/scripts/cpp_examples_for_doxygen.py b/scripts/cpp_examples_for_doxygen.py
new file mode 100644
index 00000000..5c091c4f
--- /dev/null
+++ b/scripts/cpp_examples_for_doxygen.py
@@ -0,0 +1,16 @@
+import os
+import glob
+
+for gd_mod in glob.glob("src/*/"):
+ mod_files = []
+ for paths in [gd_mod + 'utilities', gd_mod + 'example']:
+ if os.path.isdir(paths):
+ for root, dirs, files in os.walk(paths):
+ for file in files:
+ if file.endswith(".cpp"):
+ mod_files.append(str(os.path.join(root, file)).split(paths)[1][1:])
+ if len(mod_files) > 0:
+ mod = str(gd_mod).split('/')[1]
+ print(' * \section ' + mod + '_example_section ' + mod)
+ for file in mod_files:
+ print(' * @example ' + file)
diff --git a/scripts/create_gudhi_version.sh b/scripts/create_gudhi_version.sh
deleted file mode 100755
index f2a9233f..00000000
--- a/scripts/create_gudhi_version.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-login="vrouvrea"
-version="2.3.0"
-cgaldir="/home/vincent/workspace/CGAL-4.11-HO/build"
-cpucount=7
-
-
-# We start from scripts dir in the dev branch
-cd ..
-RELATIVEURL=`svn info . |grep -F "Relative URL:" | awk '{print $NF}'`
-
-if [ "$RELATIVEURL" != "^/trunk" ]
-then
-echo "Script must be launched in trunk and not in $RELATIVEURL"
-exit
-fi
-
-rm -rf build; mkdir build; cd build; cmake -DCMAKE_BUILD_TYPE=Debug -DDEBUG_TRACES=ON -DCGAL_DIR=${cgaldir} -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_BENCHMARK=ON -DPython_ADDITIONAL_VERSIONS=3 ..
-cmake -DCMAKE_BUILD_TYPE=Debug .
-
-CURRENTDIRECTORY=`pwd`
-export PYTHONPATH=$CURRENTDIRECTORY/src/cython:$PYTHONPATH
-
-make -j ${cpucount} all test
-
-cd ..
-svn st | grep -v GUDHIVersion.cmake | grep "^\?" | awk "{print \$2}" | xargs rm -rf
-
-svn copy svn+ssh://${login}@scm.gforge.inria.fr/svnroot/gudhi/trunk svn+ssh://${login}@scm.gforge.inria.fr/svnroot/gudhi/tags/gudhi-release-${version} \
- -m "Creating a tag of Gudhi release version ${version}."
-
-cd build
-make user_version
-
-userversiondir=`find . -type d -name "*_GUDHI_${version}" | sed 's/\.\///g'`
-echo "User version directory = ${userversiondir}"
-
-tar -czvf ${userversiondir}.tar.gz ${userversiondir}
-
-userdocdir=${userversiondir/GUDHI/GUDHI_DOC}
-echo "User documentation directory = ${userdocdir}"
-mkdir ${userdocdir}
-make doxygen
-
-cp -R ${userversiondir}/doc/html ${userdocdir}/cpp
-cd ${userversiondir}
-rm -rf build; mkdir build; cd build; cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=./installed -DCGAL_DIR=${cgaldir} -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 ..
-
-CURRENTDIRECTORY=`pwd`
-export PYTHONPATH=$CURRENTDIRECTORY/cython:$PYTHONPATH
-
-make sphinx
-
-cp -R cython/sphinx ../../${userdocdir}/python
-cd ../..
-tar -czvf ${userdocdir}.tar.gz ${userdocdir}
-
-cd ${userversiondir}/build
-make -j ${cpucount} all test install
-
-cd ../..
-actualdir=`pwd`
-echo "Library is available at ${actualdir}/${userversiondir}.tar.gz"
-sha256sum ${userversiondir}.tar.gz
-echo "Documentation is available at ${actualdir}/${userdocdir}.tar.gz"
diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index c068b268..5ab23720 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -83,7 +83,7 @@ Table of Contents
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Alpha_complex/Alpha_complex_from_points.cpp
+ * \include Alpha_complex_from_points.cpp
*
* When launching:
*
@@ -92,7 +92,7 @@ Table of Contents
*
* the program output is:
*
- * \include Alpha_complex/alphaoffreader_for_doc_60.txt
+ * \include alphaoffreader_for_doc_60.txt
*
* \section createcomplexalgorithm Create complex algorithm
*
@@ -152,6 +152,8 @@ Table of Contents
* not quite define a proper filtration (i.e. non-decreasing with respect to inclusion).
* We fix that up by calling `SimplicialComplexForAlpha::make_filtration_non_decreasing()`.
*
+ * \note This is not the case in `exact` version, this is the reason why it is not called in this case.
+ *
* \subsubsection pruneabove Prune above given filtration value
*
* The simplex tree is pruned from the given maximum \f$ \alpha^2 \f$ value (cf.
@@ -171,7 +173,7 @@ Table of Contents
*
* Then, it is asked to display information about the alpha complex.
*
- * \include Alpha_complex/Weighted_alpha_complex_from_points.cpp
+ * \include Weighted_alpha_complex_from_points.cpp
*
* When launching:
*
@@ -180,7 +182,7 @@ Table of Contents
*
* the program output is:
*
- * \include Alpha_complex/weightedalpha3dfrompoints_for_doc.txt
+ * \include weightedalpha3dfrompoints_for_doc.txt
*
*
* \section offexample Example from OFF file
@@ -190,7 +192,7 @@ Table of Contents
*
* Then, it is asked to display information about the alpha complex.
*
- * \include Alpha_complex/Alpha_complex_from_off.cpp
+ * \include Alpha_complex_from_off.cpp
*
* When launching:
*
@@ -199,7 +201,7 @@ Table of Contents
*
* the program output is:
*
- * \include Alpha_complex/alphaoffreader_for_doc_32.txt
+ * \include alphaoffreader_for_doc_32.txt
*
*
* \section weighted3dexample 3d specific version
@@ -215,7 +217,7 @@ Table of Contents
*
* Then, it is asked to display information about the alpha complex.
*
- * \include Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp
+ * \include Weighted_alpha_complex_3d_from_points.cpp
*
* The results will be the same as in \ref weightedversion .
*
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index b315fa99..e03bb161 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -435,8 +435,10 @@ class Alpha_complex {
// --------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------
- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- complex.make_filtration_non_decreasing();
+ if (!exact)
+ // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
+ // Only in not exact version, cf. https://github.com/GUDHI/gudhi-devel/issues/57
+ complex.make_filtration_non_decreasing();
// Remove all simplices that have a filtration value greater than max_alpha_square
complex.prune_above_filtration(max_alpha_square);
// --------------------------------------------------------------------------------------------
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index 4e5fc933..ccc3d852 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -554,8 +554,10 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_
std::clog << "cells \t\t" << count_cells << std::endl;
#endif // DEBUG_TRACES
// --------------------------------------------------------------------------------------------
- // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
- complex.make_filtration_non_decreasing();
+ if (Complexity == complexity::FAST)
+ // As Alpha value is an approximation, we have to make filtration non decreasing while increasing the dimension
+ // Only in FAST version, cf. https://github.com/GUDHI/gudhi-devel/issues/57
+ complex.make_filtration_non_decreasing();
// Remove all simplices that have a filtration value greater than max_alpha_square
complex.prune_above_filtration(max_alpha_square);
// --------------------------------------------------------------------------------------------
diff --git a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
index 2a988b4b..4f5a956c 100644
--- a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
+++ b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h
@@ -64,7 +64,7 @@ int main() {
* \section bottleneckbasicexample Basic example
*
* This other example computes the bottleneck distance from 2 persistence diagrams:
- * \include Bottleneck_distance/bottleneck_basic_example.cpp
+ * \include bottleneck_basic_example.cpp
*
* \code
Bottleneck distance = 0.75
diff --git a/src/Bottleneck_distance/doc/perturb_pd.png b/src/Bottleneck_distance/doc/perturb_pd.png
index be638de0..eabf3c8c 100644
--- a/src/Bottleneck_distance/doc/perturb_pd.png
+++ b/src/Bottleneck_distance/doc/perturb_pd.png
Binary files differ
diff --git a/src/Bottleneck_distance/utilities/bottleneckdistance.md b/src/Bottleneck_distance/utilities/bottleneckdistance.md
index a81426cf..2f5dedc9 100644
--- a/src/Bottleneck_distance/utilities/bottleneckdistance.md
+++ b/src/Bottleneck_distance/utilities/bottleneckdistance.md
@@ -10,14 +10,14 @@ Leave the lines above as it is required by the web site generator 'Jekyll'
{:/comment}
-## bottleneck_read_file_example ##
+## bottleneck_distance ##
This program computes the Bottleneck distance between two persistence diagram files.
**Usage**
```
- bottleneck_read_file_example <file_1.pers> <file_2.pers> [<tolerance>]
+ bottleneck_distance <file_1.pers> <file_2.pers> [<tolerance>]
```
where
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 79ec42c1..8f6a1ccc 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -27,6 +27,7 @@ add_gudhi_module(Bottleneck_distance)
add_gudhi_module(Cech_complex)
add_gudhi_module(Contraction)
add_gudhi_module(Collapse)
+add_gudhi_module(Coxeter_triangulation)
add_gudhi_module(Hasse_complex)
add_gudhi_module(Persistence_representations)
add_gudhi_module(Persistent_cohomology)
diff --git a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
index e489e8a4..2e4adce4 100644
--- a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
+++ b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
@@ -49,7 +49,7 @@ class Minimal_enclosing_ball_radius {
point_cloud.push_back(p1);
point_cloud.push_back(p2);
- GUDHI_CHECK((p1.end() - p1.begin()) != (p2.end() - p2.begin()), "inconsistent point dimensions");
+ GUDHI_CHECK((p1.end() - p1.begin()) == (p2.end() - p2.begin()), "inconsistent point dimensions");
Min_sphere min_sphere(p1.end() - p1.begin(), point_cloud.begin(), point_cloud.end());
return std::sqrt(min_sphere.squared_radius());
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 80c88dc6..698f9749 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -71,7 +71,7 @@ namespace cech_complex {
* \ref rips_complex but it offers more topological guarantees.
*
* If the Cech_complex interfaces are not detailed enough for your need, please refer to
- * <a href="_cech_complex_2cech_complex_step_by_step_8cpp-example.html">
+ * <a href="cech_complex_step_by_step_8cpp-example.html">
* cech_complex_step_by_step.cpp</a> example, where the graph construction over the Simplex_tree is more detailed.
*
* \subsection cechpointscloudexample Example from a point cloud
@@ -81,7 +81,7 @@ namespace cech_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Cech_complex/cech_complex_example_from_points.cpp
+ * \include cech_complex_example_from_points.cpp
*
* When launching (maximal enclosing ball radius is 1., is expanded until dimension 2):
*
@@ -90,7 +90,7 @@ namespace cech_complex {
*
* the program output is:
*
- * \include Cech_complex/cech_complex_example_from_points_for_doc.txt
+ * \include cech_complex_example_from_points_for_doc.txt
*
*/
/** @} */ // end defgroup cech_complex
diff --git a/src/Collapse/doc/intro_edge_collapse.h b/src/Collapse/doc/intro_edge_collapse.h
index 81edd79f..fde39707 100644
--- a/src/Collapse/doc/intro_edge_collapse.h
+++ b/src/Collapse/doc/intro_edge_collapse.h
@@ -81,7 +81,7 @@ namespace collapse {
* Then it collapses edges and displays a new list of `Filtered_edge` (with less edges)
* that will preserve the persistence homology computation.
*
- * \include Collapse/edge_collapse_basic_example.cpp
+ * \include edge_collapse_basic_example.cpp
*
* When launching the example:
*
@@ -90,7 +90,7 @@ namespace collapse {
*
* the program output is:
*
- * \include Collapse/edge_collapse_example_basic.txt
+ * \include edge_collapse_example_basic.txt
*/
/** @} */ // end defgroup strong_collapse
diff --git a/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h b/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h
new file mode 100644
index 00000000..210d804e
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/FunctionForImplicitManifold.h
@@ -0,0 +1,46 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_FUNCTION_FOR_IMPLICIT_MANIFOLD_H_
+#define CONCEPT_COXETER_TRIANGULATION_FUNCTION_FOR_IMPLICIT_MANIFOLD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept FunctionForImplicitManifold describes the requirements
+ * for a type to implement an implicit function class used for example in Manifold_tracing.
+ */
+struct FunctionForImplicitManifold {
+ /** \brief Value of the function at a specified point 'p'.
+ * @param[in] p The input point given by its Cartesian coordinates.
+ * Its size needs to be equal to amb_d().
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const;
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const;
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const;
+
+ /** \brief Returns a point on the zero-set of the function. */
+ Eigen::VectorXd seed() const;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/IntersectionOracle.h b/src/Coxeter_triangulation/concept/IntersectionOracle.h
new file mode 100644
index 00000000..e4e397fa
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/IntersectionOracle.h
@@ -0,0 +1,104 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_INTERSECTION_ORACLE_H_
+#define CONCEPT_COXETER_TRIANGULATION_INTERSECTION_ORACLE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept IntersectionOracle describes the requirements
+ * for a type to implement an intersection oracle class used for example in Manifold_tracing.
+ *
+ */
+struct IntersectionOracle {
+ /** \brief Returns the domain (ambient) dimension of the underlying manifold. */
+ std::size_t amb_d() const;
+
+ /** \brief Returns the codomain dimension of the underlying manifold. */
+ std::size_t cod_d() const;
+
+ /** \brief Intersection query with the relative interior of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the relative interior of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the manifold
+ * (the codomain dimension of the function).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects(const Simplex_handle& simplex, const Triangulation& triangulation) const;
+
+ /** \brief Intersection query with the boundary of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the boundary of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the boundary of the manifold
+ * (the codomain dimension of the function + 1).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects_boundary(const Simplex_handle& simplex,
+ const Triangulation& triangulation) const;
+
+ /** \brief Returns true if the input point lies inside the piecewise-linear
+ * domain induced by the given ambient triangulation that defines the relative
+ * interior of the piecewise-linear approximation of the manifold.
+ *
+ * @param p The input point. Needs to have the same dimension as the ambient
+ * dimension of the manifold (the domain dimension of the function).
+ * @param triangulation The ambient triangulation. Needs to have the same
+ * dimension as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Triangulation>
+ bool lies_in_domain(const Eigen::VectorXd& p, const Triangulation& triangulation) const {
+ Eigen::VectorXd pl_p = make_pl_approximation(domain_fun_, triangulation)(p);
+ return pl_p(0) < 0;
+ }
+
+ /** \brief Returns the function that defines the interior of the manifold */
+ const Function_& function() const;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h b/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h
new file mode 100644
index 00000000..dac8e66d
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/SimplexInCoxeterTriangulation.h
@@ -0,0 +1,81 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_SIMPLEX_IN_COXETER_TRIANGULATION_H_
+#define CONCEPT_COXETER_TRIANGULATION_SIMPLEX_IN_COXETER_TRIANGULATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <gudhi/Permutahedral_representation.h>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept SimplexInCoxeterTriangulation describes the requirements
+ * for a type to implement a representation of simplices in Freudenthal_triangulation
+ * or in Coxeter_triangulation.
+ */
+struct SimplexInCoxeterTriangulation {
+ /** \brief Type of the vertex. */
+ typedef Vertex_ Vertex;
+
+ /** \brief Type of the ordered partition. */
+ typedef Ordered_set_partition_ OrderedSetPartition;
+
+ /** \brief Dimension of the simplex. */
+ unsigned dimension() const;
+
+ /** \brief Type of a range of vertices, each of type Vertex. */
+ typedef Vertex_range;
+
+ /** \brief Returns a range of vertices of the simplex.
+ */
+ Vertex_range vertex_range() const;
+
+ /** \brief Type of a range of faces, each of type that
+ * is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ typedef Face_range;
+
+ /** \brief Returns a range of permutahedral representations of k-dimensional faces
+ * of the simplex for some given integer parameter 'k'.
+ */
+ Face_range face_range(std::size_t k) const;
+
+ /** \brief Returns a range of permutahedral representations of facets of the simplex.
+ * The dimension of the simplex must be strictly positive.
+ */
+ Face_range facet_range() const;
+
+ /** \brief Type of a range of cofaces, each of type that
+ * is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ typedef Coface_range;
+
+ /** \brief Returns a range of permutahedral representations of k-dimensional cofaces
+ * of the simplex for some given integer parameter 'k'.
+ */
+ Coface_range coface_range(std::size_t k) const;
+
+ /** \brief Returns a range of permutahedral representations of cofacets of the simplex.
+ * The dimension of the simplex must be strictly different from the ambient dimension.
+ */
+ Coface_range cofacet_range() const;
+
+ /** \brief Returns true, if the simplex is a face of other simplex. */
+ bool is_face_of(const Permutahedral_representation& other) const;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h b/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h
new file mode 100644
index 00000000..2b5d568c
--- /dev/null
+++ b/src/Coxeter_triangulation/concept/TriangulationForManifoldTracing.h
@@ -0,0 +1,56 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CONCEPT_COXETER_TRIANGULATION_TRIANGULATION_FOR_MANIFOLD_TRACING_H_
+#define CONCEPT_COXETER_TRIANGULATION_TRIANGULATION_FOR_MANIFOLD_TRACING_H_
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief The concept TriangulationForManifoldTracing describes the requirements
+ * for a type to implement a triangulation class used for example in Manifold_tracing.
+ */
+struct TriangulationForManifoldTracing {
+ /** \brief Type of the simplices in the triangulation.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation. */
+ typedef Simplex_handle;
+
+ /** \brief Type of the vertices in the triangulation.
+ * Needs to be a random-access range of integer values. */
+ typedef Vertex_handle;
+
+ /** \brief Returns the permutahedral representation of the simplex in the
+ * triangulation that contains a given query point 'p'.
+ * \tparam Point_d A class that represents a point in d-dimensional Euclidean space.
+ * The coordinates should be random-accessible. Needs to provide the method size().
+ * @param[in] point The query point.
+ */
+ template <class Point_d>
+ Simplex_handle locate_point(const Point_d& point) const;
+
+ /** \brief Returns the Cartesian coordinates of the given vertex 'v'.
+ * @param[in] v The input vertex.
+ */
+ Eigen::VectorXd cartesian_coordinates(const Vertex_handle& v) const;
+
+ /** \brief Returns the Cartesian coordinates of the barycenter of a given simplex 's'.
+ * @param[in] s The input simplex given by permutahedral representation.
+ */
+ Eigen::VectorXd barycenter(const Simplex_handle& s) const;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/doc/custom_function.png b/src/Coxeter_triangulation/doc/custom_function.png
new file mode 100644
index 00000000..8bb8ba9a
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/custom_function.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png b/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png
new file mode 100644
index 00000000..338b39fe
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/flat_torus_with_boundary.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h b/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h
new file mode 100644
index 00000000..395996c9
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/intro_coxeter_triangulation.h
@@ -0,0 +1,240 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
+#define DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
+
+// needs namespaces for Doxygen to link on classes
+namespace Gudhi {
+namespace coxeter_triangulation {
+
+/** \defgroup coxeter_triangulation Coxeter triangulation
+
+\author Siargey Kachanovich
+
+@{
+
+\section overview Module overview
+
+Coxeter triangulation module is designed to provide tools for constructing a piecewise-linear approximation of an
+\f$m\f$-dimensional smooth manifold embedded in \f$ \mathbb{R}^d \f$ using an ambient triangulation.
+For a more detailed description of the module see \cite KachanovichThesis.
+
+\section manifoldtracing Manifold tracing algorithm
+The central piece of the module is the manifold tracing algorithm represented by the class
+\ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing".
+The manifold tracing algorithm takes as input a manifold of some dimension \f$m\f$ embedded in \f$\mathbb{R}^d\f$
+represented by an intersection oracle (see Section \ref intersectionoracle "Intersection oracle"), a point on the
+manifold and an ambient triangulation (see Section \ref ambienttriangulations "Ambient triangulations").
+The output consists of one map (or two maps in the case of manifolds with boundary) from the \f$(d-m)\f$-dimensional
+(and \f$(d-m+1)\f$-dimensional in the case of manifolds with boundary) simplices in the ambient triangulation that
+intersect the manifold to their intersection points.
+From this output, it is possible to construct the cell complex of the piecewise-linear approximation of the input
+manifold.
+
+There are two methods that execute the manifold tracing algorithm: the method
+\ref Gudhi::coxeter_triangulation::Manifold_tracing::manifold_tracing_algorithm() "Manifold_tracing::manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map)"
+for manifolds without boundary and
+\ref Gudhi::coxeter_triangulation::Manifold_tracing::manifold_tracing_algorithm() "Manifold_tracing::manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map,boundary_simplex_map)"
+for manifolds with boundary. The algorithm functions as follows. It starts at the specified seed points and inserts the
+\f$(d-m)\f$-dimensional simplices nearby each seed point that intersect the manifold into the output. Starting from
+this simplex, the algorithm propagates the search for other \f$(d-m)\f$-dimensional simplices that intersect the
+manifold by marching from a simplex to neighbouring simplices via their common cofaces.
+
+This class \ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing" has one template parameter
+`Triangulation_` which specifies the ambient triangulation which is used by the algorithm.
+The template type `Triangulation_` has to be a model of the concept
+\ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
+
+The module also provides two static methods:
+\ref Gudhi::coxeter_triangulation::manifold_tracing_algorithm() "manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map)"
+for manifolds without boundary and
+\ref manifold_tracing_algorithm() "manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map, boundary_simplex_map)"
+for manifolds with boundary. For these static methods it is not necessary to specify any template arguments.
+
+\section ambienttriangulations Ambient triangulations
+
+The ambient triangulations supported by the manifold tracing algorithm have to be models of the concept
+\ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
+This module offers two such models: the class
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" and the derived class
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation "Coxeter_triangulation".
+
+Both these classes encode affine transformations of the so-called Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$.
+The Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$ is defined as the simplicial subdivision of the unit cubic
+partition of \f$\mathbb{R}^d\f$.
+Each simplex is encoded using the permutahedral representation, which consists of an integer-valued vector \f$y\f$ that
+positions the simplex in a specific cube in the cubical partition and an ordered partition \f$\omega\f$ of the set
+\f$\{1,\ldots,d+1\}\f$, which positions the simplex in the simplicial subdivision of the cube.
+The default constructor
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation::Freudenthal_triangulation(std::size_t)
+"Freudenthal_triangulation(d)" builds the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$. The class
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" can also encode any affine
+transformation of the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$ using an invertible matrix \f$\Lambda\f$ and
+an offset vector \f$b\f$ that can be specified in the constructor and which can be changed using the methods
+change_matrix and change_offset. The class
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation "Coxeter_triangulation" is derived from
+\ref Gudhi::coxeter_triangulation::Freudenthal_triangulation "Freudenthal_triangulation" and its default constructor
+\ref Gudhi::coxeter_triangulation::Coxeter_triangulation::Coxeter_triangulation(std::size_t) "Coxeter_triangulation(d)"
+builds a Coxeter triangulation of type \f$\tilde{A}_d\f$, which has the best simplex quality of all linear
+transformations of the Freudenthal-Kuhn triangulation of \f$\mathbb{R}^d\f$.
+
+\image html two_triangulations.png "Coxeter (on the left) and Freudenthal-Kuhn triangulation (on the right)"
+
+
+\section intersectionoracle Intersection oracle
+
+The input \f$m\f$-dimensional manifold in \f$\mathbb{R}^d\f$ needs to be given via the intersection oracle that answers
+the following query: given a \f$(d-m)\f$-dimensional simplex, does it intersect the manifold?
+The concept \ref Gudhi::coxeter_triangulation::IntersectionOracle "IntersectionOracle" describes all requirements for
+an intersection oracle class to be compatible with the class
+\ref Gudhi::coxeter_triangulation::Manifold_tracing "Manifold_tracing".
+This module offers one model of the concept
+\ref Gudhi::coxeter_triangulation::IntersectionOracle "IntersectionOracle", which is the class
+\ref Gudhi::coxeter_triangulation::Implicit_manifold_intersection_oracle "Implicit_manifold_intersection_oracle".
+This class represents a manifold given as the zero-set of a specified function
+\f$F: \mathbb{R}^d \rightarrow \mathbb{R}^{d-m}\f$.
+The function \f$F\f$ is given by a class which is a model of the concept
+\ref Gudhi::coxeter_triangulation::FunctionForImplicitManifold "FunctionForImplicitManifold".
+There are multiple function classes that are already implemented in this module.
+
+\li \ref Gudhi::coxeter_triangulation::Constant_function(std::size_t, std::size_t, Eigen::VectorXd)
+"Constant_function(d,k,v)" defines a constant function \f$F\f$ such that for all \f$x \in \mathbb{R}^d\f$, we have
+ \f$F(x) = v \in \mathbb{R}^k\f$.
+ The class Constant_function does not define an implicit manifold, but is useful as the domain function when defining
+ boundaryless implicit manifolds.
+\li \ref Gudhi::coxeter_triangulation::Function_affine_plane_in_Rd(N,b) "Function_affine_plane_in_Rd(N,b)" defines an
+ \f$m\f$-dimensional implicit affine plane in the \f$d\f$-dimensional Euclidean space given by a normal matrix \f$N\f$
+ and an offset vector \f$b\f$.
+\li \ref Gudhi::coxeter_triangulation::Function_Sm_in_Rd(r,m,d,center) "Function_Sm_in_Rd(r,m,d,center)" defines an
+ \f$m\f$-dimensional implicit sphere embedded in the \f$d\f$-dimensional Euclidean space of radius \f$r\f$ centered at
+ the point 'center'.
+\li \ref Gudhi::coxeter_triangulation::Function_moment_curve_in_Rd(r,d) "Function_moment_curve(r,d)" defines the moment
+ curve in the \f$d\f$-dimensional Euclidean space of radius \f$r\f$ given as the parameterized curve (but implemented
+ as an implicit curve):
+ \f[ (r, rt, \ldots, rt^{d-1}) \in \mathbb{R}^d,\text{ for $t \in \mathbb{R}$.} \f]
+\li \ref Gudhi::coxeter_triangulation::Function_torus_in_R3(R, r) "Function_torus_in_R3(R, r)" defines a torus in
+  \f$\mathbb{R}^3\f$ with the outer radius \f$R\f$ and the inner radius \f$r\f$, given by the equation:
+ \f[ z^2 + (\sqrt{x^2 + y^2} - r)^2 - R^2 = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_chair_in_R3(a, b, k) "Function_chair_in_R3(a, b, k)" defines the
+ \"Chair\" surface in \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ (x^2 + y^2 + z^2 - ak^2)^2 - b((z-k)^2 - 2x^2)((z+k)^2 - 2y^2) = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_iron_in_R3() "Function_iron_in_R3()" defines the \"Iron\" surface in
+ \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ \frac{-x^6-y^6-z^6}{300} + \frac{xy^2z}{2.1} + y^2 + (z-2)^2 = 1. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_lemniscate_revolution_in_R3(a) "Function_lemniscate_revolution_in_R3(a)"
+ defines a revolution surface in \f$\mathbb{R}^3\f$ obtained from the lemniscate of Bernoulli defined by the equation:
+ \f[ (x^2 + y^2 + z^2)^2 - 2a^2(x^2 - y^2 - z^2) = 0. \f]
+\li \ref Gudhi::coxeter_triangulation::Function_whitney_umbrella_in_R3() "Function_whitney_umbrella_in_R3()" defines
+ the Whitney umbrella surface in \f$\mathbb{R}^3\f$ defined by the equation:
+ \f[ x^2 - y^2z = 0. \f]
+
+The base function classes above can be composed or modified into new functions using the following classes and methods:
+
+\li \ref Gudhi::coxeter_triangulation::Cartesian_product "Cartesian_product(functions...)" expresses the Cartesian
+ product \f$F_1^{-1}(0) \times \ldots \times F_k^{-1}(0)\f$ of multiple implicit manifolds as an implicit manifold.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_product_function() "make_product_function(functions...)" is provided that
+ takes a pack of function-typed objects as the argument.
+\li \ref Gudhi::coxeter_triangulation::Embed_in_Rd "Embed_in_Rd(F, d)" expresses an implicit manifold given as the
+ zero-set of a function \f$F\f$ embedded in a higher-dimensional Euclidean space \f$\mathbb{R}^d\f$.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::make_embedding() "make_embedding(F, d)" is
+ provided.
+\li \ref Gudhi::coxeter_triangulation::Linear_transformation "Linear_transformation(F, M)" applies a linear
+ transformation given by a matrix \f$M\f$ on an implicit manifold given as the zero-set of the function \f$F\f$.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_linear_transformation() "make_linear_transformation(F, M)" is provided.
+\li \ref Gudhi::coxeter_triangulation::Translate "Translate(F, v)" translates an implicit manifold given as the
+  zero-set of the function \f$F\f$ by a vector \f$v\f$.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::translate() "translate(F, v)" is provided.
+\li \ref Gudhi::coxeter_triangulation::Negation() "Negation(F)" defines the negative of the given function \f$F\f$.
+ This class is useful to define the complementary of a given domain, when defining a manifold with boundary.
+ For convenience, a static function \ref Gudhi::coxeter_triangulation::negation() "negation(F)" is provided.
+\li \ref Gudhi::coxeter_triangulation::PL_approximation "PL_approximation(F, T)" defines a piecewise-linear
+ approximation of a given function \f$F\f$ induced by an ambient triangulation \f$T\f$.
+ The purpose of this class is to define a piecewise-linear function that is compatible with the requirements for the
+ domain function \f$D\f$ when defining a manifold with boundary.
+ For convenience, a static function
+ \ref Gudhi::coxeter_triangulation::make_pl_approximation() "make_pl_approximation(F, T)" is provided.
+ The type of \f$T\f$ is required to be a model of the concept
+ \ref Gudhi::coxeter_triangulation::TriangulationForManifoldTracing "TriangulationForManifoldTracing".
+
+It is also possible to implement your own function as detailed in this \ref exampleswithcustomfunction.
+
+\section cellcomplex Cell complex construction
+
+The output of the manifold tracing algorithm can be transformed into the Hasse diagram of a cell complex that
+approximates the input manifold using the class \ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex".
+The type of the cells in the Hasse diagram is
+\ref Gudhi::Hasse_diagram::Hasse_diagram_cell "Hasse_cell<int, double, bool>" provided by the module Hasse diagram.
+The cells in the cell complex given by an object of the class
+\ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex" are accessed through several maps that are accessed
+through the following methods.
+
+\li The method
+\ref Gudhi::coxeter_triangulation::Cell_complex::interior_simplex_cell_maps() "interior_simplex_cell_maps()"
+returns a vector of maps from the cells of various dimensions in the interior of the cell complex to the permutahedral
+representations of the corresponding simplices in the ambient triangulation.
+Each individual map for cells of a specific dimension \f$l\f$ can be accessed using the method
+\ref Gudhi::coxeter_triangulation::Cell_complex::interior_simplex_cell_map() "interior_simplex_cell_map(l)".
+\li The method
+\ref Gudhi::coxeter_triangulation::Cell_complex::boundary_simplex_cell_maps() "boundary_simplex_cell_maps()"
+returns a vector of maps from the cells of various dimensions on the boundary of the cell complex to the permutahedral
+representations of the corresponding simplices in the ambient triangulation.
+Each individual map for cells of a specific dimension \f$l\f$ can be accessed using the method
+\ref Gudhi::coxeter_triangulation::Cell_complex::boundary_simplex_cell_map() "boundary_simplex_cell_map(l)".
+\li The method \ref Gudhi::coxeter_triangulation::Cell_complex::cell_simplex_map() "cell_simplex_map()" returns a map
+from the cells in the cell complex to the permutahedral representations of the corresponding simplices in the ambient
+triangulation.
+\li The method \ref Gudhi::coxeter_triangulation::Cell_complex::cell_point_map() "cell_point_map()" returns a map from
+the vertex cells in the cell complex to their Cartesian coordinates.
+
+The use and interfaces of this \ref Gudhi::coxeter_triangulation::Cell_complex "Cell_complex" is limited to the
+Coxeter_triangulation implementation.
+
+\section example Examples
+
+\subsection examplewithoutboundaries Basic example without boundaries
+\include cell_complex_from_basic_circle_manifold.cpp
+
+The program output is:
+
+\include cell_complex_from_basic_circle_manifold_for_doc.txt
+
+\subsection exampleswithboundaries Example with boundaries
+
+Here is an example of constructing a piecewise-linear approximation of a flat torus embedded in \f$\mathbb{R}^4\f$,
+rotated by a random rotation in \f$\mathbb{R}^4\f$ and cut by a hyperplane.
+
+\include manifold_tracing_flat_torus_with_boundary.cpp
+
+The output in <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">medit</a> is:
+
+\image html "flat_torus_with_boundary.png" "Output from the example of a flat torus with boundary"
+
+\subsection exampleswithcustomfunction Example with a custom function
+
+In the following more complex example, we define a custom function for the implicit manifold.
+
+\include manifold_tracing_custom_function.cpp
+
+The output in <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">medit</a> looks as follows:
+
+\image html "custom_function.png" "Output from the example with a custom function"
+
+
+ */
+/** @} */ // end defgroup coxeter_triangulation
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // DOC_COXETER_TRIANGULATION_INTRO_COXETER_TRIANGULATION_H_
diff --git a/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png b/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png
new file mode 100644
index 00000000..04912729
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/manifold_tracing_on_custom_function_example.png
Binary files differ
diff --git a/src/Coxeter_triangulation/doc/two_triangulations.png b/src/Coxeter_triangulation/doc/two_triangulations.png
new file mode 100644
index 00000000..055d93e7
--- /dev/null
+++ b/src/Coxeter_triangulation/doc/two_triangulations.png
Binary files differ
diff --git a/src/Coxeter_triangulation/example/CMakeLists.txt b/src/Coxeter_triangulation/example/CMakeLists.txt
new file mode 100644
index 00000000..7f81c599
--- /dev/null
+++ b/src/Coxeter_triangulation/example/CMakeLists.txt
@@ -0,0 +1,19 @@
+project(Coxeter_triangulation_example)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+ # because of random_orthogonal_matrix inclusion
+ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_executable ( Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example manifold_tracing_flat_torus_with_boundary.cpp )
+ target_link_libraries(Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example ${CGAL_LIBRARY})
+ add_test(NAME Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_manifold_tracing_flat_torus_with_boundary_example>)
+ endif()
+
+ add_executable ( Coxeter_triangulation_manifold_tracing_custom_function_example manifold_tracing_custom_function.cpp )
+ add_test(NAME Coxeter_triangulation_manifold_tracing_custom_function_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_manifold_tracing_custom_function_example>)
+
+ add_executable ( Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example cell_complex_from_basic_circle_manifold.cpp )
+ add_test(NAME Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example
+ COMMAND $<TARGET_FILE:Coxeter_triangulation_cell_complex_from_basic_circle_manifold_example>)
+endif() \ No newline at end of file
diff --git a/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp
new file mode 100644
index 00000000..dfaaffa8
--- /dev/null
+++ b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold.cpp
@@ -0,0 +1,55 @@
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h> // for Gudhi::coxeter_triangulation::make_oracle
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+int main(int argc, char** argv) {
+ // Oracle is a circle of radius 1
+ double radius = 1.;
+ auto oracle = make_oracle(Function_Sm_in_Rd(radius, 1));
+
+ // Define a Coxeter triangulation.
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // Theory forbids that a vertex of the triangulation lies exactly on the circle.
+ // Add some offset to avoid algorithm degeneracies.
+ cox_tr.change_offset(-Eigen::VectorXd::Random(oracle.amb_d()));
+ // For a better manifold approximation, one can change the circle radius value or change the linear transformation
+ // matrix.
+ // The number of points and edges will increase with a better resolution.
+ //cox_tr.change_matrix(0.5 * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using Out_simplex_map = typename Manifold_tracing<Coxeter_triangulation<> >::Out_simplex_map;
+
+ std::vector<Eigen::VectorXd> seed_points(1, oracle.seed());
+ Out_simplex_map interior_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map);
+
+ // List of Hasse_cell pointers to retrieve vertices values from edges
+ std::map<Cell_complex<Out_simplex_map>::Hasse_cell*, std::size_t> vi_map;
+ std::size_t index = 0;
+
+ std::clog << "Vertices:" << std::endl;
+ for (const auto& cp_pair : cell_complex.cell_point_map()) {
+ std::clog << index << " : (" << cp_pair.second(0) << ", " << cp_pair.second(1) << ")" << std::endl;
+ vi_map.emplace(cp_pair.first, index++);
+ }
+
+ std::clog << "Edges:" << std::endl;
+ for (const auto& sc_pair : cell_complex.interior_simplex_cell_map(1)) {
+ Cell_complex<Out_simplex_map>::Hasse_cell* edge_cell = sc_pair.second;
+ for (const auto& vi_pair : edge_cell->get_boundary()) std::clog << vi_map[vi_pair.first] << " ";
+ std::clog << std::endl;
+ }
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt
new file mode 100644
index 00000000..b323cca3
--- /dev/null
+++ b/src/Coxeter_triangulation/example/cell_complex_from_basic_circle_manifold_for_doc.txt
@@ -0,0 +1,26 @@
+Vertices:
+0 : (-0.680375, 0.523483)
+1 : (0.147642, 0.887879)
+2 : (-0.847996, 0.30801)
+3 : (-0.881369, 0.0951903)
+4 : (0.638494, -0.550215)
+5 : (0.415344, 0.843848)
+6 : (0.812453, -0.0815816)
+7 : (0.319625, -0.7709)
+8 : (0.319625, 0.889605)
+9 : (0.579487, 0.638553)
+10 : (-0.680375, -0.461325)
+11 : (-0.364269, -0.760962)
+Edges:
+3 2
+3 10
+10 11
+11 7
+7 4
+2 0
+0 1
+6 9
+6 4
+1 8
+8 5
+5 9
diff --git a/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp b/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp
new file mode 100644
index 00000000..fe2051bb
--- /dev/null
+++ b/src/Coxeter_triangulation/example/manifold_tracing_custom_function.cpp
@@ -0,0 +1,87 @@
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/Linear_transformation.h>
+
+#include <gudhi/IO/build_mesh_from_cell_complex.h>
+#include <gudhi/IO/output_meshes_to_medit.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+/* A definition of a function that defines a 2d surface embedded in R^4, but that normally
+ * lives on a complex projective plane.
+ * In terms of homogeneous coordinates [x:y:z] of points on the complex projective plane,
+ * the equation of the manifold is x^3*y + y^3*z + z^3*x = 0.
+ * The embedding consists of restricting the manifold to the affine subspace z = 1.
+ */
+struct Function_surface_on_CP2_in_R4 {
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ // The real and imaginary parts of the variables x and y
+ double xr = p(0), xi = p(1), yr = p(2), yi = p(3);
+ Eigen::VectorXd result(cod_d());
+
+ // Squares and cubes of real and imaginary parts used in the computations
+ double xr2 = xr * xr, xi2 = xi * xi, yr2 = yr * yr, yi2 = yi * yi, xr3 = xr2 * xr, xi3 = xi2 * xi, yr3 = yr2 * yr,
+ yi3 = yi2 * yi;
+
+ // The first coordinate of the output is Re(x^3*y + y^3 + x)
+ result(0) = xr3 * yr - 3 * xr * xi2 * yr - 3 * xr2 * xi * yi + xi3 * yi + yr3 - 3 * yr * yi2 + xr;
+ // The second coordinate of the output is Im(x^3*y + y^3 + x)
+ result(1) = 3 * xr2 * xi * yr + xr3 * yi - 3 * xr * xi2 * yi - xi3 * yr + 3 * yr2 * yi - yi3 + xi;
+ return result;
+ }
+
+ std::size_t amb_d() const { return 4; };
+ std::size_t cod_d() const { return 2; };
+
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(4);
+ return result;
+ }
+
+ Function_surface_on_CP2_in_R4() {}
+};
+
+int main(int argc, char** argv) {
+ // The function for the (non-compact) manifold
+ Function_surface_on_CP2_in_R4 fun;
+
+ // Seed of the function
+ Eigen::VectorXd seed = fun.seed();
+
+ // Creating the function that defines the boundary of a compact region on the manifold
+ double radius = 3.0;
+ Function_Sm_in_Rd fun_sph(radius, 3, seed);
+
+ // Defining the intersection oracle
+ auto oracle = make_oracle(fun, fun_sph);
+
+ // Define a Coxeter triangulation scaled by a factor lambda.
+ // The triangulation is translated by a random vector to avoid violating the genericity hypothesis.
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ // Output the cell complex to a file readable by medit
+ output_meshes_to_medit(3, "manifold_on_CP2_with_boundary",
+ build_mesh_from_cell_complex(cell_complex, Configuration(true, true, true, 1, 5, 3),
+ Configuration(true, true, true, 2, 13, 14)));
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp b/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp
new file mode 100644
index 00000000..59fe2e2b
--- /dev/null
+++ b/src/Coxeter_triangulation/example/manifold_tracing_flat_torus_with_boundary.cpp
@@ -0,0 +1,72 @@
+// workaround for the annoying boost message in boost 1.69
+#define BOOST_PENDING_INTEGER_LOG2_HPP
+#include <boost/integer/integer_log2.hpp>
+// end workaround
+
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_affine_plane_in_Rd.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Cartesian_product.h>
+#include <gudhi/Functions/Linear_transformation.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+#include <gudhi/Functions/random_orthogonal_matrix.h> // requires CGAL
+
+#include <gudhi/IO/build_mesh_from_cell_complex.h>
+#include <gudhi/IO/output_meshes_to_medit.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+int main(int argc, char** argv) {
+ // Creating a circle S1 in R2 of specified radius
+ double radius = 1.0;
+ Function_Sm_in_Rd fun_circle(radius, 1);
+
+ // Creating a flat torus S1xS1 in R4 from two circle functions
+ auto fun_flat_torus = make_product_function(fun_circle, fun_circle);
+
+ // Apply a random rotation in R4
+ auto matrix = random_orthogonal_matrix(4);
+ auto fun_flat_torus_rotated = make_linear_transformation(fun_flat_torus, matrix);
+
+ // Computing the seed of the function fun_flat_torus
+ Eigen::VectorXd seed = fun_flat_torus_rotated.seed();
+
+ // Defining a domain function that defines the boundary, which is a hyperplane passing by the origin and orthogonal to
+ // x.
+ Eigen::MatrixXd normal_matrix = Eigen::MatrixXd::Zero(4, 1);
+ for (std::size_t i = 0; i < 4; i++) normal_matrix(i, 0) = -seed(i);
+ Function_affine_plane_in_Rd fun_bound(normal_matrix, -seed / 2);
+
+ // Defining the intersection oracle
+ auto oracle = make_oracle(fun_flat_torus_rotated, fun_bound);
+
+ // Define a Coxeter triangulation scaled by a factor lambda.
+ // The triangulation is translated by a random vector to avoid violating the genericity hypothesis.
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ // Manifold tracing algorithm
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ // Constructing the cell complex
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ // Output the cell complex to a file readable by medit
+ output_meshes_to_medit(3, "flat_torus_with_boundary",
+ build_mesh_from_cell_complex(cell_complex, Configuration(true, true, true, 1, 5, 3),
+ Configuration(true, true, true, 2, 13, 14)));
+
+ return 0;
+}
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h
new file mode 100644
index 00000000..de68acb6
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation.h
@@ -0,0 +1,77 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef COXETER_TRIANGULATION_H_
+#define COXETER_TRIANGULATION_H_
+
+#include <vector>
+#include <cmath> // for std::sqrt
+
+#include <boost/range/iterator_range.hpp>
+#include <boost/graph/graph_traits.hpp>
+#include <boost/graph/adjacency_list.hpp>
+
+#include <Eigen/Eigenvalues>
+#include <Eigen/Sparse>
+#include <Eigen/SVD>
+
+#include <gudhi/Freudenthal_triangulation.h>
+#include <gudhi/Permutahedral_representation.h>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Coxeter_triangulation
+ * \brief A class that stores Coxeter triangulation of type \f$\tilde{A}_d\f$.
+ * This triangulation has the greatest simplex quality out of all linear transformations
+ * of the Freudenthal-Kuhn triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \tparam Permutahedral_representation_ Type of a simplex given by a permutahedral representation.
+ * Needs to be a model of SimplexInCoxeterTriangulation.
+ */
+template <class Permutahedral_representation_ =
+ Permutahedral_representation<std::vector<int>, std::vector<std::vector<std::size_t> > > >
+class Coxeter_triangulation : public Freudenthal_triangulation<Permutahedral_representation_> {
+ using Matrix = Eigen::MatrixXd;
+
+ Matrix root_matrix(unsigned d) {
+ Matrix cartan(Matrix::Identity(d, d));
+ for (unsigned i = 1; i < d; i++) {
+ cartan(i - 1, i) = -0.5;
+ cartan(i, i - 1) = -0.5;
+ }
+ Eigen::SelfAdjointEigenSolver<Matrix> saes(cartan);
+ Eigen::VectorXd sqrt_diag(d);
+ for (unsigned i = 0; i < d; ++i) sqrt_diag(i) = std::sqrt(saes.eigenvalues()[i]);
+
+ Matrix lower(Matrix::Ones(d, d));
+ lower = lower.triangularView<Eigen::Lower>();
+
+ Matrix result = (lower * saes.eigenvectors() * sqrt_diag.asDiagonal()).inverse();
+ return result;
+ }
+
+ public:
+ /** \brief Constructor of Coxeter triangulation of a given dimension.
+ * @param[in] dimension The dimension of the triangulation.
+ */
+ Coxeter_triangulation(std::size_t dimension)
+ : Freudenthal_triangulation<Permutahedral_representation_>(dimension, root_matrix(dimension)) {}
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h
new file mode 100644
index 00000000..de342ecc
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h
@@ -0,0 +1,340 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef CELL_COMPLEX_H_
+#define CELL_COMPLEX_H_
+
+#include <Eigen/Dense>
+
+#include <vector>
+#include <map>
+#include <utility> // for std::make_pair
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/Permutahedral_representation/Simplex_comparator.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h> // for Hasse_cell
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Cell_complex
+ * \brief A class that constructs the cell complex from the output provided by the class
+ * \ref Gudhi::coxeter_triangulation::Manifold_tracing.
+ *
+ * The use and interfaces of this cell complex is limited to the \ref coxeter_triangulation implementation.
+ *
+ * \tparam Out_simplex_map_ The type of a map from a simplex type that is a
+ * model of SimplexInCoxeterTriangulation to Eigen::VectorXd.
+ */
+template <class Out_simplex_map_>
+class Cell_complex {
+ public:
+ /** \brief Type of a simplex in the ambient triangulation.
+ * Is a model of the concept SimplexInCoxeterTriangulation.
+ */
+ using Simplex_handle = typename Out_simplex_map_::key_type;
+ /** \brief Type of a cell in the cell complex.
+ * Always is Gudhi::Hasse_cell from the Hasse diagram module.
+ * The additional information is the boolean that is true if and only if the cell lies
+ * on the boundary.
+ */
+ using Hasse_cell = Gudhi::Hasse_diagram::Hasse_diagram_cell<int, double, bool>;
+ /** \brief Type of a map from permutahedral representations of simplices in the
+ * ambient triangulation to the corresponding cells in the cell complex of some
+ * specific dimension.
+ */
+ using Simplex_cell_map = std::map<Simplex_handle, Hasse_cell*, Simplex_comparator<Simplex_handle> >;
+ /** \brief Type of a vector of maps from permutahedral representations of simplices in the
+ * ambient triangulation to the corresponding cells in the cell complex of various dimensions.
+ */
+ using Simplex_cell_maps = std::vector<Simplex_cell_map>;
+
+ /** \brief Type of a map from cells in the cell complex to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ using Cell_simplex_map = std::map<Hasse_cell*, Simplex_handle>;
+
+ /** \brief Type of a map from vertex cells in the cell complex to the permutahedral representations
+ * of their Cartesian coordinates.
+ */
+ using Cell_point_map = std::map<Hasse_cell*, Eigen::VectorXd>;
+
+ private:
+ Hasse_cell* insert_cell(const Simplex_handle& simplex, std::size_t cell_d, bool is_boundary) {
+ Simplex_cell_maps& simplex_cell_maps = (is_boundary ? boundary_simplex_cell_maps_ : interior_simplex_cell_maps_);
+#ifdef DEBUG_TRACES
+ CC_detail_list& cc_detail_list =
+ (is_boundary ? cc_boundary_detail_lists[cell_d] : cc_interior_detail_lists[cell_d]);
+ cc_detail_list.emplace_back(simplex);
+#endif
+ Simplex_cell_map& simplex_cell_map = simplex_cell_maps[cell_d];
+ auto map_it = simplex_cell_map.find(simplex);
+ if (map_it == simplex_cell_map.end()) {
+ hasse_cells_.push_back(new Hasse_cell(is_boundary, cell_d));
+ Hasse_cell* new_cell = hasse_cells_.back();
+ simplex_cell_map.emplace(simplex, new_cell);
+ cell_simplex_map_.emplace(new_cell, simplex);
+#ifdef DEBUG_TRACES
+ cc_detail_list.back().status_ = CC_detail_info::Result_type::inserted;
+#endif
+ return new_cell;
+ }
+#ifdef DEBUG_TRACES
+ CC_detail_info& cc_info = cc_detail_list.back();
+ cc_info.trigger_ = to_string(map_it->first);
+ cc_info.status_ = CC_detail_info::Result_type::self;
+#endif
+ return map_it->second;
+ }
+
+ void expand_level(std::size_t cell_d) {
+ bool is_manifold_with_boundary = boundary_simplex_cell_maps_.size() > 0;
+ for (auto& sc_pair : interior_simplex_cell_maps_[cell_d - 1]) {
+ const Simplex_handle& simplex = sc_pair.first;
+ Hasse_cell* cell = sc_pair.second;
+ for (Simplex_handle coface : simplex.coface_range(cod_d_ + cell_d)) {
+ Hasse_cell* new_cell = insert_cell(coface, cell_d, false);
+ new_cell->get_boundary().emplace_back(cell, 1);
+ }
+ }
+
+ if (is_manifold_with_boundary) {
+ for (auto& sc_pair : boundary_simplex_cell_maps_[cell_d - 1]) {
+ const Simplex_handle& simplex = sc_pair.first;
+ Hasse_cell* cell = sc_pair.second;
+ if (cell_d != intr_d_)
+ for (Simplex_handle coface : simplex.coface_range(cod_d_ + cell_d + 1)) {
+ Hasse_cell* new_cell = insert_cell(coface, cell_d, true);
+ new_cell->get_boundary().emplace_back(cell, 1);
+ }
+ auto map_it = interior_simplex_cell_maps_[cell_d].find(simplex);
+ if (map_it == interior_simplex_cell_maps_[cell_d].end())
+ std::cerr << "Cell_complex::expand_level error: A boundary cell does not have an interior counterpart.\n";
+ else {
+ Hasse_cell* i_cell = map_it->second;
+ i_cell->get_boundary().emplace_back(cell, 1);
+ }
+ }
+ }
+ }
+
+ void construct_complex_(const Out_simplex_map_& out_simplex_map) {
+#ifdef DEBUG_TRACES
+ cc_interior_summary_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_prejoin_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_detail_lists.resize(interior_simplex_cell_maps_.size());
+#endif
+ for (auto& os_pair : out_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, false);
+ cell_point_map_.emplace(new_cell, point);
+ }
+ for (std::size_t cell_d = 1;
+ cell_d < interior_simplex_cell_maps_.size() && !interior_simplex_cell_maps_[cell_d - 1].empty(); ++cell_d) {
+ expand_level(cell_d);
+ }
+ }
+
+ void construct_complex_(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map) {
+#ifdef DEBUG_TRACES
+ cc_interior_summary_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_prejoin_lists.resize(interior_simplex_cell_maps_.size());
+ cc_interior_detail_lists.resize(interior_simplex_cell_maps_.size());
+ cc_boundary_summary_lists.resize(boundary_simplex_cell_maps_.size());
+ cc_boundary_prejoin_lists.resize(boundary_simplex_cell_maps_.size());
+ cc_boundary_detail_lists.resize(boundary_simplex_cell_maps_.size());
+#endif
+ for (auto& os_pair : boundary_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, true);
+ cell_point_map_.emplace(new_cell, point);
+ }
+ for (auto& os_pair : interior_simplex_map) {
+ const Simplex_handle& simplex = os_pair.first;
+ const Eigen::VectorXd& point = os_pair.second;
+ Hasse_cell* new_cell = insert_cell(simplex, 0, false);
+ cell_point_map_.emplace(new_cell, point);
+ }
+#ifdef DEBUG_TRACES
+ for (const auto& sc_pair : interior_simplex_cell_maps_[0])
+ cc_interior_summary_lists[0].push_back(CC_summary_info(sc_pair));
+ for (const auto& sc_pair : boundary_simplex_cell_maps_[0])
+ cc_boundary_summary_lists[0].push_back(CC_summary_info(sc_pair));
+#endif
+
+ for (std::size_t cell_d = 1;
+ cell_d < interior_simplex_cell_maps_.size() && !interior_simplex_cell_maps_[cell_d - 1].empty(); ++cell_d) {
+ expand_level(cell_d);
+
+#ifdef DEBUG_TRACES
+ for (const auto& sc_pair : interior_simplex_cell_maps_[cell_d])
+ cc_interior_summary_lists[cell_d].push_back(CC_summary_info(sc_pair));
+ if (cell_d < boundary_simplex_cell_maps_.size())
+ for (const auto& sc_pair : boundary_simplex_cell_maps_[cell_d])
+ cc_boundary_summary_lists[cell_d].push_back(CC_summary_info(sc_pair));
+#endif
+ }
+ }
+
+ public:
+ /**
+ * \brief Constructs the cell complex that approximates an \f$m\f$-dimensional manifold
+ * without boundary embedded in the \f$ d \f$-dimensional Euclidean space
+ * from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] out_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ */
+ void construct_complex(const Out_simplex_map_& out_simplex_map) {
+ interior_simplex_cell_maps_.resize(intr_d_ + 1);
+ if (!out_simplex_map.empty()) cod_d_ = out_simplex_map.begin()->first.dimension();
+ construct_complex_(out_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the skeleton of the cell complex that approximates
+ * an \f$m\f$-dimensional manifold without boundary embedded
+ * in the \f$d\f$-dimensional Euclidean space
+ * up to a limit dimension from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] out_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] limit_dimension The dimension of the constructed skeleton.
+ */
+ void construct_complex(const Out_simplex_map_& out_simplex_map, std::size_t limit_dimension) {
+ interior_simplex_cell_maps_.resize(limit_dimension + 1);
+ if (!out_simplex_map.empty()) cod_d_ = out_simplex_map.begin()->first.dimension();
+ construct_complex_(out_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the cell complex that approximates an \f$m\f$-dimensional manifold
+ * with boundary embedded in the \f$ d \f$-dimensional Euclidean space
+ * from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] interior_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] boundary_simplex_map A map from simplices of dimension \f$(d-m+1)\f$
+ * in the ambient triangulation that intersect the boundary of the manifold
+ * to the intersection points.
+ */
+ void construct_complex(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map) {
+ interior_simplex_cell_maps_.resize(intr_d_ + 1);
+ boundary_simplex_cell_maps_.resize(intr_d_);
+ if (!interior_simplex_map.empty()) cod_d_ = interior_simplex_map.begin()->first.dimension();
+ construct_complex_(interior_simplex_map, boundary_simplex_map);
+ }
+
+ /**
+ * \brief Constructs the skeleton of the cell complex that approximates
+ * an \f$m\f$-dimensional manifold with boundary embedded
+ * in the \f$d\f$-dimensional Euclidean space
+ * up to a limit dimension from the output of the class Gudhi::Manifold_tracing.
+ *
+ * \param[in] interior_simplex_map A map from simplices of dimension \f$(d-m)\f$
+ * in the ambient triangulation that intersect the relative interior of the manifold
+ * to the intersection points.
+ * \param[in] boundary_simplex_map A map from simplices of dimension \f$(d-m+1)\f$
+ * in the ambient triangulation that intersect the boundary of the manifold
+ * to the intersection points.
+ * \param[in] limit_dimension The dimension of the constructed skeleton.
+ */
+ void construct_complex(const Out_simplex_map_& interior_simplex_map, const Out_simplex_map_& boundary_simplex_map,
+ std::size_t limit_dimension) {
+ interior_simplex_cell_maps_.resize(limit_dimension + 1);
+ boundary_simplex_cell_maps_.resize(limit_dimension);
+ if (!interior_simplex_map.empty()) cod_d_ = interior_simplex_map.begin()->first.dimension();
+ construct_complex_(interior_simplex_map, boundary_simplex_map);
+ }
+
+ /**
+ * \brief Returns the dimension of the cell complex.
+ */
+ std::size_t intrinsic_dimension() const { return intr_d_; }
+
+ /**
+ * \brief Returns a vector of maps from the cells of various dimensions in the interior
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ const Simplex_cell_maps& interior_simplex_cell_maps() const { return interior_simplex_cell_maps_; }
+
+ /**
+ * \brief Returns a vector of maps from the cells of various dimensions on the boundary
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ */
+ const Simplex_cell_maps& boundary_simplex_cell_maps() const { return boundary_simplex_cell_maps_; }
+
+ /**
+ * \brief Returns a map from the cells of a given dimension in the interior
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ *
+ * \param[in] cell_d The dimension of the cells.
+ */
+ const Simplex_cell_map& interior_simplex_cell_map(std::size_t cell_d) const {
+ return interior_simplex_cell_maps_[cell_d];
+ }
+
+ /**
+ * \brief Returns a map from the cells of a given dimension on the boundary
+ * of the cell complex of type Gudhi::Hasse_cell to the permutahedral representations
+ * of the corresponding simplices in the ambient triangulation.
+ *
+ * \param[in] cell_d The dimension of the cells.
+ */
+ const Simplex_cell_map& boundary_simplex_cell_map(std::size_t cell_d) const {
+ return boundary_simplex_cell_maps_[cell_d];
+ }
+
+ /**
+ * \brief Returns a map from the cells in the cell complex of type Gudhi::Hasse_cell
+ * to the permutahedral representations of the corresponding simplices in the
+ * ambient triangulation.
+ */
+ const Cell_simplex_map& cell_simplex_map() const { return cell_simplex_map_; }
+
+ /**
+ * \brief Returns a map from the vertex cells in the cell complex of type Gudhi::Hasse_cell
+ * to their Cartesian coordinates.
+ */
+ const Cell_point_map& cell_point_map() const { return cell_point_map_; }
+
+ /**
+ * \brief Constructor for the class Cell_complex.
+ *
+ * \param[in] intrinsic_dimension The dimension of the cell complex.
+ */
+ Cell_complex(std::size_t intrinsic_dimension) : intr_d_(intrinsic_dimension) {}
+
+ ~Cell_complex() {
+ for (Hasse_cell* hs_ptr : hasse_cells_) delete hs_ptr;
+ }
+
+ private:
+ std::size_t intr_d_, cod_d_;
+ Simplex_cell_maps interior_simplex_cell_maps_, boundary_simplex_cell_maps_;
+ Cell_simplex_map cell_simplex_map_;
+ Cell_point_map cell_point_map_;
+ std::vector<Hasse_cell*> hasse_cells_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
new file mode 100644
index 00000000..59e9a350
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Cell_complex/Hasse_diagram_cell.h
@@ -0,0 +1,285 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Pawel Dlotko
+ *
+ * Copyright (C) 2017 Swansea University UK
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef HASSE_DIAGRAM_CELL_H
+#define HASSE_DIAGRAM_CELL_H
+
+#include <vector>
+#include <utility> // for std::pair
+#include <ostream>
+#include <string>
+#include <type_traits> // for std::is_same
+#include <cstdlib> // for std::size_t
+
+namespace Gudhi {
+namespace Hasse_diagram {
+
+template <typename Cell_type>
+class Hasse_diagram;
+
+/**
+ * \class Hasse_diagram_cell
+ * \brief Data structure to store a cell in a Hasse diagram.
+ *
+ * \ingroup Hasse_diagram
+ *
+ * \details
+ * The use and interfaces of this Hasse diagram cell is limited to the \ref coxeter_triangulation implementation.
+ *
+ * This is a data structure to store a cell in a general Hasse diagram data structure. It stores the following
+ * information about the cell: References to boundary and coBoundary elements, dimension of a cell and its filtration.
+ * It also allow to store any additional information of a type Additional_information which is a template parameter of
+ * the class (set by default to void).
+ *
+ * The complex is a template class requiring the following parameters:
+ * Incidence_type_ - determine the type of incidence coefficients. Use integers in most general case.
+ * Filtration_type_ - type of filtration of cells.
+ * Additional_information_ (set by default to void) - allows to store any
+ * additional information in the cells of Hasse diagrams.
+ *
+ */
+template <typename Incidence_type_, typename Filtration_type_, typename Additional_information_ = void>
+class Hasse_diagram_cell {
+ public:
+ typedef Incidence_type_ Incidence_type;
+ typedef Filtration_type_ Filtration_type;
+ typedef Additional_information_ Additional_information;
+ using Cell_range = std::vector<std::pair<Hasse_diagram_cell*, Incidence_type> >;
+
+ /**
+ * Default constructor.
+ **/
+ Hasse_diagram_cell() : dimension(0), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim.
+ **/
+ Hasse_diagram_cell(int dim) : dimension(dim), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim.
+ **/
+ Hasse_diagram_cell(int dim, Filtration_type filt_)
+ : dimension(dim), position(0), deleted_(false), filtration(filt_) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, int dim)
+ : dimension(dim), boundary(boundary_), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary and coboundary.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, const Cell_range& coboundary_, int dim)
+ : dimension(dim), boundary(boundary_), coBoundary(coboundary_), position(0), deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim with a given boundary, coboundary and
+ * additional information.
+ **/
+ Hasse_diagram_cell(const Cell_range& boundary_, const Cell_range& coboundary_, const Additional_information& ai,
+ int dim)
+ : dimension(dim),
+ boundary(boundary_),
+ coBoundary(coboundary_),
+ additional_info(ai),
+ position(0),
+ deleted_(false) {}
+
+ /**
+ * Constructor of a cell of dimension dim having given additional information.
+ **/
+ Hasse_diagram_cell(Additional_information ai, int dim)
+ : dimension(dim), additional_info(ai), position(0), deleted_(false) {}
+
+ /**
+ * Procedure to get the boundary of a given cell. The output format
+ * is a vector of pairs of pointers to boundary elements and incidence
+ * coefficients.
+ **/
+ inline Cell_range& get_boundary() { return this->boundary; }
+
+ /**
+ * Procedure to get the coboundary of a given cell. The output format
+ * is a vector of pairs of pointers to coboundary elements and incidence
+ * coefficients.
+ **/
+ inline Cell_range& get_coBoundary() { return this->coBoundary; }
+
+ /**
+ * Procedure to get the dimension of a cell.
+ **/
+ inline int& get_dimension() { return this->dimension; }
+
+ /**
+ * Procedure to get additional information about the cell.
+ **/
+ inline Additional_information& get_additional_information() { return this->additional_info; }
+
+ /**
+ * Procedure to retrieve the position of the cell in the structure. It is used in
+ * the implementation of Hasse diagram and set by it. Note that removal of
+ * cell and subsequent call of clean_up_the_structure will change those
+ * positions.
+ **/
+ inline unsigned& get_position() { return this->position; }
+
+ /**
+ * Accessing the filtration of the cell.
+ **/
+ inline Filtration_type& get_filtration() {
+ // std::cout << "Accessing the filtration of a cell : " << *this << std::endl;
+ return this->filtration;
+ }
+
+ /**
+ * A procedure used to check if the cell is deleted. It is used by the
+ * subsequent implementation of Hasse diagram that is based on lazy
+ * delete.
+ **/
+ inline bool deleted() { return this->deleted_; }
+
+ template <typename Cell_type>
+ friend class Hasse_diagram;
+
+ template <typename Cell_type>
+ friend class is_before_in_filtration;
+
+ template <typename Complex_type, typename Cell_type>
+ friend std::vector<Cell_type*> convert_to_vector_of_Cell_type(Complex_type& cmplx);
+
+ /**
+ * Procedure to remove deleted boundary and coboundary elements from the
+ * vectors of boundary and coboundary elements of this cell.
+ **/
+ void remove_deleted_elements_from_boundary_and_coboundary() {
+ Cell_range new_boundary;
+ new_boundary.reserve(this->boundary.size());
+ for (std::size_t bd = 0; bd != this->boundary.size(); ++bd) {
+ if (!this->boundary[bd].first->deleted()) {
+ new_boundary.push_back(this->boundary[bd]);
+ }
+ }
+ this->boundary.swap(new_boundary);
+
+ Cell_range new_coBoundary;
+ new_coBoundary.reserve(this->coBoundary.size());
+ for (std::size_t cbd = 0; cbd != this->coBoundary.size(); ++cbd) {
+ if (!this->coBoundary[cbd].first->deleted()) {
+ new_coBoundary.push_back(this->coBoundary[cbd]);
+ }
+ }
+ this->coBoundary.swap(new_coBoundary);
+ }
+
+ /**
+ * Writing to a stream operator.
+ **/
+ friend std::ostream& operator<<(
+ std::ostream& out, const Hasse_diagram_cell<Incidence_type, Filtration_type, Additional_information>& c) {
+ // cout << "position : " << c.position << ", dimension : " << c.dimension << ", filtration: " << c.filtration << ",
+ // size of boundary : " << c.boundary.size() << "\n";
+ out << c.position << " " << c.dimension << " " << c.filtration << std::endl;
+ for (std::size_t bd = 0; bd != c.boundary.size(); ++bd) {
+ // do not write out the cells that has been deleted
+ if (c.boundary[bd].first->deleted()) continue;
+ out << c.boundary[bd].first->position << " " << c.boundary[bd].second << " ";
+ }
+ out << std::endl;
+ return out;
+ }
+
+ /**
+ * Procedure that returns a vector of pointers to boundary elements of a given cell.
+ **/
+ inline std::vector<Hasse_diagram_cell*> get_list_of_boundary_elements() {
+ std::vector<Hasse_diagram_cell*> result;
+ std::size_t size_of_boundary = this->boundary.size();
+ result.reserve(size_of_boundary);
+ for (std::size_t bd = 0; bd != size_of_boundary; ++bd) {
+ result.push_back(this->boundary[bd].first);
+ }
+ return result;
+ }
+
+ /**
+ * Procedure that returns a vector of positions of boundary elements of a given cell.
+ **/
+ inline std::vector<unsigned> get_list_of_positions_of_boundary_elements() {
+ std::vector<unsigned> result;
+ std::size_t size_of_boundary = this->boundary.size();
+ result.reserve(size_of_boundary);
+ for (std::size_t bd = 0; bd != size_of_boundary; ++bd) {
+ result.push_back(this->boundary[bd].first->position);
+ }
+ return result;
+ }
+
+ /**
+ * Function that displays a string being a signature of a structure.
+ * Used mainly for debugging purposes.
+ **/
+ std::string full_signature_of_the_structure() {
+ std::string result;
+ result += "dimension: ";
+ result += std::to_string(this->dimension);
+ result += " filtration: ";
+ result += std::to_string(this->filtration);
+ result += " position: ";
+ result += std::to_string(this->position);
+ result += " deleted_: ";
+ result += std::to_string(this->deleted_);
+
+ // if the Additional_information is not void, add them to
+ // the signature as well.
+ if (std::is_same<Additional_information, void>::value) {
+ result += " Additional_information: ";
+ result += std::to_string(this->additional_info);
+ }
+ result += " boundary ";
+ for (std::size_t bd = 0; bd != this->boundary.size(); ++bd) {
+ result += "( " + std::to_string(this->boundary[bd].first->position);
+ result += " " + std::to_string(this->boundary[bd].second);
+ result += ") ";
+ }
+
+ result += " coBoundary ";
+ for (std::size_t cbd = 0; cbd != this->coBoundary.size(); ++cbd) {
+ result += "( " + std::to_string(this->coBoundary[cbd].first->position);
+ result += " " + std::to_string(this->coBoundary[cbd].second);
+ result += ") ";
+ }
+
+ return result;
+ }
+
+ protected:
+ Cell_range boundary;
+ Cell_range coBoundary;
+ int dimension;
+ Additional_information additional_info;
+ unsigned position;
+ bool deleted_;
+ Filtration_type filtration;
+
+ /**
+ * A procedure to delete a cell. It is a private function of the Hasse_diagram_cell
+ * class, since in the Hasse_diagram class I want to have a control
+ * of removal of cells. Therefore, to remove cell please use
+ * remove_cell in the Hasse_diagram structure.
+ **/
+ void delete_cell() { this->deleted_ = true; }
+}; // Hasse_diagram_cell
+
+} // namespace Hasse_diagram
+} // namespace Gudhi
+
+#endif  // HASSE_DIAGRAM_CELL_H
diff --git a/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h
new file mode 100644
index 00000000..5543c2fb
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Coxeter_triangulation/Query_result.h
@@ -0,0 +1,40 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef QUERY_RESULT_H_
+#define QUERY_RESULT_H_
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Query_result
+ * \brief The result of a query by an oracle such as Implicit_manifold_intersection_oracle.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Simplex_handle>
+struct Query_result {
+  /** \brief The potentially lower-dimensional face of the query simplex
+   * that contains the intersection point. OBSOLETE: unused since the snapping was removed. */
+  // Simplex_handle face;
+  /** \brief The intersection point. */
+  Eigen::VectorXd intersection;  // NOTE(review): this header does not include Eigen itself — relies on the includer; confirm
+  /** \brief True if the query simplex intersects the manifold. */
+  bool success;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h b/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h
new file mode 100644
index 00000000..873c5c9b
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Freudenthal_triangulation.h
@@ -0,0 +1,219 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FREUDENTHAL_TRIANGULATION_H_
+#define FREUDENTHAL_TRIANGULATION_H_
+
+#include <vector>
+#include <algorithm> // for std::sort
+#include <cmath> // for std::floor
+#include <numeric> // for std::iota
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Eigenvalues>
+#include <Eigen/SVD>
+
+#include <gudhi/Permutahedral_representation.h>
+#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Freudenthal_triangulation
+ * \brief A class that stores any affine transformation of the Freudenthal-Kuhn
+ * triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \details The data structure is a record that consists of a matrix
+ * that represents the linear transformation of the Freudenthal-Kuhn triangulation
+ * and a vector that represents the offset.
+ *
+ * \tparam Permutahedral_representation_ Type of a simplex given by a permutahedral representation.
+ * Needs to be a model of SimplexInCoxeterTriangulation.
+ */
+template <class Permutahedral_representation_ =
+ Permutahedral_representation<std::vector<int>, std::vector<std::vector<std::size_t> > > >
+class Freudenthal_triangulation {
+ using Matrix = Eigen::MatrixXd;
+ using Vector = Eigen::VectorXd;
+
+ public:
+ /** \brief Type of the simplices in the triangulation. */
+ using Simplex_handle = Permutahedral_representation_;
+
+ /** \brief Type of the vertices in the triangulation. */
+ using Vertex_handle = typename Permutahedral_representation_::Vertex;
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension.
+ * @param[in] dimension The dimension of the triangulation.
+ */
+ Freudenthal_triangulation(std::size_t dimension)
+ : Freudenthal_triangulation(dimension, Matrix::Identity(dimension, dimension), Vector::Zero(dimension)) {
+ is_freudenthal_ = true;  // identity matrix and zero offset: locate_point can take the fast path
+ }
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension under
+ * a linear transformation by a given matrix.
+ * @param[in] dimension The dimension of the triangulation.
+ * @param[in] matrix The matrix that defines the linear transformation.
+ * Needs to be invertible.
+ */
+ Freudenthal_triangulation(std::size_t dimension, const Matrix& matrix)
+ : Freudenthal_triangulation(dimension, matrix, Vector::Zero(dimension)) {}
+
+ /** \brief Constructor of the Freudenthal-Kuhn triangulation of a given dimension under
+ * an affine transformation by a given matrix and a translation vector.
+ * @param[in] dimension The dimension of the triangulation.
+ * @param[in] matrix The matrix that defines the linear transformation.
+ * Needs to be invertible.
+ * @param[in] offset The offset vector.
+ *
+ * @exception std::invalid_argument In debug mode, if offset size is different from dimension.
+ */
+ Freudenthal_triangulation(unsigned dimension, const Matrix& matrix, const Vector& offset)  // NOTE(review): 'unsigned' here vs 'std::size_t' in the other constructors — confirm
+ : dimension_(dimension),
+ matrix_(matrix),
+ offset_(offset),
+ colpivhouseholderqr_(matrix_.colPivHouseholderQr()),  // QR decomposition cached for locate_point
+ is_freudenthal_(false) {
+ GUDHI_CHECK(dimension == offset_.size(), std::invalid_argument("Offset must be of size 'dimension'"));
+ }
+
+ /** \brief Dimension of the triangulation. */
+ unsigned dimension() const { return dimension_; }
+
+ /** \brief Matrix that defines the linear transformation of the triangulation. */
+ const Matrix& matrix() const { return matrix_; }
+
+ /** \brief Vector that defines the offset of the triangulation. */
+ const Vector& offset() const { return offset_; }
+
+ /** \brief Change the linear transformation matrix to a given value.
+ * @param[in] matrix New value of the linear transformation matrix.
+ */
+ void change_matrix(const Eigen::MatrixXd& matrix) {
+ matrix_ = matrix;
+ colpivhouseholderqr_ = matrix.colPivHouseholderQr();
+ is_freudenthal_ = false;
+ }
+
+ /** \brief Change the offset vector to a given value.
+ * @param[in] offset New value of the offset vector.
+ */
+ void change_offset(const Eigen::VectorXd& offset) {
+ offset_ = offset;
+ is_freudenthal_ = false;
+ }
+
+ /** \brief Returns the permutahedral representation of the simplex in the
+ * triangulation that contains a given query point.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the point location in a subdivided
+ * triangulation.
+ * The returned simplex is always minimal by inclusion.
+ *
+ * \tparam Point_d A class that represents a point in d-dimensional Euclidean space.
+ * The coordinates should be random-accessible. Needs to provide the method size().
+ *
+ * @param[in] point The query point.
+ * @param[in] scale The scale of the triangulation.
+ *
+ * @exception std::invalid_argument In debug mode, if point dimension is different from triangulation one.
+ */
+ template <class Point_d>
+ Simplex_handle locate_point(const Point_d& point, double scale = 1) const {
+ using Ordered_set_partition = typename Simplex_handle::OrderedSetPartition;
+ using Part = typename Ordered_set_partition::value_type;
+ unsigned d = point.size();
+ GUDHI_CHECK(d == dimension_,
+ std::invalid_argument("The point must be of the same dimension as the triangulation"));
+ double error = 1e-9;  // tolerance when comparing fractional parts below
+ Simplex_handle output;
+ std::vector<double> z;  // fractional parts of the (transformed) coordinates
+ if (is_freudenthal_) {
+ for (std::size_t i = 0; i < d; i++) {
+ double x_i = scale * point[i];
+ int y_i = std::floor(x_i);
+ output.vertex().push_back(y_i);
+ z.push_back(x_i - y_i);
+ }
+ } else {
+ Eigen::VectorXd p_vect(d);
+ for (std::size_t i = 0; i < d; i++) p_vect(i) = point[i];
+ Eigen::VectorXd x_vect = colpivhouseholderqr_.solve(p_vect - offset_);  // undo the affine transformation
+ for (std::size_t i = 0; i < d; i++) {
+ double x_i = scale * x_vect(i);
+ int y_i = std::floor(x_i);
+ output.vertex().push_back(y_i);
+ z.push_back(x_i - y_i);
+ }
+ }
+ z.push_back(0);  // pad z to d+1 entries so each of the d+1 indices below has a value
+ Part indices(d + 1);
+ std::iota(indices.begin(), indices.end(), 0);
+ std::sort(indices.begin(), indices.end(), [&z](std::size_t i1, std::size_t i2) { return z[i1] > z[i2]; });
+
+ output.partition().push_back(Part(1, indices[0]));
+ for (std::size_t i = 1; i <= d; ++i)
+ if (z[indices[i - 1]] > z[indices[i]] + error)  // strictly smaller fractional part: start a new part
+ output.partition().push_back(Part(1, indices[i]));
+ else
+ output.partition().back().push_back(indices[i]);  // ties (within tolerance) share a part
+ return output;
+ }
+
+ /** \brief Returns the Cartesian coordinates of the given vertex.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the computation of Cartesian coordinates
+ * of a vertex in a subdivided triangulation.
+ * @param[in] vertex The query vertex.
+ * @param[in] scale The scale of the triangulation.
+ */
+ Eigen::VectorXd cartesian_coordinates(const Vertex_handle& vertex, double scale = 1) const {
+ Eigen::VectorXd v_vect(dimension_);
+ for (std::size_t j = 0; j < dimension_; j++) v_vect(j) = vertex[j] / scale;
+ return matrix_ * v_vect + offset_;
+ }
+
+ /** \brief Returns the Cartesian coordinates of the barycenter of a given simplex.
+ * \details Using the additional parameter scale, the search can be done in a
+ * triangulation that shares the origin, but is scaled by a given factor.
+ * This parameter can be useful to simulate the computation of Cartesian coordinates
+ * of the barycenter of a simplex in a subdivided triangulation.
+ * @param[in] simplex The query simplex.
+ * @param[in] scale The scale of the triangulation.
+ */
+ Eigen::VectorXd barycenter(const Simplex_handle& simplex, double scale = 1) const {
+ Eigen::VectorXd res_vector(dimension_);
+ res_vector.setZero(dimension_, 1);
+ for (auto v : simplex.vertex_range()) {
+ res_vector += cartesian_coordinates(v, scale);
+ }
+ return (1. / (simplex.dimension() + 1)) * res_vector;
+ }
+
+ protected:
+ unsigned dimension_;
+ Matrix matrix_;
+ Vector offset_;
+ Eigen::ColPivHouseholderQR<Matrix> colpivhouseholderqr_;  // decomposition of matrix_, kept in sync by the constructors and change_matrix
+ bool is_freudenthal_;  // true only for the untransformed Freudenthal-Kuhn triangulation
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h b/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h
new file mode 100644
index 00000000..0533bb83
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Cartesian_product.h
@@ -0,0 +1,157 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_CARTESIAN_PRODUCT_H_
+#define FUNCTIONS_CARTESIAN_PRODUCT_H_
+
+#include <cstdlib>
+#include <tuple>
+#include <type_traits> // for std::enable_if
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/* Get the total domain (ambient) dimension of a tuple of functions (sum of amb_d() over all entries).
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), std::size_t>::type get_amb_d(const std::tuple<T...>& tuple) {
+ return 0;  // base case: past the last entry of the tuple
+}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), std::size_t>::type get_amb_d(const std::tuple<T...>& tuple) {
+ return std::get<I>(tuple).amb_d() + get_amb_d<I + 1, T...>(tuple);
+}
+
+/* Get the total codomain dimension of a tuple of functions (sum of cod_d() over all entries).
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), std::size_t>::type get_cod_d(const std::tuple<T...>& tuple) {
+ return 0;  // base case: past the last entry of the tuple
+}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), std::size_t>::type get_cod_d(const std::tuple<T...>& tuple) {
+ return std::get<I>(tuple).cod_d() + get_cod_d<I + 1, T...>(tuple);
+}
+
+/* Fill 'point' with the concatenation of the seeds of the functions in the tuple.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), void>::type get_seed(const std::tuple<T...>& tuple,
+ Eigen::VectorXd& point, std::size_t i = 0) {}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), void>::type get_seed(const std::tuple<T...>& tuple,
+ Eigen::VectorXd& point, std::size_t i = 0) {
+ const auto& f = std::get<I>(tuple);
+ std::size_t n = f.amb_d();
+ Eigen::VectorXd seed = f.seed();
+ for (std::size_t j = 0; j < n; ++j) point(i + j) = seed(j);
+ get_seed<I + 1, T...>(tuple, point, i + n);  // recurse with the write offset advanced past this function's block
+}
+
+/* Evaluate the tuple of functions at a point 'x' and write the stacked values into 'point'.
+ */
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I == sizeof...(T), void>::type get_value(const std::tuple<T...>& tuple,
+ const Eigen::VectorXd& x,
+ Eigen::VectorXd& point, std::size_t i = 0,
+ std::size_t j = 0) {}
+
+template <std::size_t I = 0, typename... T>
+inline typename std::enable_if<I != sizeof...(T), void>::type get_value(const std::tuple<T...>& tuple,
+ const Eigen::VectorXd& x,
+ Eigen::VectorXd& point, std::size_t i = 0,
+ std::size_t j = 0) {
+ const auto& f = std::get<I>(tuple);
+ std::size_t n = f.amb_d();
+ std::size_t k = f.cod_d();
+ Eigen::VectorXd x_i(n);
+ for (std::size_t l = 0; l < n; ++l) x_i(l) = x(i + l);
+ Eigen::VectorXd res = f(x_i);
+ for (std::size_t l = 0; l < k; ++l) point(j + l) = res(l);
+ get_value<I + 1, T...>(tuple, x, point, i + n, j + k);  // advance both the input and output offsets
+}
+
+/**
+ * \class Cartesian_product
+ * \brief Constructs the function the zero-set of which is the Cartesian product
+ * of the zero-sets of some given functions.
+ *
+ * \tparam Functions A pack template parameter for functions. All functions should be models of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class... Functions>
+struct Cartesian_product {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result(cod_d_);
+ get_value(function_tuple_, p, result, 0, 0);  // stacked values of all factor functions
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return amb_d_; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return cod_d_; }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result(amb_d_);
+ get_seed(function_tuple_, result, 0);  // concatenation of the factor seeds
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the Cartesian product function.
+ *
+ * @param[in] functions The functions the zero-sets of which are factors in the
+ * Cartesian product of the resulting function.
+ */
+ Cartesian_product(const Functions&... functions) : function_tuple_(std::make_tuple(functions...)) {
+ amb_d_ = get_amb_d(function_tuple_);
+ cod_d_ = get_cod_d(function_tuple_);
+ }
+
+ private:
+ std::tuple<Functions...> function_tuple_;
+ std::size_t amb_d_, cod_d_;  // cached dimensions, computed once in the constructor
+};
+
+/**
+ * \brief Static constructor of a Cartesian product function (the function types are deduced).
+ *
+ * @param[in] functions The functions the zero-sets of which are factors in the
+ * Cartesian product of the resulting function.
+ *
+ * \tparam Functions A pack template parameter for functions. All functions should be models of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <typename... Functions>
+Cartesian_product<Functions...> make_product_function(const Functions&... functions) {
+ return Cartesian_product<Functions...>(functions...);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h b/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h
new file mode 100644
index 00000000..0603afd8
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Constant_function.h
@@ -0,0 +1,64 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_CONSTANT_FUNCTION_H_
+#define FUNCTIONS_CONSTANT_FUNCTION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Constant_function
+ * \brief A class that encodes a constant function from R^d to R^k.
+ * This class does not have any implicit manifold in correspondence.
+ */
+struct Constant_function {
+ /** \brief Value of the function at a specified point. The value is constant.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ return value_;
+ }
+
+ /** \brief Returns the domain dimension d. */
+ std::size_t amb_d() const { return d_; };
+
+ /** \brief Returns the codomain dimension k. */
+ std::size_t cod_d() const { return k_; };
+
+ /** \brief No seed point is available. Throws an exception on invocation. */
+ Eigen::VectorXd seed() const { throw "Seed invoked on a constant function.\n"; }
+
+ Constant_function() {}  // Default constructor: leaves d_ and k_ uninitialized and value_ empty
+
+ /**
+ * \brief Constructor of a constant function from R^d to R^m.
+ *
+ * @param[in] d The domain dimension.
+ * @param[in] k The codomain dimension.
+ * @param[in] value The constant value of the function.
+ */
+ Constant_function(std::size_t d, std::size_t k, const Eigen::VectorXd& value) : d_(d), k_(k), value_(value) {}
+
+ private:
+ std::size_t d_, k_;
+ Eigen::VectorXd value_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h
new file mode 100644
index 00000000..e1fe868f
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Embed_in_Rd.h
@@ -0,0 +1,93 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_EMBED_IN_RD_H_
+#define FUNCTIONS_EMBED_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Embed_in_Rd
+ * \brief Embedding of an implicit manifold in a higher dimension.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Embed_in_Rd {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd x = p;
+ Eigen::VectorXd x_k(fun_.amb_d()), x_rest(d_ - fun_.amb_d());  // split p into the function's own coordinates and the extra ones
+ for (std::size_t i = 0; i < fun_.amb_d(); ++i) x_k(i) = x(i);
+ for (std::size_t i = fun_.amb_d(); i < d_; ++i) x_rest(i - fun_.amb_d()) = x(i);
+ Eigen::VectorXd result = fun_(x_k);
+ result.conservativeResize(this->cod_d());  // append the extra coordinates to the function value
+ for (std::size_t i = fun_.cod_d(); i < this->cod_d(); ++i) result(i) = x_rest(i - fun_.cod_d());
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return d_; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return d_ - (fun_.amb_d() - fun_.cod_d()); }  // embedding preserves the codimension
+
+ /** \brief Returns a point on the zero-set of the embedded function. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result.conservativeResize(d_);
+ for (std::size_t l = fun_.amb_d(); l < d_; ++l) result(l) = 0;  // extra coordinates of the seed are zero
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the embedding function.
+ *
+ * @param[in] function The function to be embedded in higher dimension.
+ * @param[in] d Embedding dimension. Needs to be at least the ambient dimension of 'function'.
+ */
+ Embed_in_Rd(const Function_& function, std::size_t d) : fun_(function), d_(d) {}
+
+ private:
+ Function_ fun_;
+ std::size_t d_;
+};
+
+/**
+ * \brief Static constructor of an embedding function.
+ *
+ * @param[in] function The function to be embedded in higher dimension.
+ * @param[in] d Embedding dimension. Needs to be at least the ambient dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Embed_in_Rd<Function_> make_embedding(const Function_& function, std::size_t d) {
+ return Embed_in_Rd<Function_>(function, d);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h
new file mode 100644
index 00000000..8911f990
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_Sm_in_Rd.h
@@ -0,0 +1,110 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_SM_IN_RD_H_
+#define FUNCTIONS_FUNCTION_SM_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_Sm_in_Rd
+ * \brief A class for the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space.
+ */
+struct Function_Sm_in_Rd {
+ /** \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd x = p;
+ for (std::size_t i = 0; i < d_; ++i) x(i) -= center_[i];  // translate so the sphere is centered at the origin
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(k_);
+ for (std::size_t i = 0; i < m_ + 1; ++i) result(0) += x(i) * x(i);  // squared norm of the first m+1 coordinates
+ result(0) -= r_ * r_;  // vanishes exactly on the m-sphere of radius r
+ for (std::size_t j = 1; j < k_; ++j) result(j) = x(m_ + j);  // remaining equations force the last k-1 coordinates to vanish
+ return result;
+ }
+
+ /** \brief Returns the domain dimension. Same as the ambient dimension of the sphere. */
+ std::size_t amb_d() const { return d_; };
+
+ /** \brief Returns the codomain dimension. Same as the codimension of the sphere. */
+ std::size_t cod_d() const { return k_; };
+
+ /** \brief Returns a point on the sphere. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(d_);
+ result(0) += r_;  // point at distance r from the center along the first axis
+ for (std::size_t i = 0; i < d_; ++i) result(i) += center_[i];
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] d The ambient dimension of the sphere.
+ * @param[in] center The center of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, std::size_t d, Eigen::VectorXd center)
+ : m_(m), k_(d - m), d_(d), r_(r), center_(center) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the d-dimensional Euclidean space centered at the origin.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] d The ambient dimension of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, std::size_t d)
+ : m_(m), k_(d - m), d_(d), r_(r), center_(Eigen::VectorXd::Zero(d_)) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the (m+1)-dimensional Euclidean space.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ * @param[in] center The center of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m, Eigen::VectorXd center)
+ : m_(m), k_(1), d_(m_ + 1), r_(r), center_(center) {}
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit sphere embedded
+ * in the (m+1)-dimensional Euclidean space centered at the origin.
+ *
+ * @param[in] r The radius of the sphere.
+ * @param[in] m The dimension of the sphere.
+ */
+ Function_Sm_in_Rd(double r, std::size_t m) : m_(m), k_(1), d_(m_ + 1), r_(r), center_(Eigen::VectorXd::Zero(d_)) {}
+
+ Function_Sm_in_Rd(const Function_Sm_in_Rd& rhs) : Function_Sm_in_Rd(rhs.r_, rhs.m_, rhs.d_, rhs.center_) {}  // delegates; behaves like the default copy constructor
+
+ private:
+ std::size_t m_, k_, d_;
+ double r_;
+ Eigen::VectorXd center_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
new file mode 100644
index 00000000..b29f0906
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
@@ -0,0 +1,91 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_AFFINE_PLANE_IN_RD_H_
+#define FUNCTIONS_FUNCTION_AFFINE_PLANE_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_affine_plane_in_Rd
+ * \brief A class for the function that defines an m-dimensional implicit affine plane
+ * embedded in d-dimensional Euclidean space.
+ */
+struct Function_affine_plane_in_Rd {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = normal_matrix_.transpose() * (p - off_);  // signed distances along the unit normals
+ return result;
+ }
+
+ /** \brief Returns the domain dimension. Same as the ambient dimension of the plane. */
+ std::size_t amb_d() const { return d_; };
+
+ /** \brief Returns the codomain dimension. Same as the codimension of the plane. */
+ std::size_t cod_d() const { return k_; };
+
+ /** \brief Returns a point on the affine plane. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = off_;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit affine
+ * plane in the d-dimensional Euclidean space.
+ *
+ * @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
+ * correspond to the ambient dimension, the number of columns should correspond to
+ * the size of the normal basis (codimension).
+ * @param[in] offset The offset vector of the affine plane.
+ * The dimension of the vector should be the ambient dimension of the manifold.
+ */
+ Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix, const Eigen::VectorXd& offset)
+ : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), m_(d_ - k_), off_(offset) {
+ normal_matrix_.colwise().normalize();  // store unit normal vectors
+ }
+
+ /**
+ * \brief Constructor of the function that defines an m-dimensional implicit affine
+ * plane in the d-dimensional Euclidean space that passes through origin.
+ *
+ * @param[in] normal_matrix A normal matrix of the affine plane. The number of rows should
+ * correspond to the ambient dimension, the number of columns should correspond to
+ * the size of the normal basis (codimension).
+ */
+ Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix)
+ : normal_matrix_(normal_matrix),
+ d_(normal_matrix.rows()),
+ k_(normal_matrix.cols()),
+ m_(d_ - k_),
+ off_(Eigen::VectorXd::Zero(d_)) {
+ normal_matrix_.colwise().normalize();  // store unit normal vectors
+ }
+
+ private:
+ Eigen::MatrixXd normal_matrix_;
+ std::size_t d_, k_, m_;  // m_ = d_ - k_ is the intrinsic dimension; not used by the member functions
+ Eigen::VectorXd off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h
new file mode 100644
index 00000000..620446da
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_chair_in_R3.h
@@ -0,0 +1,80 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_CHAIR_IN_R3_H_
+#define FUNCTIONS_FUNCTION_CHAIR_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::pow
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_chair_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a so-called
+ * "chair" surface embedded in R^3.
+ */
+struct Function_chair_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];  // translate by the offset first
+ Eigen::VectorXd result(cod_d());
+ result(0) = std::pow(x * x + y * y + z * z - a_ * k_ * k_, 2) -
+ b_ * ((z - k_) * (z - k_) - 2 * x * x) * ((z + k_) * (z + k_) - 2 * y * y);  // cf. the formula at the end of this header
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; }
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+ double t1 = a_ - b_;
+ double discr = t1 * t1 - (1.0 - b_) * (a_ * a_ - b_);
+ double z0 = k_ * std::sqrt((t1 + std::sqrt(discr)) / (1 - b_));  // NOTE(review): closed-form on-axis root; differs slightly from the formula at the end of this header — confirm
+ Eigen::Vector3d result(off_[0], off_[1], z0 + off_[2]);  // seed lies on the z-axis through the offset
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines the 'chair' surface
+ * embedded in R^3.
+ *
+ * @param[in] a A numerical parameter.
+ * @param[in] b A numerical parameter.
+ * @param[in] k A numerical parameter.
+ * @param[in] off Offset vector.
+ */
+ Function_chair_in_R3(double a = 0.8, double b = 0.4, double k = 1.0, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : a_(a), b_(b), k_(k), off_(off) {}
+
+ protected:
+ double a_, b_, k_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
+
+// (x^2 + y^2 + z^2 - a*k^2)^2 - b*((z-k)^2 - 2*x^2)*((z+k)^2 - 2*y^2)
+// sqrt(k/(1-b))*sqrt(a-b + sqrt((a-b)^2 - (1-b)*(a^2 - b)*k^2))
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h
new file mode 100644
index 00000000..f73c4280
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_iron_in_R3.h
@@ -0,0 +1,69 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_IRON_IN_R3_H_
+#define FUNCTIONS_FUNCTION_IRON_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::pow
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_iron_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a surface
+ * embedded in R^3 that resembles an iron.
+ */
+struct Function_iron_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0), y = p(1), z = p(2);
+ Eigen::VectorXd result(cod_d());
+ result(0) = -std::pow(x, 6) / 300. - std::pow(y, 6) / 300. - std::pow(z, 6) / 300. + x * y * y * z / 2.1 + y * y +
+ std::pow(z - 2, 4) - 1;
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(std::pow(4500, 1. / 6), 0, 0);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a surface embedded in R^3
+ * that resembles an iron.
+ *
+ * @param[in] off Offset vector.
+ */
+ Function_iron_in_R3(Eigen::Vector3d off = Eigen::Vector3d::Zero()) : off_(off) {}
+
+ private:
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h
new file mode 100644
index 00000000..beb41e00
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_lemniscate_revolution_in_R3.h
@@ -0,0 +1,85 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_LEMNISCATE_REVOLUTION_IN_R3_H_
+#define FUNCTIONS_FUNCTION_LEMNISCATE_REVOLUTION_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::sqrt
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_lemniscate_revolution_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a surface of revolution
+ * around the x axis based on the lemniscate of Bernoulli embedded in R^3.
+ */
+struct Function_lemniscate_revolution_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ double x2 = x * x, y2 = y * y, z2 = z * z, a2 = a_ * a_;
+ double t1 = x2 + y2 + z2;
+ result(0) = t1 * t1 - 2 * a2 * (x2 - y2 - z2);
+ return result;
+ }
+
+ /** \brief Returns the (ambient) domain dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed2() for the other point.
+ */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(std::sqrt(2 * a_) + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed() for the other point.
+ */
+ Eigen::VectorXd seed2() const {
+ Eigen::Vector3d result(-std::sqrt(2 * a_) + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a surface of revolution
+ * around the x axis based on the lemniscate of Bernoulli embedded in R^3.
+ *
+ * @param[in] a A numerical parameter.
+ * @param[in] off Offset vector.
+ */
+ Function_lemniscate_revolution_in_R3(double a = 1, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : a_(a), off_(off) {}
+
+ private:
+ double a_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
new file mode 100644
index 00000000..11b379f3
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
@@ -0,0 +1,79 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_MOMENT_CURVE_IN_RD_H_
+#define FUNCTIONS_FUNCTION_MOMENT_CURVE_IN_RD_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_moment_curve_in_Rd
+ * \brief A class for the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ */
+struct Function_moment_curve_in_Rd {
+ /** \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result(k_);
+ for (std::size_t i = 1; i < d_; ++i) result(i - 1) = p(i) - p(0) * p(i - 1);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return d_; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return k_; };
+
+ /** \brief Returns a point on the moment curve. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = Eigen::VectorXd::Zero(d_);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r Numerical parameter.
+ * @param[in] d The ambient dimension.
+ */
+ Function_moment_curve_in_Rd(double r, std::size_t d) : m_(1), k_(d - 1), d_(d), r_(r) {}
+
+ /**
+ * \brief Constructor of the function that defines an implicit moment curve
+ * in the d-dimensional Euclidean space.
+ *
+ * @param[in] r Numerical parameter.
+ * @param[in] d The ambient dimension.
+ * @param[in] offset The offset of the moment curve.
+ */
+ Function_moment_curve_in_Rd(double r, std::size_t d, Eigen::VectorXd& offset)
+ : m_(1), k_(d - 1), d_(d), r_(r), off_(offset) {}
+
+ private:
+ std::size_t m_, k_, d_;
+ double r_;
+ Eigen::VectorXd off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h
new file mode 100644
index 00000000..b54d3c74
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_torus_in_R3.h
@@ -0,0 +1,71 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_TORUS_IN_R3_H_
+#define FUNCTIONS_FUNCTION_TORUS_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::sqrt
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_torus_in_R3
+ * \brief A class that encodes the function, the zero-set of which is a torus
+ * surface embedded in R^3.
+ */
+struct Function_torus_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = (z * z + (std::sqrt(x * x + y * y) - r_) * (std::sqrt(x * x + y * y) - r_) - R_ * R_);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(R_ + r_ + off_[0], off_[1], off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines a torus embedded in R^3.
+ *
+ * @param[in] R The outer radius of the torus.
+ * @param[in] r The inner radius of the torus.
+ * @param[in] off Offset vector.
+ */
+ Function_torus_in_R3(double R = 1, double r = 0.5, Eigen::Vector3d off = Eigen::Vector3d::Zero())
+ : R_(R), r_(r), off_(off) {}
+
+ private:
+ double R_, r_;
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h
new file mode 100644
index 00000000..df1f1eec
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_whitney_umbrella_in_R3.h
@@ -0,0 +1,78 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_FUNCTION_WHITNEY_UMBRELLA_IN_R3_H_
+#define FUNCTIONS_FUNCTION_WHITNEY_UMBRELLA_IN_R3_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Function_whitney_umbrella_in_R3
+ * \brief A class that encodes the function, the zero-set of which is the Whitney umbrella
+ * surface embedded in R^3.
+ */
+struct Function_whitney_umbrella_in_R3 {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ double x = p(0) - off_[0], y = p(1) - off_[1], z = p(2) - off_[2];
+ Eigen::VectorXd result(cod_d());
+ result(0) = x * x - y * y * z;
+ return result;
+ }
+
+ /** \brief Returns the (ambient) domain dimension. */
+ std::size_t amb_d() const { return 3; };
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return 1; };
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed2() for the other point.
+ */
+ Eigen::VectorXd seed() const {
+ Eigen::Vector3d result(1 + off_[0], 1 + off_[1], 1 + off_[2]);
+ return result;
+ }
+
+ /** \brief Returns a point on the surface. This seed point is only one of
+ * two necessary seed points for the manifold tracing algorithm.
+ * See the method seed() for the other point.
+ */
+ Eigen::VectorXd seed2() const {
+ Eigen::Vector3d result(-1 + off_[0], -1 + off_[1], 1 + off_[2]);
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the function that defines the Whitney umbrella in R^3.
+ *
+ * @param[in] off Offset vector.
+ */
+ Function_whitney_umbrella_in_R3(Eigen::Vector3d off = Eigen::Vector3d::Zero()) : off_(off) {}
+
+ private:
+ Eigen::Vector3d off_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h b/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h
new file mode 100644
index 00000000..82e25bb9
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Linear_transformation.h
@@ -0,0 +1,88 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_LINEAR_TRANSFORMATION_H_
+#define FUNCTIONS_LINEAR_TRANSFORMATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Linear_transformation
+ * \brief Transforms the zero-set of the function by a given linear transformation.
+ * The underlying function corresponds to f(M*x), where M is the transformation matrix.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Linear_transformation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = fun_(matrix_.householderQr().solve(p));
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result = matrix_ * result;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of a linearly transformed function.
+ *
+ * @param[in] function The function to be linearly transformed.
+ * @param[in] matrix The transformation matrix. Its dimension should be d*d,
+ * where d is the domain (ambient) dimension of 'function'.
+ */
+ Linear_transformation(const Function_& function, const Eigen::MatrixXd& matrix) : fun_(function), matrix_(matrix) {}
+
+ private:
+ Function_ fun_;
+ Eigen::MatrixXd matrix_;
+};
+
+/**
+ * \brief Static constructor of a linearly transformed function.
+ *
+ * @param[in] function The function to be linearly transformed.
+ * @param[in] matrix The transformation matrix. Its dimension should be d*d,
+ * where d is the domain (ambient) dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Linear_transformation<Function_> make_linear_transformation(const Function_& function, const Eigen::MatrixXd& matrix) {
+ return Linear_transformation<Function_>(function, matrix);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h b/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h
new file mode 100644
index 00000000..fdf07f27
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Negation.h
@@ -0,0 +1,84 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_NEGATION_H_
+#define FUNCTIONS_NEGATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ *\class Negation
+ * \brief Constructs the "minus" function. The zero-set is the same, but
+ * the values at other points are the negative of their original value.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Negation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = -fun_(p);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the negative function.
+ *
+ * @param[in] function The function to be negated.
+ */
+ Negation(const Function_& function) : fun_(function) {}
+
+ private:
+ Function_ fun_;
+};
+
+/**
+ * \brief Static constructor of the negative function.
+ *
+ * @param[in] function The function to be translated.
+ * domain (ambient) dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Negation<Function_> negation(const Function_& function) {
+ return Negation<Function_>(function);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h b/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h
new file mode 100644
index 00000000..22071d6d
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/PL_approximation.h
@@ -0,0 +1,111 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_PL_APPROXIMATION_H_
+#define FUNCTIONS_PL_APPROXIMATION_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class PL_approximation
+ * \brief Constructs a piecewise-linear approximation of a function induced by
+ * an ambient triangulation.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ * \tparam Triangulation_ The triangulation template parameter. Should be a model of
+ * the concept TriangulationForManifoldTracing.
+ */
+template <class Function_, class Triangulation_>
+struct PL_approximation {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ std::size_t cod_d = this->cod_d();
+ std::size_t amb_d = this->amb_d();
+ auto s = tr_.locate_point(p);
+ Eigen::MatrixXd matrix(cod_d, s.dimension() + 1);
+ Eigen::MatrixXd vertex_matrix(amb_d + 1, s.dimension() + 1);
+ for (std::size_t i = 0; i < s.dimension() + 1; ++i) vertex_matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : s.vertex_range()) {
+ Eigen::VectorXd pt_v = tr_.cartesian_coordinates(v);
+ Eigen::VectorXd fun_v = fun_(pt_v);
+ for (std::size_t i = 1; i < amb_d + 1; ++i) vertex_matrix(i, j) = pt_v(i - 1);
+ for (std::size_t i = 0; i < cod_d; ++i) matrix(i, j) = fun_v(i);
+ j++;
+ }
+ assert(j == s.dimension() + 1);
+ Eigen::VectorXd z(amb_d + 1);
+ z(0) = 1;
+ for (std::size_t i = 1; i < amb_d + 1; ++i) z(i) = p(i - 1);
+ Eigen::VectorXd lambda = vertex_matrix.colPivHouseholderQr().solve(z);
+ Eigen::VectorXd result = matrix * lambda;
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ // TODO: not finished. Should use an oracle.
+ return Eigen::VectorXd(amb_d());
+ }
+
+ /**
+ * \brief Constructor of the piecewise-linear approximation of a function
+ * induced by an ambient triangulation.
+ *
+ * @param[in] function The function.
+ * @param[in] triangulation The ambient triangulation.
+ */
+ PL_approximation(const Function_& function, const Triangulation_& triangulation)
+ : fun_(function), tr_(triangulation) {}
+
+ private:
+ Function_ fun_;
+ Triangulation_ tr_;
+};
+
+/**
+ * \brief Static constructor of the piecewise-linear approximation of a function
+ * induced by an ambient triangulation.
+ *
+ * @param[in] function The function.
+ * @param[in] triangulation The ambient triangulation.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Triangulation_>
+PL_approximation<Function_, Triangulation_> make_pl_approximation(const Function_& function,
+ const Triangulation_& triangulation) {
+ return PL_approximation<Function_, Triangulation_>(function, triangulation);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h b/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h
new file mode 100644
index 00000000..cbe65abe
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Translate.h
@@ -0,0 +1,89 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_TRANSLATE_H_
+#define FUNCTIONS_TRANSLATE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Translate
+ * \brief Translates the zero-set of the function by a vector.
+ * The underlying function corresponds to f(x-off), where off is the offset vector.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ */
+template <class Function_>
+struct Translate {
+ /**
+ * \brief Value of the function at a specified point.
+ * @param[in] p The input point. The dimension needs to coincide with the ambient dimension.
+ */
+ Eigen::VectorXd operator()(const Eigen::VectorXd& p) const {
+ Eigen::VectorXd result = fun_(p - off_);
+ return result;
+ }
+
+ /** \brief Returns the domain (ambient) dimension. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Returns the codomain dimension. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief Returns a point on the zero-set. */
+ Eigen::VectorXd seed() const {
+ Eigen::VectorXd result = fun_.seed();
+ result += off_;
+ return result;
+ }
+
+ /**
+ * \brief Constructor of the translated function.
+ *
+ * @param[in] function The function to be translated.
+ * @param[in] off The offset vector. The dimension should correspond to the
+ * domain (ambient) dimension of 'function'.
+ */
+ Translate(const Function_& function, const Eigen::VectorXd& off) : fun_(function), off_(off) {}
+
+ private:
+ Function_ fun_;
+ Eigen::VectorXd off_;
+};
+
+/**
+ * \brief Static constructor of a translated function.
+ *
+ * @param[in] function The function to be translated.
+ * @param[in] off The offset vector. The dimension should correspond to the
+ * domain (ambient) dimension of 'function'.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Translate<Function_> translate(const Function_& function, Eigen::VectorXd off) {
+ return Translate<Function_>(function, off);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h b/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h
new file mode 100644
index 00000000..6a896e94
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/random_orthogonal_matrix.h
@@ -0,0 +1,72 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef FUNCTIONS_RANDOM_ORTHOGONAL_MATRIX_H_
+#define FUNCTIONS_RANDOM_ORTHOGONAL_MATRIX_H_
+
+#include <cstdlib> // for std::size_t
+#include <cmath> // for std::cos, std::sin
+#include <random> // for std::uniform_real_distribution, std::random_device
+
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+#include <Eigen/SVD>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/point_generators_d.h>
+
+#include <boost/math/constants/constants.hpp> // for PI value
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Generates a uniform random orthogonal matrix using the "subgroup algorithm" by
+ * Diaconis & Shahshahani.
+ * \details Taken from https://en.wikipedia.org/wiki/Rotation_matrix#Uniform_random_rotation_matrices.
+ * The idea: take a random rotation matrix of dimension d-1, embed it
+ * as a d*d matrix M with the last column (0,...,0,1).
+ * Pick a random vector v on a sphere S^d. Rotate the matrix M so that its last column is v.
+ * The determinant of the matrix can be either 1 or -1
+ */
+// Note: the householderQR operation at the end seems to take a lot of time at compilation.
+// The CGAL headers are another source of long compilation time.
+Eigen::MatrixXd random_orthogonal_matrix(std::size_t d) {
+ typedef CGAL::Epick_d<CGAL::Dynamic_dimension_tag> Kernel;
+ typedef typename Kernel::Point_d Point_d;
+ if (d == 1) return Eigen::VectorXd::Constant(1, 1.0);
+ if (d == 2) {
+ // 0. < alpha < 2 Pi
+ std::uniform_real_distribution<double> unif(0., 2 * boost::math::constants::pi<double>());
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ double alpha = unif(rand_engine);
+
+ Eigen::Matrix2d rot;
+ rot << std::cos(alpha), -std::sin(alpha), std::sin(alpha), cos(alpha);
+ return rot;
+ }
+ Eigen::MatrixXd low_dim_rot = random_orthogonal_matrix(d - 1);
+ Eigen::MatrixXd rot(d, d);
+ Point_d v = *CGAL::Random_points_on_sphere_d<Point_d>(d, 1);
+ for (std::size_t i = 0; i < d; ++i) rot(i, 0) = v[i];
+ for (std::size_t i = 0; i < d - 1; ++i)
+ for (std::size_t j = 1; j < d - 1; ++j) rot(i, j) = low_dim_rot(i, j - 1);
+ for (std::size_t j = 1; j < d; ++j) rot(d - 1, j) = 0;
+ rot = rot.householderQr()
+ .householderQ(); // a way to do Gram-Schmidt, see https://forum.kde.org/viewtopic.php?f=74&t=118568#p297246
+ return rot;
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h b/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h
new file mode 100644
index 00000000..ca08f629
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/Mesh_medit.h
@@ -0,0 +1,60 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_MESH_MEDIT_H_
+#define IO_MESH_MEDIT_H_
+
+#include <Eigen/Dense>
+
+#include <vector>
+#include <utility> // for std::pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Mesh_medit
+ * \brief Structure to store a mesh that can be output in Medit .mesh file format
+ * using the output_meshes_to_medit method.
+ *
+ * \ingroup coxeter_triangulation
+ */
+struct Mesh_medit {
+ /** \brief Type of a range of vertices. */
+ typedef std::vector<Eigen::VectorXd> Vertex_points;
+ /** \brief Type of a mesh element.
+ * A pair consisting of a vector of vertex indices of type std::size_t
+ * and of an integer that represents the common reference number for
+ * the mesh elements of this type. */
+ typedef std::pair<std::vector<std::size_t>, std::size_t> Mesh_element;
+ /** \brief Type of a range of mesh elements. */
+ typedef std::vector<Mesh_element> Mesh_elements;
+ /** \brief Type of a range of a scalar field. */
+ typedef std::vector<double> Scalar_field_range;
+
+ /** \brief Range of vertices of type Eigen::VectorXd to output. */
+ Vertex_points vertex_points;
+ /** \brief Range of edges. */
+ Mesh_elements edges;
+ /** \brief Range of triangles. */
+ Mesh_elements triangles;
+ /** \brief Range of tetrahedra. */
+ Mesh_elements tetrahedra;
+ /** \brief Range of scalar values over triangles. */
+ Scalar_field_range triangles_scalar_range;
+ /** \brief Range of scalar values over tetrahedra. */
+ Scalar_field_range tetrahedra_scalar_range;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h b/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h
new file mode 100644
index 00000000..9750f366
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/build_mesh_from_cell_complex.h
@@ -0,0 +1,171 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_BUILD_MESH_FROM_CELL_COMPLEX_H_
+#define IO_BUILD_MESH_FROM_CELL_COMPLEX_H_
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/IO/Mesh_medit.h>
+
+#include <Eigen/Dense>
+
+#include <cstdlib> // for std::size_t
+#include <map>
+#include <set>
+#include <string>
+#include <utility> // for std::make_pair
+#include <algorithm> // for std::min
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+struct Configuration {
+ Configuration(bool t_edges, bool t_triangles, bool t_tetrahedra, std::size_t r_edges, std::size_t r_triangles,
+ std::size_t r_tetrahedra)
+ : toggle_edges(t_edges),
+ toggle_triangles(t_triangles),
+ toggle_tetrahedra(t_tetrahedra),
+ ref_edges(r_edges),
+ ref_triangles(r_triangles),
+ ref_tetrahedra(r_tetrahedra) {}
+
+ Configuration() {}
+
+ bool toggle_edges = true, toggle_triangles = true, toggle_tetrahedra = true;
+ std::size_t ref_edges = 1, ref_triangles = 1, ref_tetrahedra = 1;
+};
+
+template <class Hasse_cell, class Simplex_cell_map>
+void populate_mesh(Mesh_medit& output, Simplex_cell_map& sc_map, Configuration configuration, std::size_t amb_d,
+ std::map<Hasse_cell*, std::size_t> vi_map) {
+ using Mesh_element_vertices = Mesh_medit::Mesh_elements::value_type::first_type;
+ std::map<Hasse_cell*, std::size_t> ci_map;
+ std::size_t index = vi_map.size() + 1; // current size of output.vertex_points
+ if (sc_map.size() >= 3)
+ for (const auto& sc_pair : sc_map[2]) {
+ Eigen::VectorXd barycenter = Eigen::VectorXd::Zero(amb_d);
+ std::set<std::size_t> vertex_indices;
+ Hasse_cell* cell = sc_pair.second;
+ for (const auto& ei_pair : cell->get_boundary())
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) vertex_indices.emplace(vi_map[vi_pair.first]);
+ for (const std::size_t& v : vertex_indices) barycenter += output.vertex_points[v - 1];
+ ci_map.emplace(cell, index++);
+ output.vertex_points.emplace_back((1. / vertex_indices.size()) * barycenter);
+#ifdef DEBUG_TRACES
+ std::string vlist = " (" + std::to_string(index - 1) + ")";
+ for (const std::size_t& v : vertex_indices) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(cell), vlist);
+#endif
+ }
+
+ if (configuration.toggle_edges && sc_map.size() >= 2)
+ for (const auto& sc_pair : sc_map[1]) {
+ Hasse_cell* edge_cell = sc_pair.second;
+ Mesh_element_vertices edge;
+ for (const auto& vi_pair : edge_cell->get_boundary()) edge.push_back(vi_map[vi_pair.first]);
+ output.edges.emplace_back(edge, configuration.ref_edges);
+#ifdef DEBUG_TRACES
+ std::string vlist;
+ for (const std::size_t& v : edge) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(edge_cell), vlist);
+#endif
+ }
+
+ if (configuration.toggle_triangles && sc_map.size() >= 3)
+ for (const auto& sc_pair : sc_map[2]) {
+ for (const auto& ei_pair : sc_pair.second->get_boundary()) {
+ Mesh_element_vertices triangle(1, ci_map[sc_pair.second]);
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) triangle.push_back(vi_map[vi_pair.first]);
+ output.triangles.emplace_back(triangle, configuration.ref_triangles);
+ }
+ }
+
+ if (configuration.toggle_tetrahedra && sc_map.size() >= 4)
+ for (const auto& sc_pair : sc_map[3]) {
+ Eigen::VectorXd barycenter = Eigen::VectorXd::Zero(amb_d);
+ std::set<std::size_t> vertex_indices;
+ Hasse_cell* cell = sc_pair.second;
+ for (const auto& ci_pair : cell->get_boundary())
+ for (const auto& ei_pair : ci_pair.first->get_boundary())
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) vertex_indices.emplace(vi_map[vi_pair.first]);
+ for (const std::size_t& v : vertex_indices) barycenter += output.vertex_points[v - 1];
+ output.vertex_points.emplace_back((1. / vertex_indices.size()) * barycenter);
+#ifdef DEBUG_TRACES
+ std::string vlist = " (" + std::to_string(index) + ")";
+ for (const std::size_t& v : vertex_indices) vlist += " " + std::to_string(v);
+ cell_vlist_map.emplace(to_string(cell), vlist);
+#endif
+
+ for (const auto& ci_pair : cell->get_boundary())
+ for (const auto& ei_pair : ci_pair.first->get_boundary()) {
+ Mesh_element_vertices tetrahedron = {index, ci_map[sc_pair.second]};
+ for (const auto& vi_pair : ei_pair.first->get_boundary()) tetrahedron.push_back(vi_map[vi_pair.first]);
+ output.tetrahedra.emplace_back(tetrahedron, configuration.ref_tetrahedra);
+ }
+ index++;
+ }
+}
+
+/** @brief Builds a Gudhi::coxeter_triangulation::Mesh_medit from a Gudhi::coxeter_triangulation::Cell_complex
+ *
+ * @ingroup coxeter_triangulation
+ */
+template <class Cell_complex>
+Mesh_medit build_mesh_from_cell_complex(const Cell_complex& cell_complex,
+ Configuration i_configuration = Configuration(),
+ Configuration b_configuration = Configuration()) {
+ using Hasse_cell = typename Cell_complex::Hasse_cell;
+ Mesh_medit output;
+ std::map<Hasse_cell*, std::size_t> vi_map; // one for vertices, other for 2d-cells
+ std::size_t index = 1; // current size of output.vertex_points
+
+ if (cell_complex.cell_point_map().empty()) return output;
+ std::size_t amb_d = std::min((int)cell_complex.cell_point_map().begin()->second.size(), 3);
+
+ for (const auto& cp_pair : cell_complex.cell_point_map()) {
+#ifdef DEBUG_TRACES
+ std::string vlist;
+ vlist += " " + std::to_string(index);
+ cell_vlist_map.emplace(to_string(cp_pair.first), vlist);
+#endif
+ vi_map.emplace(cp_pair.first, index++);
+ output.vertex_points.push_back(cp_pair.second);
+ output.vertex_points.back().conservativeResize(amb_d);
+ }
+
+ populate_mesh(output, cell_complex.interior_simplex_cell_maps(), i_configuration, amb_d, vi_map);
+#ifdef DEBUG_TRACES
+ for (const auto& sc_map : cell_complex.interior_simplex_cell_maps())
+ for (const auto& sc_pair : sc_map) {
+ std::string simplex = "I" + to_string(sc_pair.first);
+ std::string cell = to_string(sc_pair.second);
+ std::string vlist = cell_vlist_map.at(cell).substr(1);
+ simplex_vlist_map.emplace(simplex, vlist);
+ }
+#endif
+ populate_mesh(output, cell_complex.boundary_simplex_cell_maps(), b_configuration, amb_d, vi_map);
+#ifdef DEBUG_TRACES
+ for (const auto& sc_map : cell_complex.boundary_simplex_cell_maps())
+ for (const auto& sc_pair : sc_map) {
+ std::string simplex = "B" + to_string(sc_pair.first);
+ std::string cell = to_string(sc_pair.second);
+ std::string vlist = cell_vlist_map.at(cell).substr(1);
+ simplex_vlist_map.emplace(simplex, vlist);
+ }
+#endif
+ return output;
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h b/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h
new file mode 100644
index 00000000..a2995738
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/output_debug_traces_to_html.h
@@ -0,0 +1,550 @@
+#ifndef IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
+#define IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
+
+#ifdef DEBUG_TRACES // All this part of code can be skipped if DEBUG_TRACES are not ON - cmake -DDEBUG_TRACES=ON .
+
+#include <sstream>
+#include <fstream>
+#include <vector>
+#include <list>
+#include <string>
+#include <regex>
+
+#include <Eigen/Dense>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os, const std::vector<T>& vector) {
+ os << "(";
+ if (vector.empty()) {
+ os << ")";
+ return os;
+ }
+ auto v_it = vector.begin();
+ os << *v_it++;
+ for (; v_it != vector.end(); ++v_it) os << ", " << *v_it;
+ os << ")";
+ return os;
+}
+
+/* A class to make the vector horizontal instead of vertical */
+struct Straighten {
+ Straighten(const Eigen::VectorXd& vector) : vector_(vector) {}
+ const Eigen::VectorXd& vector_;
+};
+
+std::ostream& operator<<(std::ostream& os, const Straighten& str) {
+ std::size_t size = str.vector_.size();
+ os << "(" << str.vector_(0);
+ if (size == 0) {
+ os << ")";
+ return os;
+ }
+ for (std::size_t i = 1; i < size; ++i) os << ", " << str.vector_(i);
+ os << ")";
+ return os;
+}
+
+std::string id_from_simplex(const std::string& simplex) {
+ std::regex r("\\s+"), r2("\\(|\\)|\\{|\\}"), r3(","), r4("\\["), r5("\\]");
+ std::string output = std::regex_replace(simplex, r, "");
+ output = std::regex_replace(output, r2, ":");
+ output = std::regex_replace(output, r3, ".");
+ output = std::regex_replace(output, r4, "_");
+ output = std::regex_replace(output, r5, "");
+ return output;
+}
+
+template <typename T>
+std::string to_string(const T& t) {
+ std::ostringstream oss;
+ oss << t;
+ return oss.str();
+}
+
+struct MT_inserted_info {
+ std::string qr_face_, init_face_, qr_intersection_;
+ bool qr_success_, is_boundary_;
+ template <class Query_result, class Simplex_handle>
+ MT_inserted_info(const Query_result& qr, const Simplex_handle& face, bool is_boundary)
+ : qr_face_(to_string(face)),
+ init_face_(to_string(face)),
+ qr_intersection_(to_string(qr.intersection)),
+ qr_success_(qr.success),
+ is_boundary_(is_boundary) {}
+};
+std::list<MT_inserted_info> mt_seed_inserted_list, mt_inserted_list;
+
+struct CC_summary_info {
+ std::string face_, cell_;
+ template <class SC_pair>
+ CC_summary_info(const SC_pair& sc_pair) : face_(to_string(sc_pair.first)), cell_(to_string(sc_pair.second)) {}
+};
+using CC_summary_list = std::list<CC_summary_info>;
+std::vector<CC_summary_list> cc_interior_summary_lists, cc_boundary_summary_lists;
+
+struct CC_detail_info {
+ enum class Result_type { self, face, coface, inserted, join_single, join_is_face };
+ std::string simplex_, trigger_, init_simplex_;
+ Result_type status_;
+ bool join_trigger_ = false;
+ std::list<std::string> faces_, post_faces_, cofaces_;
+ template <class Simplex_handle>
+ CC_detail_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)) {}
+};
+using CC_detail_list = std::list<CC_detail_info>;
+std::vector<CC_detail_list> cc_interior_detail_lists, cc_boundary_detail_lists;
+std::vector<CC_detail_list> cc_interior_insert_detail_lists, cc_boundary_insert_detail_lists;
+
+struct CC_prejoin_info {
+ enum class Result_type { join_single, join_is_face, join_different, join_same };
+ std::string simplex_, join_;
+ std::vector<std::string> faces_;
+ std::size_t dimension_;
+ Result_type status_;
+ template <class Simplex_handle>
+ CC_prejoin_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)), dimension_(simplex.dimension()) {}
+};
+using CC_prejoin_list = std::list<CC_prejoin_info>;
+std::vector<CC_prejoin_list> cc_interior_prejoin_lists, cc_boundary_prejoin_lists;
+
+struct CC_join_info {
+ enum class Result_type { self, face, coface, inserted, join_single, join_is_face };
+ std::string simplex_, join_, trigger_;
+ Result_type status_;
+ std::list<std::string> boundary_faces_;
+ std::list<std::string> faces_, post_faces_, cofaces_;
+ template <class Simplex_handle>
+ CC_join_info(const Simplex_handle& simplex) : simplex_(to_string(simplex)) {}
+};
+bool join_switch = false;
+std::vector<CC_detail_list> cc_interior_join_detail_lists, cc_boundary_join_detail_lists;
+
+std::map<std::string, std::string> cell_vlist_map;
+std::map<std::string, std::string> simplex_vlist_map;
+
+std::ostringstream mt_ostream, vis_ostream;
+std::vector<std::ostringstream> cc_summary_ostream, cc_traces_ostream;
+
+std::string simplex_format(const std::string& simplex, bool is_boundary) {
+ std::string b_simplex = (is_boundary ? "B" : "I") + simplex;
+ std::string tooltiptext;
+ auto it = simplex_vlist_map.find(b_simplex);
+ if (it == simplex_vlist_map.end())
+ tooltiptext = "deleted";
+ else
+ tooltiptext = simplex_vlist_map.at(b_simplex);
+ return (std::string) "<a class=\"" + (is_boundary ? "boundary" : "interior") + "\" href=\"#" +
+ id_from_simplex(b_simplex) + "\">" + b_simplex + "<span class=\"tooltiptext\">" + tooltiptext + "</span></a>";
+}
+
+std::string simplex_format(const std::string& b_simplex) {
+ bool is_boundary = b_simplex[0] == 'B';
+ std::string tooltiptext;
+ auto it = simplex_vlist_map.find(b_simplex);
+ if (it == simplex_vlist_map.end())
+ tooltiptext = "deleted";
+ else
+ tooltiptext = simplex_vlist_map.at(b_simplex);
+ return (std::string) "<a class=\"" + (is_boundary ? "boundary" : "interior") + "\" href=\"#" +
+ id_from_simplex(b_simplex) + "\">" + b_simplex + "<span class=\"tooltiptext\">" + tooltiptext + "</span></a>";
+}
+
+void write_head(std::ofstream& ofs) {
+ ofs << " <head>\n"
+ << " <title>Cell complex debug trace</title>\n"
+ << " <style>\n"
+ << " a.boundary {\n"
+ << " position: relative;\n"
+ << " display: inline-block;\n"
+ << " color: darkred;\n"
+ << " background-color: lightgreen\n"
+ << " }\n"
+ << " a.interior {\n"
+ << " position: relative;\n"
+ << " display: inline-block;\n"
+ << " color: navy;\n"
+ << " background-color: yellow\n"
+ << " }\n"
+ << " .tooltiptext {\n"
+ << " visibility: hidden;\n"
+ << " width: 120px;\n"
+ << " background-color: #555;\n"
+ << " color: #fff;\n"
+ << " text-align: center;\n"
+ << " padding: 5px 0;\n"
+ << " border-radius: 6px;\n"
+ << " position: absolute;\n"
+ << " z-index: 1;\n"
+ << " bottom: 125%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -60px;\n"
+ << " opacity: 0;\n"
+ << " transition: opacity 0.3s;\n"
+ << " }\n"
+ << " .boundary .tooltiptext::after {\n"
+ << " content: \"\";\n"
+ << " position: absolute;\n"
+ << " top: 100%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -5px;\n"
+ << " border-width: 5px;\n"
+ << " border-style: solid;\n"
+ << " border-color: #555 transparent transparent transparent;\n"
+ << " }\n"
+ << " .interior .tooltiptext::after {\n"
+ << " content: \"\";\n"
+ << " position: absolute;\n"
+ << " top: 100%;\n"
+ << " left: 50%;\n"
+ << " margin-left: -5px;\n"
+ << " border-width: 5px;\n"
+ << " border-style: solid;\n"
+ << " border-color: #555 transparent transparent transparent;\n"
+ << " }\n"
+ << " .boundary:hover .tooltiptext {\n"
+ << " visibility: visible;\n"
+ << " opacity: 1;\n"
+ << " }\n"
+ << " .interior:hover .tooltiptext {\n"
+ << " visibility: visible;\n"
+ << " opacity: 1;\n"
+ << " }\n"
+ << " ul.nav {\n"
+ << " list-style-type: none;\n"
+ << " margin: 0;\n"
+ << " padding: 0;\n"
+ << " overflow: auto;\n"
+ << " background-color: #333;\n"
+ << " position: fixed;\n"
+ << " height: 100%;\n"
+ << " width: 15%;\n"
+ << " }\n"
+ << " ul.nav li a {\n"
+ << " display: block;\n"
+ << " color: white;\n"
+ << " text-align: left;\n"
+ << " padding: 14px 16px;\n"
+ << " text-decoration: none;\n"
+ << " }\n"
+ << " .active {\n"
+ << " background-color: #4CAF50;\n"
+ << " }\n"
+ << " div {\n"
+ << " margin-left: 15%;\n"
+ << " padding: 1px 16px\n"
+ << " }\n"
+ << " div.navi {\n"
+ << " margin-left: 0%;\n"
+ << " padding: 0px 0px\n"
+ << " }\n"
+ << " h1 {\n"
+ << " margin-left: 15%;\n"
+ << " padding: 1px 16px\n"
+ << " }\n"
+ << " </style>\n"
+ << " </head>\n";
+}
+
+void write_nav(std::ofstream& ofs) {
+ ofs << " <div class=\"navi\" style=\"margin-top:30px;background-color:#1abc9c;\">\n"
+ << " <ul class=\"nav\">\n"
+ << " <li><a href=\"#mant\">Manifold tracing</a></li>\n"
+ << " <li><a href=\"#cell\">Cell complex</a>\n"
+ << " <ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <li><a href=\"#dim" << i << "\">Dimension " << i << "</a>\n";
+ ofs << " <ul>\n";
+ ofs << " <li><a href=\"#dim" << i << "i\">Interior</a></li>\n";
+ if (i < cc_boundary_summary_lists.size()) {
+ ofs << " <li><a href=\"#dim" << i << "b\">Boundary</a></li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ }
+ ofs << " </ul>\n"
+ << " </li>\n"
+ << " <li><a href=\"#visu\">Visualization details</a></li>\n"
+ << " </ul>\n"
+ << " </div>\n";
+}
+
+void write_mt(std::ofstream& ofs) {
+ ofs << " <div id=\"mant\">\n";
+ ofs << " <h2> Manifold debug trace </h2>\n";
+ ofs << " <h3> Simplices inserted during the seed phase </h3>\n";
+ ofs << " <ul>\n";
+ for (const MT_inserted_info& mt_info : mt_seed_inserted_list) {
+ if (mt_info.qr_success_) {
+ ofs << " <li>Inserted " << simplex_format(mt_info.qr_face_, mt_info.is_boundary_);
+ if (mt_info.qr_face_ != mt_info.init_face_)
+ ofs << " (initially " << simplex_format(mt_info.init_face_, mt_info.is_boundary_) << ")";
+ ofs << " intersection point is " << mt_info.qr_intersection_ << "</li>\n";
+ } else
+ ofs << " <li>Failed to insert " << mt_info.init_face_ << "</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " <h3> Simplices inserted during the while loop phase </h3>\n";
+ ofs << " <ul>\n";
+ for (const MT_inserted_info& mt_info : mt_inserted_list) {
+ if (mt_info.qr_success_) {
+ ofs << " <li>Inserted " << simplex_format(mt_info.qr_face_, mt_info.is_boundary_);
+ if (mt_info.qr_face_ != mt_info.init_face_)
+ ofs << " (initially " << simplex_format(mt_info.init_face_, mt_info.is_boundary_) << ")";
+ ofs << " intersection point is " << mt_info.qr_intersection_ << "</li>\n";
+ } else
+      ofs << "      <li>Failed to insert " << mt_info.init_face_ << "</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </div>\n";
+}
+
+void write_cc(std::ofstream& ofs) {
+ ofs << " <div id=\"cell\">\n"
+ << " <h2> Cell complex debug trace </h2>\n"
+ << " <p>Go to:</p>\n"
+ << " <ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <li><a href=\"#dim" << i << "\">Dimension " << i << "</a></li>\n";
+ }
+ ofs << " </ul>\n";
+ for (std::size_t i = 0; i < cc_interior_summary_lists.size(); ++i) {
+ ofs << " <h3 id=\"dim" << i << "\"> Dimension " << i << "</h3>\n";
+ ofs << " <h4 id=\"dim" << i << "i\"> Summary for interior simplices</h4>\n";
+ if (i < cc_boundary_summary_lists.size()) ofs << " <p><a href=\"#dim" << i << "b\">Go to boundary</a></p>\n";
+ ofs << " <ul>\n";
+ for (const CC_summary_info& cc_info : cc_interior_summary_lists[i])
+ ofs << " <li id = \"" << id_from_simplex("I" + cc_info.face_) << "\">"
+ << simplex_format(cc_info.face_, false) << " cell =" << cc_info.cell_ << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " <h4> Prejoin state of the interior cells of dimension " << i << "</h4>\n";
+ auto prejoin_it = cc_interior_prejoin_lists[i].begin();
+ while (prejoin_it != cc_interior_prejoin_lists[i].end()) {
+ std::size_t j = prejoin_it->dimension_;
+ ofs << " <h5>" << j << "-dimensional ambient simplices</h5>\n";
+ ofs << " <ul>\n";
+ for (; prejoin_it->dimension_ == j; ++prejoin_it) {
+ ofs << " <li>" << simplex_format(prejoin_it->simplex_, false)
+ << " join = " << simplex_format(prejoin_it->join_, false) << " boundary:\n"
+ << " <ul>\n";
+ for (const auto& face : prejoin_it->faces_) ofs << " <li>" << simplex_format(face) << "</li>";
+ ofs << " </ul>\n";
+ switch (prejoin_it->status_) {
+ case (CC_prejoin_info::Result_type::join_single):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " as it has a single face.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_is_face):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " as its join " << simplex_format(prejoin_it->join_, false) << " is one of the faces.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_different):
+ ofs << " <p style=\"color: magenta\">Deleted " << simplex_format(prejoin_it->simplex_, false)
+ << " and replaced by its join " << simplex_format(prejoin_it->join_, false) << ".</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_same):
+ ofs << " <p style=\"color: green\">Kept " << simplex_format(prejoin_it->simplex_, false)
+ << ".</p>";
+ }
+ ofs << " </li>";
+ }
+ ofs << " </ul>\n";
+ }
+ ofs << " <h4> Details for interior simplices</h4>\n";
+ ofs << " <ul>\n";
+ for (const CC_detail_info& cc_info : cc_interior_detail_lists[i]) {
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:magenta\" id = \"" << id_from_simplex("I" + cc_info.simplex_)
+ << "\"> Simplex " << simplex_format(cc_info.simplex_, false) << " has only one face ("
+ << simplex_format(cc_info.trigger_, false) << ") and is deleted.";
+ continue;
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:darkmagenta\" id = \"" << id_from_simplex("I" + cc_info.simplex_)
+ << "\"> The join of the simplex " << simplex_format(cc_info.simplex_, false) << " is one of its faces ("
+ << simplex_format(cc_info.trigger_, false) << "), hence it is is deleted.";
+ continue;
+ }
+ ofs << " <li> Insert_cell called for " << simplex_format(cc_info.simplex_, false) << "\n";
+ ofs << " <ul>\n";
+ // for (const std::string& cof: cc_info.faces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, false)
+ // << " is a face of " << simplex_format(cof, false) << "\n";
+ ofs << " </ul>\n";
+ ofs << " <ul>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::self) {
+ ofs << " <p><span style=\"color:blue\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " already exists in the cell complex!</span></p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::face) {
+ ofs << " <p><span style=\"color:red\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " is a face of the simplex " << simplex_format(cc_info.trigger_, false) << "!</span><br>\n";
+ ofs << " <ul>\n";
+ for (const std::string post_face : cc_info.post_faces_)
+ ofs << " <li id = \"" << id_from_simplex("I" + post_face) << "\">"
+ << "Post deleting " << simplex_format(post_face, false) << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " </p>\n";
+ ofs << " <p id = \"" << id_from_simplex("I" + cc_info.trigger_) << "\">"
+ << "Deleting " << simplex_format(cc_info.trigger_, false) << "</p>\n";
+ }
+ // for (const std::string& fac: cc_info.cofaces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, false)
+ // << " is a coface of " << simplex_format(fac, false) << "\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::coface) {
+ ofs << " <p><span style=\"color:darkorange\">The simplex " << simplex_format(cc_info.simplex_, false)
+ << " is a coface of the simplex " << simplex_format(cc_info.trigger_, false) << "!</span><p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::inserted) {
+ ofs << " <p><span style=\"color:green\">Successfully inserted "
+ << simplex_format(cc_info.simplex_, false) << "!</span><p>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ }
+ ofs << " </ul>\n";
+
+ if (i < cc_boundary_summary_lists.size()) {
+ ofs << " <h4 id=\"dim" << i << "b\"> Summary for boundary simplices</h4>\n";
+ ofs << " <p><a href=\"#dim" << i << "i\">Go to interior</a></p>\n";
+ ofs << " <ul>\n";
+ for (const CC_summary_info& cc_info : cc_boundary_summary_lists[i])
+ ofs << " <li id = \"" << id_from_simplex("B" + cc_info.face_) << "\">"
+ << simplex_format(cc_info.face_, true) << " cell =" << cc_info.cell_ << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " <h4> Prejoin state of the boundary cells of dimension " << i << "</h4>\n";
+ auto prejoin_it = cc_boundary_prejoin_lists[i].begin();
+ while (prejoin_it != cc_boundary_prejoin_lists[i].end()) {
+ std::size_t j = prejoin_it->dimension_;
+ ofs << " <h5>" << j << "-dimensional ambient simplices</h5>\n";
+ ofs << " <ul>\n";
+ for (; prejoin_it->dimension_ == j; ++prejoin_it) {
+ ofs << " <li>" << simplex_format(prejoin_it->simplex_, true)
+ << " join = " << simplex_format(prejoin_it->join_, true) << " boundary:\n"
+ << " <ul>\n";
+ for (const auto& face : prejoin_it->faces_) ofs << " <li>" << simplex_format(face) << "</li>";
+ ofs << " </ul>\n";
+ switch (prejoin_it->status_) {
+ case (CC_prejoin_info::Result_type::join_single):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " as it has a single face.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_is_face):
+ ofs << " <p style=\"color: red\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " as its join " << simplex_format(prejoin_it->join_, true) << " is one of the faces.</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_different):
+ ofs << " <p style=\"color: magenta\">Deleted " << simplex_format(prejoin_it->simplex_, true)
+ << " and replaced by its join " << simplex_format(prejoin_it->join_, true) << ".</p>";
+ break;
+ case (CC_prejoin_info::Result_type::join_same):
+ ofs << " <p style=\"color: green\">Kept " << simplex_format(prejoin_it->simplex_, true)
+ << ".</p>";
+ }
+ ofs << " </li>";
+ }
+ ofs << " </ul>\n";
+ }
+ }
+ if (i < cc_boundary_detail_lists.size()) {
+ ofs << " <h4> Details for boundary simplices</h4>\n"
+ << " <ul>\n";
+ for (const CC_detail_info& cc_info : cc_boundary_detail_lists[i]) {
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:magenta\" id = \"" << id_from_simplex("B" + cc_info.simplex_)
+ << "\"> Simplex " << simplex_format(cc_info.simplex_, true) << " has only one face ("
+ << simplex_format(cc_info.trigger_, true) << ") and is deleted.";
+ continue;
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::join_single) {
+ ofs << " <li style=\"color:darkmagenta\" id = \"" << id_from_simplex("B" + cc_info.simplex_)
+ << "\"> The join of the simplex " << simplex_format(cc_info.simplex_, true) << " is one of its faces ("
+ << simplex_format(cc_info.trigger_, true) << "), hence it is is deleted.";
+ continue;
+ }
+ ofs << " <li> Insert_simplex called on " << simplex_format(cc_info.simplex_, true);
+ ofs << " <ul>\n";
+ // for (const std::string& cof: cc_info.faces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, true)
+ // << " is a face of " << simplex_format(cof, true) << "\n";
+ ofs << " </ul>\n";
+ ofs << " <ul>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::self) {
+ ofs << " <p><span style=\"color:blue\">The simplex " << simplex_format(cc_info.simplex_, true)
+ << " already exists in the cell complex!</span></p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::face) {
+ ofs << " <p><span style=\"color:red\">The simplex " << simplex_format(cc_info.simplex_, true)
+ << " is a face of the simplex " << simplex_format(cc_info.trigger_, true) << "!</span><br>\n";
+ ofs << " <ul>\n";
+ for (const std::string post_face : cc_info.post_faces_)
+ ofs << " <li id=\"" << id_from_simplex("B" + post_face) << "\">Post deleting "
+ << simplex_format(post_face, true) << "</li>\n";
+ ofs << " </ul>\n";
+ ofs << " </p>\n";
+ ofs << " <p id=\"" << id_from_simplex(cc_info.trigger_) << "\">Deleting "
+ << simplex_format(cc_info.trigger_, true) << "</p>\n";
+ }
+ // for (const std::string& fac: cc_info.cofaces_)
+ // ofs << " <li>Checking if " << simplex_format(cc_info.simplex_, true)
+ // << " is a coface of " << simplex_format(fac, true) << "\n";
+ ofs << " </ul>\n";
+ ofs << " </li>\n";
+ if (cc_info.status_ == CC_detail_info::Result_type::coface) {
+ ofs << " <p><span style=\"color:darkorange\">The simplex "
+ << simplex_format(cc_info.simplex_, true) << " is a coface of the simplex "
+ << simplex_format(cc_info.trigger_, true) << "!</span><p>\n";
+ }
+ if (cc_info.status_ == CC_detail_info::Result_type::inserted) {
+ ofs << " <p><span style=\"color:green\">Successfully inserted "
+ << simplex_format(cc_info.simplex_, true) << "!</span><p>\n";
+ }
+ }
+ ofs << " </ul>\n";
+ }
+ }
+ ofs << " </div>\n";
+}
+
+void write_visu(std::ofstream& ofs) {
+ ofs << " <div id=\"visu\">\n"
+ << " <h2> Visualization details debug trace </h2>\n";
+ // std::vector<std::map<std::string, std::string> > vs_maps(cc_interior_summary_lists.size());
+ std::map<std::string, std::string> vs_map;
+ for (const auto& sv_pair : simplex_vlist_map) vs_map.emplace(sv_pair.second, sv_pair.first);
+ ofs << " <ul>\n";
+ for (const auto& vs_pair : vs_map) {
+ std::string w_simplex = vs_pair.second.substr(1);
+ bool is_boundary = vs_pair.second[0] == 'B';
+ ofs << " <li><b>" << vs_pair.first << "</b>: " << simplex_format(w_simplex, is_boundary) << "</li>\n";
+ }
+ ofs << " </ul>\n";
+ ofs << " </div>\n";
+}
+
+void write_to_html(std::string file_name) {
+ std::ofstream ofs(file_name + ".html", std::ofstream::out);
+ ofs << "<!DOCTYPE html>\n"
+ << "<html>\n";
+ write_head(ofs);
+ ofs << " <body>\n";
+ write_nav(ofs);
+ ofs << " <h1> Debug traces for " << file_name << " </h1>\n";
+ write_mt(ofs);
+ write_cc(ofs);
+ write_visu(ofs);
+ ofs << " </body>\n";
+ ofs << "</html>\n";
+
+ ofs.close();
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // DEBUG_TRACES
+#endif // IO_OUTPUT_DEBUG_TRACES_TO_HTML_H_
diff --git a/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h b/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h
new file mode 100644
index 00000000..f69d8b29
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/IO/output_meshes_to_medit.h
@@ -0,0 +1,154 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IO_OUTPUT_MESHES_TO_MEDIT_H_
+#define IO_OUTPUT_MESHES_TO_MEDIT_H_
+
+#include <gudhi/IO/Mesh_medit.h>
+
+#include <Eigen/Dense>
+
+#include <cstdlib> // for std::size_t
+#include <fstream> // for std::ofstream
+#include <vector>
+#include <type_traits> // for std::enable_if
+#include <tuple> // for std::get
+#include <utility> // for std::make_pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+using Vertex_points = Mesh_medit::Vertex_points;
+using Mesh_elements = Mesh_medit::Mesh_elements;
+using Scalar_field_range = Mesh_medit::Scalar_field_range;
+
/* Recursion terminator for fill_meshes: once the compile-time index I has
 * walked past the last mesh of the parameter pack, there is nothing left to
 * accumulate, so this overload does nothing. */
template <std::size_t I = 0, typename... Meshes>
typename std::enable_if<I == sizeof...(Meshes), void>::type fill_meshes(Vertex_points& vertex_points,
                                                                        Mesh_elements& edges, Mesh_elements& triangles,
                                                                        Mesh_elements& tetrahedra,
                                                                        Scalar_field_range& triangles_scalar_range,
                                                                        Scalar_field_range& tetrahedra_scalar_range,
                                                                        std::size_t index, const Meshes&... meshes) {}
+
+template <std::size_t I = 0, typename... Meshes>
+typename std::enable_if<I != sizeof...(Meshes), void>::type fill_meshes(Vertex_points& vertex_points,
+ Mesh_elements& edges, Mesh_elements& triangles,
+ Mesh_elements& tetrahedra,
+ Scalar_field_range& triangles_scalar_range,
+ Scalar_field_range& tetrahedra_scalar_range,
+ std::size_t index, const Meshes&... meshes) {
+ auto mesh = std::get<I>(std::forward_as_tuple(meshes...));
+ for (const auto& v : mesh.vertex_points) vertex_points.push_back(v);
+ for (const auto& e : mesh.edges) {
+ std::vector<std::size_t> edge;
+ for (const auto& v_i : e.first) edge.push_back(v_i + index);
+ edges.emplace_back(edge, e.second);
+ }
+ for (const auto& t : mesh.triangles) {
+ std::vector<std::size_t> triangle;
+ for (const auto& v_i : t.first) triangle.push_back(v_i + index);
+ triangles.emplace_back(triangle, t.second);
+ }
+ for (const auto& t : mesh.tetrahedra) {
+ std::vector<std::size_t> tetrahedron;
+ for (const auto& v_i : t.first) tetrahedron.push_back(v_i + index);
+ tetrahedra.emplace_back(tetrahedron, t.second);
+ }
+ for (const auto& b : mesh.triangles_scalar_range) triangles_scalar_range.push_back(b);
+ for (const auto& b : mesh.tetrahedra_scalar_range) tetrahedra_scalar_range.push_back(b);
+ fill_meshes<I + 1, Meshes...>(vertex_points, edges, triangles, tetrahedra, triangles_scalar_range,
+ tetrahedra_scalar_range, index + mesh.vertex_points.size(), meshes...);
+}
+
+/** \brief Outputs a text file with specified meshes that can be visualized in
+ * <a target="_blank" href="https://www.ljll.math.upmc.fr/frey/software.html">Medit</a>.
+ *
+ * @param[in] amb_d Ambient dimension. Can be 2 or 3.
+ * @param[in] file_name The name of the output file.
+ * @param[in] meshes A pack of meshes to be specified separated by commas.
+ *
+ * @ingroup coxeter_triangulation
+ */
+template <typename... Meshes>
+void output_meshes_to_medit(std::size_t amb_d, std::string file_name, const Meshes&... meshes) {
+ Vertex_points vertex_points;
+ Mesh_elements edges, triangles, tetrahedra;
+ Scalar_field_range triangles_scalar_range, tetrahedra_scalar_range;
+ fill_meshes(vertex_points, edges, triangles, tetrahedra, triangles_scalar_range, tetrahedra_scalar_range, 0,
+ meshes...);
+
+ std::ofstream ofs(file_name + ".mesh", std::ofstream::out);
+ std::ofstream ofs_bb(file_name + ".bb", std::ofstream::out);
+
+ if (amb_d == 2) {
+ ofs << "MeshVersionFormatted 1\nDimension 2\n";
+ ofs_bb << "2 1 ";
+ ofs << "Vertices\n" << vertex_points.size() << "\n";
+ for (auto p : vertex_points) {
+ ofs << p[0] << " " << p[1] << " 2\n";
+ }
+ ofs << "Edges " << edges.size() << "\n";
+ for (auto e : edges) {
+ for (auto v : e.first) ofs << v << " ";
+ ofs << e.second << std::endl;
+ }
+ ofs << "Triangles " << triangles.size() << "\n";
+ for (auto s : triangles) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+
+ ofs_bb << triangles_scalar_range.size() << " 1\n";
+ for (auto& b : triangles_scalar_range) ofs_bb << b << "\n";
+
+ } else {
+ ofs << "MeshVersionFormatted 1\nDimension 3\n";
+ ofs_bb << "3 1 ";
+ ofs << "Vertices\n" << vertex_points.size() << "\n";
+ for (auto p : vertex_points) {
+ ofs << p[0] << " " << p[1] << " " << p[2] << " 2\n";
+ }
+ ofs << "Edges " << edges.size() << "\n";
+ for (auto e : edges) {
+ for (auto v : e.first) ofs << v << " ";
+ ofs << e.second << std::endl;
+ }
+ ofs << "Triangles " << triangles.size() << "\n";
+ for (auto s : triangles) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+ ofs << "Tetrahedra " << tetrahedra.size() << "\n";
+ for (auto s : tetrahedra) {
+ for (auto v : s.first) {
+ ofs << v << " ";
+ }
+ ofs << s.second << std::endl;
+ }
+
+ ofs_bb << triangles_scalar_range.size() + tetrahedra_scalar_range.size() << " 1\n";
+ for (auto& b : triangles_scalar_range) ofs_bb << b << "\n";
+ for (auto& b : tetrahedra_scalar_range) ofs_bb << b << "\n";
+ }
+
+ ofs.close();
+ ofs_bb.close();
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h b/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h
new file mode 100644
index 00000000..277f8b6c
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Implicit_manifold_intersection_oracle.h
@@ -0,0 +1,261 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef IMPLICIT_MANIFOLD_INTERSECTION_ORACLE_H_
+#define IMPLICIT_MANIFOLD_INTERSECTION_ORACLE_H_
+
+#include <Eigen/Dense>
+
+#include <gudhi/Permutahedral_representation/face_from_indices.h>
+#include <gudhi/Functions/Constant_function.h>
+#include <gudhi/Functions/PL_approximation.h>
+#include <gudhi/Coxeter_triangulation/Query_result.h>
+#include <gudhi/Debug_utils.h> // for GUDHI_CHECK
+
+#include <vector>
+#include <limits> // for std::numeric_limits<>
+#include <cmath> // for std::fabs
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Implicit_manifold_intersection_oracle
+ * \brief An oracle that supports the intersection query on an implicit manifold.
+ *
+ * \tparam Function_ The function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ * \tparam Domain_function_ The domain function template parameter. Should be a model of
+ * the concept FunctionForImplicitManifold.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Domain_function_ = Constant_function>
+class Implicit_manifold_intersection_oracle {
+ /* Computes the affine coordinates of the intersection point of the implicit manifold
+ * and the affine hull of the simplex. */
+ template <class Simplex_handle, class Triangulation>
+ Eigen::VectorXd compute_lambda(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ std::size_t cod_d = this->cod_d();
+ Eigen::MatrixXd matrix(cod_d + 1, cod_d + 1);
+ for (std::size_t i = 0; i < cod_d + 1; ++i) matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : simplex.vertex_range()) {
+ Eigen::VectorXd v_coords = fun_(triangulation.cartesian_coordinates(v));
+ for (std::size_t i = 1; i < cod_d + 1; ++i) matrix(i, j) = v_coords(i - 1);
+ j++;
+ }
+ Eigen::VectorXd z(cod_d + 1);
+ z(0) = 1;
+ for (std::size_t i = 1; i < cod_d + 1; ++i) z(i) = 0;
+ Eigen::VectorXd lambda = matrix.colPivHouseholderQr().solve(z);
+ if (!z.isApprox(matrix*lambda)) {
+ // NaN non valid results
+ for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i) lambda(i) =
+ std::numeric_limits<double>::quiet_NaN();
+ }
+ return lambda;
+ }
+
+ /* Computes the affine coordinates of the intersection point of the boundary
+ * of the implicit manifold and the affine hull of the simplex. */
+ template <class Simplex_handle, class Triangulation>
+ Eigen::VectorXd compute_boundary_lambda(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ std::size_t cod_d = this->cod_d();
+ Eigen::MatrixXd matrix(cod_d + 2, cod_d + 2);
+ for (std::size_t i = 0; i < cod_d + 2; ++i) matrix(0, i) = 1;
+ std::size_t j = 0;
+ for (auto v : simplex.vertex_range()) {
+ Eigen::VectorXd v_coords = fun_(triangulation.cartesian_coordinates(v));
+ for (std::size_t i = 1; i < cod_d + 1; ++i) matrix(i, j) = v_coords(i - 1);
+ Eigen::VectorXd bv_coords = domain_fun_(triangulation.cartesian_coordinates(v));
+ matrix(cod_d + 1, j) = bv_coords(0);
+ j++;
+ }
+ Eigen::VectorXd z(cod_d + 2);
+ z(0) = 1;
+ for (std::size_t i = 1; i < cod_d + 2; ++i) z(i) = 0;
+ Eigen::VectorXd lambda = matrix.colPivHouseholderQr().solve(z);
+ if (!z.isApprox(matrix*lambda)) {
+ // NaN non valid results
+ for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i) lambda(i) =
+ std::numeric_limits<double>::quiet_NaN();
+ }
+ return lambda;
+ }
+
+ /* Computes the intersection result for a given simplex in a triangulation. */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersection_result(const Eigen::VectorXd& lambda, const Simplex_handle& simplex,
+ const Triangulation& triangulation) const {
+ using QR = Query_result<Simplex_handle>;
+ std::size_t amb_d = triangulation.dimension();
+ std::size_t cod_d = simplex.dimension();
+ for (std::size_t i = 0; i < (std::size_t)lambda.size(); ++i) {
+ if (std::isnan(lambda(i))) return QR({Eigen::VectorXd(), false});
+ GUDHI_CHECK((std::fabs(lambda(i) - 1.) > std::numeric_limits<double>::epsilon() &&
+ std::fabs(lambda(i) - 0.) > std::numeric_limits<double>::epsilon()),
+ std::invalid_argument("A vertex of the triangulation lies exactly on the manifold"));
+ if (lambda(i) < 0. || lambda(i) > 1.) return QR({Eigen::VectorXd(), false});
+ }
+ Eigen::MatrixXd vertex_matrix(cod_d + 1, amb_d);
+ auto v_range = simplex.vertex_range();
+ auto v_it = v_range.begin();
+ for (std::size_t i = 0; i < cod_d + 1 && v_it != v_range.end(); ++v_it, ++i) {
+ Eigen::VectorXd v_coords = triangulation.cartesian_coordinates(*v_it);
+ for (std::size_t j = 0; j < amb_d; ++j) vertex_matrix(i, j) = v_coords(j);
+ }
+ Eigen::VectorXd intersection = lambda.transpose() * vertex_matrix;
+ return QR({intersection, true});
+ }
+
+ public:
+ /** \brief Ambient dimension of the implicit manifold. */
+ std::size_t amb_d() const { return fun_.amb_d(); }
+
+ /** \brief Codimension of the implicit manifold. */
+ std::size_t cod_d() const { return fun_.cod_d(); }
+
+ /** \brief The seed point of the implicit manifold. */
+ Eigen::VectorXd seed() const { return fun_.seed(); }
+
+ /** \brief Intersection query with the relative interior of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the relative interior of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the manifold
+ * (the codomain dimension of the function).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects(const Simplex_handle& simplex, const Triangulation& triangulation) const {
+ Eigen::VectorXd lambda = compute_lambda(simplex, triangulation);
+ return intersection_result(lambda, simplex, triangulation);
+ }
+
+ /** \brief Intersection query with the boundary of the manifold.
+ *
+ * \details The returned structure Query_result contains the boolean value
+ * that is true only if the intersection point of the query simplex and
+ * the boundary of the manifold exists, the intersection point
+ * and the face of the query simplex that contains
+ * the intersection point.
+ *
+ * \tparam Simplex_handle The class of the query simplex.
+ * Needs to be a model of the concept SimplexInCoxeterTriangulation.
+ * \tparam Triangulation The class of the triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ *
+ * @param[in] simplex The query simplex. The dimension of the simplex
+ * should be the same as the codimension of the boundary of the manifold
+ * (the codomain dimension of the function + 1).
+ * @param[in] triangulation The ambient triangulation. The dimension of
+ * the triangulation should be the same as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Simplex_handle, class Triangulation>
+ Query_result<Simplex_handle> intersects_boundary(const Simplex_handle& simplex,
+ const Triangulation& triangulation) const {
+ //std::cout << "intersects_boundary" << std::endl;
+ Eigen::VectorXd lambda = compute_boundary_lambda(simplex, triangulation);
+ return intersection_result(lambda, simplex, triangulation);
+ }
+
+ /** \brief Returns true if the input point lies inside the piecewise-linear
+ * domain induced by the given ambient triangulation that defines the relative
+ * interior of the piecewise-linear approximation of the manifold.
+ *
+ * @param p The input point. Needs to have the same dimension as the ambient
+ * dimension of the manifold (the domain dimension of the function).
+ * @param triangulation The ambient triangulation. Needs to have the same
+ * dimension as the ambient dimension of the manifold
+ * (the domain dimension of the function).
+ */
+ template <class Triangulation>
+ bool lies_in_domain(const Eigen::VectorXd& p, const Triangulation& triangulation) const {
+ Eigen::VectorXd pl_p = make_pl_approximation(domain_fun_, triangulation)(p);
+ return pl_p(0) < 0;
+ }
+
+ /** \brief Returns the function that defines the interior of the manifold */
+ const Function_& function() const { return fun_; }
+
+ /** \brief Constructs an intersection oracle for an implicit manifold potentially
+ * with boundary from given function and domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * before the restriction with the domain.
+ * @param domain_function The input domain function that can be used to define an implicit
+ * manifold with boundary.
+ */
+ Implicit_manifold_intersection_oracle(const Function_& function, const Domain_function_& domain_function)
+ : fun_(function), domain_fun_(domain_function) {}
+
+ /** \brief Constructs an intersection oracle for an implicit manifold
+ * without boundary from a given function.
+ *
+ * \details To use this constructor, the template Domain_function_ needs to be left
+ * at its default value (Gudhi::coxeter_triangulation::Constant_function).
+ *
+ * @param function The input function that represents the implicit manifold
+ * without boundary.
+ */
+ Implicit_manifold_intersection_oracle(const Function_& function)
+ : fun_(function), domain_fun_(function.amb_d(), 1, Eigen::VectorXd::Constant(1, -1)) {}
+
+ private:
+ Function_ fun_;
+ Domain_function_ domain_fun_;
+};
+
+/** \brief Static constructor of an intersection oracle from a function with a domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * before the restriction with the domain.
+ * @param domain_function The input domain function that can be used to define an implicit
+ * manifold with boundary.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_, class Domain_function_>
+Implicit_manifold_intersection_oracle<Function_, Domain_function_> make_oracle(
+ const Function_& function, const Domain_function_& domain_function) {
+ return Implicit_manifold_intersection_oracle<Function_, Domain_function_>(function, domain_function);
+}
+
+/** \brief Static constructor of an intersection oracle from a function without a domain.
+ *
+ * @param function The input function that represents the implicit manifold
+ * without boundary.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Function_>
+Implicit_manifold_intersection_oracle<Function_> make_oracle(const Function_& function) {
+ return Implicit_manifold_intersection_oracle<Function_>(function);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h b/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h
new file mode 100644
index 00000000..d61bbed7
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Manifold_tracing.h
@@ -0,0 +1,270 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef MANIFOLD_TRACING_H_
+#define MANIFOLD_TRACING_H_
+
+#include <gudhi/IO/output_debug_traces_to_html.h> // for DEBUG_TRACES
+#include <gudhi/Coxeter_triangulation/Query_result.h>
+
+#include <boost/functional/hash.hpp>
+
+#include <Eigen/Dense>
+
+#include <queue>
+#include <unordered_map>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \ingroup coxeter_triangulation
+ */
+
/** \class Manifold_tracing
 * \brief A class that assembles methods for manifold tracing algorithm.
 *
 * \tparam Triangulation_ The type of the ambient triangulation.
 * Needs to be a model of the concept TriangulationForManifoldTracing.
 */
template <class Triangulation_>
class Manifold_tracing {
 public:
  using Simplex_handle = typename Triangulation_::Simplex_handle;

  /* Hash functor for Simplex_handle keys in Out_simplex_map.
   * Hashes only the simplex's vertex, so simplices that share a vertex
   * collide; equality comparison disambiguates them in the map. */
  struct Simplex_hash {
    typedef Simplex_handle argument_type;
    typedef std::size_t result_type;
    result_type operator()(const argument_type& s) const noexcept {
      return boost::hash<typename Simplex_handle::Vertex>()(s.vertex());
    }
  };

 public:
  /** \brief Type of the output simplex map with keys of type Triangulation_::Simplex_handle
   * and values of type Eigen::VectorXd.
   * This type should be used for the output in the method manifold_tracing_algorithm.
   */
  typedef std::unordered_map<Simplex_handle, Eigen::VectorXd, Simplex_hash> Out_simplex_map;

  /**
   * \brief Computes the set of k-simplices that intersect
   * a boundaryless implicit manifold given by an intersection oracle, where k
   * is the codimension of the manifold.
   * The computation is based on the seed propagation --- it starts at the
   * given seed points and then propagates along the manifold.
   *
   * \tparam Point_range Range of points of type Eigen::VectorXd.
   * \tparam Intersection_oracle Intersection oracle that represents the manifold.
   * Needs to be a model of the concept IntersectionOracle.
   *
   * \param[in] seed_points The range of points on the manifold from which
   * the computation begins.
   * \param[in] triangulation The ambient triangulation.
   * \param[in] oracle The intersection oracle for the manifold.
   * The ambient dimension needs to match the dimension of the
   * triangulation.
   * \param[out] out_simplex_map The output map, where the keys are k-simplices in
   * the input triangulation that intersect the input manifold and the mapped values
   * are the intersection points.
   */
  template <class Point_range, class Intersection_oracle>
  void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation_& triangulation,
                                  const Intersection_oracle& oracle, Out_simplex_map& out_simplex_map) {
    std::size_t cod_d = oracle.cod_d();
    std::queue<Simplex_handle> queue;

    // Seed phase: for each seed point, locate its full-dimensional simplex and
    // find one cod_d-face intersecting the manifold to start the propagation.
    for (const auto& p : seed_points) {
      Simplex_handle full_simplex = triangulation.locate_point(p);
      for (Simplex_handle face : full_simplex.face_range(cod_d)) {
        Query_result<Simplex_handle> qr = oracle.intersects(face, triangulation);
        if (qr.success && out_simplex_map.emplace(face, qr.intersection).second) {
#ifdef DEBUG_TRACES
          mt_seed_inserted_list.push_back(MT_inserted_info(qr, face, false));
#endif
          queue.emplace(face);
          // One intersecting face per seed is enough to start propagation.
          break;
        }
      }
    }

    // Propagation phase: explore cofaces of discovered simplices and collect
    // every new intersecting cod_d-face; emplace returning false means the
    // face was already visited.
    while (!queue.empty()) {
      Simplex_handle s = queue.front();
      queue.pop();
      for (auto cof : s.coface_range(cod_d + 1)) {
        for (auto face : cof.face_range(cod_d)) {
          Query_result<Simplex_handle> qr = oracle.intersects(face, triangulation);
          if (qr.success && out_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
        }
      }
    }
  }

  /**
   * \brief Computes the set of k-simplices that intersect
   * a manifold with boundary given by an intersection oracle, where k
   * is the codimension of the manifold.
   * The computation is based on the seed propagation --- it starts at the
   * given seed points and then propagates along the manifold.
   *
   * \tparam Point_range Range of points of type Eigen::VectorXd.
   * \tparam Intersection_oracle Intersection oracle that represents the manifold.
   * Needs to be a model of the concept IntersectionOracle.
   *
   * \param[in] seed_points The range of points on the manifold from which
   * the computation begins.
   * \param[in] triangulation The ambient triangulation.
   * \param[in] oracle The intersection oracle for the manifold.
   * The ambient dimension needs to match the dimension of the
   * triangulation.
   * \param[out] interior_simplex_map The output map, where the keys are k-simplices in
   * the input triangulation that intersect the relative interior of the input manifold
   * and the mapped values are the intersection points.
   * \param[out] boundary_simplex_map The output map, where the keys are k-simplices in
   * the input triangulation that intersect the boundary of the input manifold
   * and the mapped values are the intersection points.
   */
  template <class Point_range, class Intersection_oracle>
  void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation_& triangulation,
                                  const Intersection_oracle& oracle, Out_simplex_map& interior_simplex_map,
                                  Out_simplex_map& boundary_simplex_map) {
    std::size_t cod_d = oracle.cod_d();
    std::queue<Simplex_handle> queue;

    // Seed phase: same as the boundaryless overload, but an intersection point
    // outside the domain is redirected to a boundary query on the cofaces.
    for (const auto& p : seed_points) {
      Simplex_handle full_simplex = triangulation.locate_point(p);
      for (Simplex_handle face : full_simplex.face_range(cod_d)) {
        auto qr = oracle.intersects(face, triangulation);
#ifdef DEBUG_TRACES
        mt_seed_inserted_list.push_back(MT_inserted_info(qr, face, false));
#endif
        if (qr.success) {
          if (oracle.lies_in_domain(qr.intersection, triangulation)) {
            // Interior intersection: only propagate from newly inserted faces.
            if (interior_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
          } else {
            // Intersection outside the domain: the manifold boundary crosses a
            // coface of this face instead.
            for (Simplex_handle cof : face.coface_range(cod_d + 1)) {
              auto qrb = oracle.intersects_boundary(cof, triangulation);
#ifdef DEBUG_TRACES
              mt_seed_inserted_list.push_back(MT_inserted_info(qrb, cof, true));
#endif
              if (qrb.success) boundary_simplex_map.emplace(cof, qrb.intersection);
            }
          }
          // break;
        }
      }
    }

    // Propagation phase: like the boundaryless overload, with the same
    // interior/boundary dispatch as the seed phase.
    while (!queue.empty()) {
      Simplex_handle s = queue.front();
      queue.pop();
      for (auto cof : s.coface_range(cod_d + 1)) {
        for (auto face : cof.face_range(cod_d)) {
          auto qr = oracle.intersects(face, triangulation);
#ifdef DEBUG_TRACES
          mt_inserted_list.push_back(MT_inserted_info(qr, face, false));
#endif
          if (qr.success) {
            if (oracle.lies_in_domain(qr.intersection, triangulation)) {
              if (interior_simplex_map.emplace(face, qr.intersection).second) queue.emplace(face);
            } else {
              auto qrb = oracle.intersects_boundary(cof, triangulation);
#ifdef DEBUG_TRACES
              mt_inserted_list.push_back(MT_inserted_info(qrb, cof, true));
#endif
              if (qrb.success) boundary_simplex_map.emplace(cof, qrb.intersection);
            }
          }
        }
      }
    }
  }

  /** \brief Empty constructor */
  Manifold_tracing() {}
};
+
+/**
+ * \brief Static method for Manifold_tracing<Triangulation_>::manifold_tracing_algorithm
+ * that computes the set of k-simplices that intersect
+ * a boundaryless implicit manifold given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on the seed propagation --- it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Triangulation_ The type of the ambient triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ * \tparam Out_simplex_map Needs to be Manifold_tracing<Triangulation_>::Out_simplex_map.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] out_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the input manifold and the mapped values
+ * are the intersection points.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Point_range, class Triangulation, class Intersection_oracle, class Out_simplex_map>
+void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& out_simplex_map) {
+ Manifold_tracing<Triangulation> mt;
+ mt.manifold_tracing_algorithm(seed_points, triangulation, oracle, out_simplex_map);
+}
+
+/**
+ * \brief Static method for Manifold_tracing<Triangulation_>::manifold_tracing_algorithm
+ * that computes the set of k-simplices that intersect
+ * a manifold with boundary given by an intersection oracle, where k
+ * is the codimension of the manifold.
+ * The computation is based on the seed propagation --- it starts at the
+ * given seed points and then propagates along the manifold.
+ *
+ * \tparam Point_range Range of points of type Eigen::VectorXd.
+ * \tparam Triangulation_ The type of the ambient triangulation.
+ * Needs to be a model of the concept TriangulationForManifoldTracing.
+ * \tparam Intersection_oracle Intersection oracle that represents the manifold.
+ * Needs to be a model of the concept IntersectionOracle.
+ * \tparam Out_simplex_map Needs to be Manifold_tracing<Triangulation_>::Out_simplex_map.
+ *
+ * \param[in] seed_points The range of points on the manifold from which
+ * the computation begins.
+ * \param[in] triangulation The ambient triangulation.
+ * \param[in] oracle The intersection oracle for the manifold.
+ * The ambient dimension needs to match the dimension of the
+ * triangulation.
+ * \param[out] interior_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the relative interior of the input manifold
+ * and the mapped values are the intersection points.
+ * \param[out] boundary_simplex_map The output map, where the keys are k-simplices in
+ * the input triangulation that intersect the boundary of the input manifold
+ * and the mapped values are the intersection points.
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Point_range, class Triangulation, class Intersection_oracle, class Out_simplex_map>
+void manifold_tracing_algorithm(const Point_range& seed_points, const Triangulation& triangulation,
+ const Intersection_oracle& oracle, Out_simplex_map& interior_simplex_map,
+ Out_simplex_map& boundary_simplex_map) {
+ Manifold_tracing<Triangulation> mt;
+ mt.manifold_tracing_algorithm(seed_points, triangulation, oracle, interior_simplex_map, boundary_simplex_map);
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h
new file mode 100644
index 00000000..76438c91
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation.h
@@ -0,0 +1,216 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_H_
+#define PERMUTAHEDRAL_REPRESENTATION_H_
+
+#include <gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h>
+
+#include <utility> // for std::make_pair
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/**
+ * \class Permutahedral_representation
+ * \brief A class that stores the permutahedral representation of a simplex
+ * in a Coxeter triangulation or a Freudenthal-Kuhn triangulation.
+ *
+ * \ingroup coxeter_triangulation
+ *
+ * \details The data structure is a record consisting of a range that
+ * represents the vertex and a range that represents the ordered set
+ * partition, both of which identify the simplex in the triangulation.
+ *
+ * \tparam Vertex_ needs to be a random-access range.
+ * \tparam Ordered_set_partition_ needs to be a random-access range that consists of
+ * random-access ranges.
+ */
+template <class Vertex_, class Ordered_set_partition_>
+class Permutahedral_representation {
+ typedef Permutahedral_representation<Vertex_, Ordered_set_partition_> Self;
+
+ public:
+ /** \brief Type of the vertex. */
+ typedef Vertex_ Vertex;
+
+ /** \brief Type of the ordered partition. */
+ typedef Ordered_set_partition_ OrderedSetPartition;
+
+ /** \brief Permutahedral_representation constructor from a vertex and an ordered set partition.
+ *
+ * @param[in] vertex Vertex.
+ * @param[in] partition Ordered set partition.
+ *
+ * \details If the size of vertex is d, the ranges in partition must consist
+ * of the integers 0,...,d without repetition or collision between the ranges.
+ */
+ Permutahedral_representation(const Vertex& vertex, const OrderedSetPartition& partition)
+ : vertex_(vertex), partition_(partition) {}
+
+ /** \brief Constructor for an empty permutahedral representation that does not correspond
+ * to any simplex.
+ */
+ Permutahedral_representation() {}
+
+ /** \brief Dimension of the simplex. */
+ std::size_t dimension() const { return partition_.size() - 1; }
+
+ /** \brief Lexicographically-minimal vertex. */
+ Vertex& vertex() { return vertex_; }
+
+ /** \brief Lexicographically-minimal vertex. */
+ const Vertex& vertex() const { return vertex_; }
+
+ /** \brief Ordered set partition. */
+ OrderedSetPartition& partition() { return partition_; }
+
+ /** \brief Identifying vertex. */
+ const OrderedSetPartition& partition() const { return partition_; }
+
+ /** \brief Equality operator.
+ * Returns true if and only if both the vertex and the ordered set partition coincide.
+ */
+ bool operator==(const Permutahedral_representation& other) const {
+ if (dimension() != other.dimension()) return false;
+ if (vertex_ != other.vertex_) return false;
+ for (std::size_t k = 0; k < partition_.size(); ++k)
+ if (partition_[k] != other.partition_[k]) return false;
+ return true;
+ }
+
+ /** \brief Inequality operator.
+ * Returns true if and only if either the vertex or the ordered set partition is different.
+ */
+ bool operator!=(const Permutahedral_representation& other) const { return !(*this == other); }
+
+ typedef Gudhi::coxeter_triangulation::Vertex_iterator<Self> Vertex_iterator;
+ typedef boost::iterator_range<Vertex_iterator> Vertex_range;
+ /** \brief Returns a range of vertices of the simplex.
+ * The type of vertices is Vertex.
+ */
+ Vertex_range vertex_range() const { return Vertex_range(Vertex_iterator(*this), Vertex_iterator()); }
+
+ typedef Gudhi::coxeter_triangulation::Face_iterator<Self> Face_iterator;
+ typedef boost::iterator_range<Face_iterator> Face_range;
+ /** \brief Returns a range of permutahedral representations of faces of the simplex.
+ * @param[in] value_dim The dimension of the faces. Must be between 0 and the dimension of the simplex.
+ */
+ Face_range face_range(std::size_t value_dim) const {
+ return Face_range(Face_iterator(*this, value_dim), Face_iterator());
+ }
+
+ /** \brief Returns a range of permutahedral representations of facets of the simplex.
+ * The dimension of the simplex must be strictly positive.
+ */
+ Face_range facet_range() const { return Face_range(Face_iterator(*this, dimension() - 1), Face_iterator()); }
+
+ typedef Gudhi::coxeter_triangulation::Coface_iterator<Self> Coface_iterator;
+ typedef boost::iterator_range<Coface_iterator> Coface_range;
+ /** \brief Returns a range of permutahedral representations of cofaces of the simplex.
+ * @param[in] value_dim The dimension of the cofaces. Must be between the dimension of the simplex and the ambient
+ * dimension (the size of the vertex).
+ */
+ Coface_range coface_range(std::size_t value_dim) const {
+ return Coface_range(Coface_iterator(*this, value_dim), Coface_iterator());
+ }
+
+ /** \brief Returns a range of permutahedral representations of cofacets of the simplex.
+ * The dimension of the simplex must be strictly different from the ambient dimension (the size of the vertex).
+ */
+ Coface_range cofacet_range() const {
+ return Coface_range(Coface_iterator(*this, dimension() + 1), Coface_iterator());
+ }
+
+ /** \brief Returns true, if the simplex is a face of other simplex.
+ *
+ * @param[in] other A simplex that is potentially a coface of the current simplex.
+ */
+ bool is_face_of(const Permutahedral_representation& other) const {
+ using Part = typename OrderedSetPartition::value_type;
+
+ if (other.dimension() < dimension()) return false;
+ if (other.vertex_.size() != vertex_.size())
+ std::cerr << "Error: Permutahedral_representation::is_face_of: incompatible ambient dimensions.\n";
+
+ Vertex v_self = vertex_, v_other = other.vertex_;
+ auto self_partition_it = partition_.begin();
+ auto other_partition_it = other.partition_.begin();
+ while (self_partition_it != partition_.end()) {
+ while (other_partition_it != other.partition_.end() && v_self != v_other) {
+ const Part& other_part = *other_partition_it++;
+ if (other_partition_it == other.partition_.end()) return false;
+ for (const auto& k : other_part) v_other[k]++;
+ }
+ if (other_partition_it == other.partition_.end()) return false;
+ const Part& self_part = *self_partition_it++;
+ if (self_partition_it == partition_.end()) return true;
+ for (const auto& k : self_part) v_self[k]++;
+ }
+ return true;
+ }
+
+ private:
+ Vertex vertex_;
+ OrderedSetPartition partition_;
+};
+
+/** \brief Print a permutahedral representation to a stream.
+ * \ingroup coxeter_triangulation
+ *
+ * @param[in] os The output stream.
+ * @param[in] simplex A simplex represented by its permutahedral representation.
+ */
+template <class Vertex, class OrderedSetPartition>
+std::ostream& operator<<(std::ostream& os, const Permutahedral_representation<Vertex, OrderedSetPartition>& simplex) {
+ // vertex part
+ os << "(";
+ if (simplex.vertex().empty()) {
+ os << ")";
+ return os;
+ }
+ auto v_it = simplex.vertex().begin();
+ os << *v_it++;
+ for (; v_it != simplex.vertex().end(); ++v_it) os << ", " << *v_it;
+ os << ")";
+
+ // ordered partition part
+ using Part = typename OrderedSetPartition::value_type;
+ auto print_part = [&os](const Part& p) {
+ os << "{";
+ if (p.empty()) {
+ os << "}";
+ }
+ auto p_it = p.begin();
+ os << *p_it++;
+ for (; p_it != p.end(); ++p_it) os << ", " << *p_it;
+ os << "}";
+ };
+ os << " [";
+ if (simplex.partition().empty()) {
+ os << "]";
+ return os;
+ }
+ auto o_it = simplex.partition().begin();
+ print_part(*o_it++);
+ for (; o_it != simplex.partition().end(); ++o_it) {
+ os << ", ";
+ print_part(*o_it);
+ }
+ os << "]";
+ return os;
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif // PERMUTAHEDRAL_REPRESENTATION_H_
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h
new file mode 100644
index 00000000..5f382e31
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Combination_iterator.h
@@ -0,0 +1,83 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_COMBINATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_COMBINATION_ITERATOR_H_
+
+#include <vector>
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate combinations of
+ * k elements in a set of n elements.
+ * Based on the algorithm by Mifsud.
+ */
+class Combination_iterator
+ : public boost::iterator_facade<Combination_iterator, std::vector<uint> const, boost::forward_traversal_tag> {
+ typedef std::vector<uint> value_t;
+
+ protected:
+ friend class boost::iterator_core_access;
+
+ bool equal(Combination_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (value_[0] == n_ - k_) {
+ is_end_ = true;
+ return;
+ }
+ uint j = k_ - 1;
+ if (value_[j] < n_ - 1) {
+ value_[j]++;
+ return;
+ }
+ for (; j > 0; --j)
+ if (value_[j - 1] < n_ - k_ + j - 1) {
+ value_[j - 1]++;
+ for (uint s = j; s < k_; s++) value_[s] = value_[j - 1] + s - (j - 1);
+ return;
+ }
+ }
+
+ public:
+ Combination_iterator(const uint& n, const uint& k) : value_(k), is_end_(n == 0), n_(n), k_(k) {
+ for (uint i = 0; i < k; ++i) value_[i] = i;
+ }
+
+ // Used for creating an end iterator
+ Combination_iterator() : is_end_(true), n_(0), k_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) {
+ is_end_ = false;
+ for (uint i = 0; i < n_; ++i) value_[i] = i;
+ }
+ }
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+
+ uint n_;
+ uint k_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
new file mode 100644
index 00000000..3ee73754
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
@@ -0,0 +1,114 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_INTEGER_COMBINATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_INTEGER_COMBINATION_ITERATOR_H_
+
+#include <vector>
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate the ways to write
+ * a non-negative integer n as an ordered sum of k bounded non-negative integers.
+ * Based on the algorithm by Mifsud.
+ */
+class Integer_combination_iterator
+ : public boost::iterator_facade<Integer_combination_iterator, std::vector<uint> const,
+ boost::forward_traversal_tag> {
+ using value_t = std::vector<uint>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Integer_combination_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ uint j1 = 0;
+ uint s = 0;
+ while (value_[j1] == 0 && j1 < k_) j1++;
+ uint j2 = j1 + 1;
+ while (value_[j2] == bounds_[j2]) {
+ if (bounds_[j2] != 0) {
+ s += value_[j1];
+ value_[j1] = 0;
+ j1 = j2;
+ }
+ j2++;
+ }
+ if (j2 >= k_) {
+ is_end_ = true;
+ return;
+ }
+ s += value_[j1] - 1;
+ value_[j1] = 0;
+ value_[j2]++;
+ uint i = 0;
+ while (s >= bounds_[i]) {
+ value_[i] = bounds_[i];
+ s -= bounds_[i];
+ i++;
+ }
+ value_[i++] = s;
+ }
+
+ public:
+ template <class Bound_range>
+ Integer_combination_iterator(const uint& n, const uint& k, const Bound_range& bounds)
+ : value_(k + 2), is_end_(n == 0 || k == 0), n_(n), k_(k) {
+ bounds_.reserve(k + 2);
+ uint sum_radices = 0;
+ for (auto b : bounds) {
+ bounds_.push_back(b);
+ sum_radices += b;
+ }
+ bounds_.push_back(2);
+ bounds_.push_back(1);
+ if (n > sum_radices) {
+ is_end_ = true;
+ return;
+ }
+ uint i = 0;
+ uint s = n;
+ while (s >= bounds_[i]) {
+ value_[i] = bounds_[i];
+ s -= bounds_[i];
+ i++;
+ }
+ value_[i++] = s;
+
+ while (i < k_) value_[i++] = 0;
+ value_[k] = 1;
+ value_[k + 1] = 0;
+ }
+
+ // Used for creating an end iterator
+ Integer_combination_iterator() : is_end_(true), n_(0), k_(0) {}
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current integer combination is the final one
+
+ uint n_;
+ uint k_;
+ std::vector<uint> bounds_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h
new file mode 100644
index 00000000..866079fa
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h
@@ -0,0 +1,93 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_ORDERED_SET_PARTITION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_ORDERED_SET_PARTITION_ITERATOR_H_
+
+#include <vector>
+#include <limits>
+
+#include <gudhi/Permutahedral_representation/Permutation_iterator.h>
+#include <gudhi/Permutahedral_representation/Set_partition_iterator.h>
+
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that represents an ordered set partition of a set {0,...,n-1} in k parts as
+ * a pair of an unordered set partition given in lexicographic order and
+ * a permutation of the parts.
+ */
+struct Ordered_set_partition {
+ Set_partition_iterator s_it_;
+ Permutation_iterator p_it_;
+
+ // Ordered_set_partition(const Set_partition_iterator& s_it, const Permutation_iterator& p_it)
+ // : s_it_(s_it), p_it_(p_it) {}
+
+ const std::vector<uint> operator[](const uint& i) const { return (*s_it_)[(*p_it_)[i]]; }
+
+ std::size_t size() const { return s_it_->size(); }
+};
+
+/** \brief Class that allows the user to generate ordered set partitions of a set {0,...,n-1} in k parts.
+ *
+ */
+class Ordered_set_partition_iterator
+ : public boost::iterator_facade<Ordered_set_partition_iterator, Ordered_set_partition const,
+ boost::forward_traversal_tag> {
+ using value_t = Ordered_set_partition;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Ordered_set_partition_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++value_.p_it_ == p_end_) {
+ if (++value_.s_it_ == s_end_) {
+ is_end_ = true;
+ return;
+ } else
+ value_.p_it_.reinitialize();
+ }
+ }
+
+ public:
+ Ordered_set_partition_iterator(const uint& n, const uint& k)
+ : value_({Set_partition_iterator(n, k), Permutation_iterator(k)}), is_end_(n == 0) {}
+
+ // Used for creating an end iterator
+ Ordered_set_partition_iterator() : is_end_(true) {}
+
+ void reinitialize() {
+ is_end_ = false;
+ value_.p_it_.reinitialize();
+ value_.s_it_.reinitialize();
+ }
+
+ private:
+ Set_partition_iterator s_end_; // Set partition iterator and the corresponding end iterator
+ Permutation_iterator p_end_; // Permutation iterator and the corresponding end iterator
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
new file mode 100644
index 00000000..db145741
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutahedral_representation_iterators.h
@@ -0,0 +1,254 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_PERMUTAHEDRAL_REPRESENTATION_ITERATORS_H_
+#define PERMUTAHEDRAL_REPRESENTATION_PERMUTAHEDRAL_REPRESENTATION_ITERATORS_H_
+
+#include <gudhi/Permutahedral_representation/Size_range.h>
+#include <gudhi/Permutahedral_representation/Ordered_set_partition_iterator.h>
+#include <gudhi/Permutahedral_representation/Integer_combination_iterator.h>
+#include <gudhi/Permutahedral_representation/Combination_iterator.h>
+#include <gudhi/Permutahedral_representation/face_from_indices.h>
+#include <boost/iterator/iterator_facade.hpp>
+
+#include <vector>
+#include <iostream>
+#include <algorithm> // for std::find
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/* \addtogroup coxeter_triangulation
+ * Iterator types for Permutahedral_representation
+ * @{
+ */
+
+/* \brief Iterator over the vertices of a simplex
+ * represented by its permutahedral representation.
+ *
+ * Forward iterator, 'value_type' is Permutahedral_representation::Vertex.*/
+template <class Permutahedral_representation>
+class Vertex_iterator
+ : public boost::iterator_facade<Vertex_iterator<Permutahedral_representation>,
+ typename Permutahedral_representation::Vertex const, boost::forward_traversal_tag> {
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ using value_t = Vertex;
+
+ bool equal(Vertex_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void update_value() {
+ std::size_t d = value_.size();
+ for (auto i : *o_it_)
+ if (i != d)
+ value_[i]++;
+ else
+ for (std::size_t j = 0; j < d; j++) value_[j]--;
+ }
+
+ void increment() {
+ if (is_end_) return;
+ update_value();
+ if (++o_it_ == o_end_) is_end_ = true;
+ }
+
+ public:
+ Vertex_iterator(const Permutahedral_representation& simplex)
+ : o_it_(simplex.partition().begin()),
+ o_end_(simplex.partition().end()),
+ value_(simplex.vertex()),
+ is_end_(o_it_ == o_end_) {}
+
+ Vertex_iterator() : is_end_(true) {}
+
+ private:
+ typename Ordered_partition::const_iterator o_it_, o_end_;
+ value_t value_;
+ bool is_end_;
+
+}; // Vertex_iterator
+
+/*---------------------------------------------------------------------------*/
+/* \brief Iterator over the k-faces of a simplex
+ * given by its permutahedral representation.
+ *
+ * Forward iterator, value_type is Permutahedral_representation. */
+template <class Permutahedral_representation>
+class Face_iterator : public boost::iterator_facade<Face_iterator<Permutahedral_representation>,
+ Permutahedral_representation const, boost::forward_traversal_tag> {
+ using value_t = Permutahedral_representation;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ bool equal(Face_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++c_it_ == c_end_) {
+ is_end_ = true;
+ return;
+ }
+ update_value();
+ }
+
+ void update_value() {
+ // Combination *c_it_ is supposed to be sorted in increasing order
+ value_ = face_from_indices<Permutahedral_representation>(simplex_, *c_it_);
+ }
+
+ public:
+ Face_iterator(const Permutahedral_representation& simplex, const uint& k)
+ : simplex_(simplex),
+ k_(k),
+ l_(simplex.dimension()),
+ c_it_(l_ + 1, k_ + 1),
+ is_end_(k_ > l_),
+ value_({Vertex(simplex.vertex().size()), Ordered_partition(k + 1)}) {
+ update_value();
+ }
+
+ // Used for creating an end iterator
+ Face_iterator() : is_end_(true) {}
+
+ private:
+ Permutahedral_representation simplex_; // Input simplex
+ uint k_;
+ uint l_; // Dimension of the input simplex
+ Combination_iterator c_it_, c_end_; // indicates the vertices in the current face
+
+ bool is_end_; // is true when the current permutation is the final one
+ value_t value_; // the dereference value
+
+}; // Face_iterator
+
+/*---------------------------------------------------------------------------*/
+/* \brief Iterator over the k-cofaces of a simplex
+ * given by its permutahedral representation.
+ *
+ * Forward iterator, value_type is Permutahedral_representation. */
+template <class Permutahedral_representation>
+class Coface_iterator
+ : public boost::iterator_facade<Coface_iterator<Permutahedral_representation>, Permutahedral_representation const,
+ boost::forward_traversal_tag> {
+ using value_t = Permutahedral_representation;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ using Vertex = typename Permutahedral_representation::Vertex;
+ using Ordered_partition = typename Permutahedral_representation::OrderedSetPartition;
+
+ bool equal(Coface_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void increment() {
+ uint i = 0;
+ for (; i < k_ + 1; i++) {
+ if (++(o_its_[i]) != o_end_) break;
+ }
+ if (i == k_ + 1) {
+ if (++i_it_ == i_end_) {
+ is_end_ = true;
+ return;
+ }
+ o_its_.clear();
+ for (uint j = 0; j < k_ + 1; j++)
+ o_its_.emplace_back(simplex_.partition()[j].size(), (*i_it_)[j] + 1);
+ } else
+ for (uint j = 0; j < i; j++) o_its_[j].reinitialize();
+ update_value();
+ }
+
+ void update_value() {
+ value_.vertex() = simplex_.vertex();
+ for (auto& p : value_.partition()) p.clear();
+ uint u_ = 0; // the part in o_its_[k_] that contains t_
+ for (; u_ <= (*i_it_)[k_]; u_++) {
+ auto range = (*o_its_[k_])[u_];
+ if (std::find(range.begin(), range.end(), t_) != range.end()) break;
+ }
+ uint i = 0;
+ for (uint j = u_ + 1; j <= (*i_it_)[k_]; j++, i++)
+ for (uint b : (*o_its_[k_])[j]) {
+ uint c = simplex_.partition()[k_][b];
+ value_.partition()[i].push_back(c);
+ value_.vertex()[c]--;
+ }
+ for (uint h = 0; h < k_; h++)
+ for (uint j = 0; j <= (*i_it_)[h]; j++, i++) {
+ for (uint b : (*o_its_[h])[j]) value_.partition()[i].push_back(simplex_.partition()[h][b]);
+ }
+ for (uint j = 0; j <= u_; j++, i++)
+ for (uint b : (*o_its_[k_])[j]) value_.partition()[i].push_back(simplex_.partition()[k_][b]);
+ // sort the values in each part (probably not needed)
+ for (auto& part : value_.partition()) std::sort(part.begin(), part.end());
+ }
+
+ public:
+ Coface_iterator(const Permutahedral_representation& simplex, const uint& l)
+ : simplex_(simplex),
+ d_(simplex.vertex().size()),
+ l_(l),
+ k_(simplex.dimension()),
+ i_it_(l_ - k_, k_ + 1, Size_range<Ordered_partition>(simplex.partition())),
+ is_end_(k_ > l_),
+ value_({Vertex(d_), Ordered_partition(l_ + 1)}) {
+ uint j = 0;
+ for (; j < simplex_.partition()[k_].size(); j++)
+ if (simplex_.partition()[k_][j] == d_) {
+ t_ = j;
+ break;
+ }
+ if (j == simplex_.partition()[k_].size()) {
+ std::cerr << "Coface iterator: the argument simplex is not a permutahedral representation\n";
+ is_end_ = true;
+ return;
+ }
+ for (uint i = 0; i < k_ + 1; i++)
+ o_its_.emplace_back(simplex_.partition()[i].size(), (*i_it_)[i] + 1);
+ update_value();
+ }
+
+ // Used for creating an end iterator
+ Coface_iterator() : is_end_(true) {}
+
+ private:
+ Permutahedral_representation simplex_; // Input simplex
+ uint d_; // Ambient dimension
+ uint l_; // Dimension of the coface
+ uint k_; // Dimension of the input simplex
+ uint t_; // The position of d in simplex_.partition()[k_]
+ Integer_combination_iterator i_it_, i_end_; // indicates in how many parts each simplex_[i] is subdivided
+ std::vector<Ordered_set_partition_iterator> o_its_; // indicates subdivision for each simplex_[i]
+ Ordered_set_partition_iterator o_end_; // one end for all o_its_
+
+ bool is_end_; // is true when the current permutation is the final one
+ value_t value_; // the dereference value
+
+}; // Coface_iterator
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h
new file mode 100644
index 00000000..0f91d41c
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Permutation_iterator.h
@@ -0,0 +1,120 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_PERMUTATION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_PERMUTATION_ITERATOR_H_
+
+#include <cstdlib> // for std::size_t
+#include <vector>
+
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate permutations.
+ * Based on the optimization of Heap's algorithm by Sedgewick.
+ */
+class Permutation_iterator
+ : public boost::iterator_facade<Permutation_iterator, std::vector<uint> const, boost::forward_traversal_tag> {
+ using value_t = std::vector<uint>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Permutation_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void swap_two_indices(std::size_t i, std::size_t j) {
+ uint t = value_[i];
+ value_[i] = value_[j];
+ value_[j] = t;
+ }
+
+ void elementary_increment() {
+ uint j = 0;
+ while (d_[j] == j + 1) {
+ d_[j] = 0;
+ ++j;
+ }
+ if (j == n_ - 1) {
+ is_end_ = true;
+ return;
+ }
+ uint k = j + 1;
+ uint x = (k % 2 ? d_[j] : 0);
+ swap_two_indices(k, x);
+ ++d_[j];
+ }
+
+ void elementary_increment_optim_3() {
+ if (ct_ != 0) {
+ --ct_;
+ swap_two_indices(1 + (ct_ % 2), 0);
+ } else {
+ ct_ = 5;
+ uint j = 2;
+ while (d_[j] == j + 1) {
+ d_[j] = 0;
+ ++j;
+ }
+ if (j == n_ - 1) {
+ is_end_ = true;
+ return;
+ }
+ uint k = j + 1;
+ uint x = (k % 2 ? d_[j] : 0);
+ swap_two_indices(k, x);
+ ++d_[j];
+ }
+ }
+
+ void increment() {
+ if (optim_3_)
+ elementary_increment_optim_3();
+ else
+ elementary_increment();
+ }
+
+ public:
+ Permutation_iterator(const uint& n) : value_(n), is_end_(n == 0), optim_3_(n >= 3), n_(n), d_(n), ct_(5) {
+ for (uint i = 0; i < n; ++i) {
+ value_[i] = i;
+ d_[i] = 0;
+ }
+ if (n > 0) d_[n - 1] = -1;
+ }
+
+ // Used for creating an end iterator
+ Permutation_iterator() : is_end_(true), n_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) is_end_ = false;
+ }
+
+ private:
+ value_t value_; // the dereference value
+ bool is_end_; // is true when the current permutation is the final one
+ bool optim_3_; // true if n>=3. for n >= 3, the algorithm is optimized
+
+ uint n_;
+ std::vector<uint> d_; // mix radix digits with radix [2 3 4 ... n-1 (sentinel=-1)]
+ uint ct_; // counter with values in {0,...,5} used in the n>=3 optimization.
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h
new file mode 100644
index 00000000..94ac10c2
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Set_partition_iterator.h
@@ -0,0 +1,111 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SET_PARTITION_ITERATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SET_PARTITION_ITERATOR_H_
+
+#include <vector>
+#include <limits>
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+typedef unsigned uint;
+
+/** \brief Class that allows the user to generate set partitions of a set {0,...,n-1} in k parts.
+ *
+ */
+class Set_partition_iterator
+ : public boost::iterator_facade<Set_partition_iterator, std::vector<std::vector<uint>> const,
+ boost::forward_traversal_tag> {
+ using value_t = std::vector<std::vector<uint>>;
+
+ private:
+ friend class boost::iterator_core_access;
+
+ bool equal(Set_partition_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ value_t const& dereference() const { return value_; }
+
+ void update_value() {
+ for (uint i = 0; i < k_; i++) value_[i].clear();
+ for (uint i = 0; i < n_; i++) value_[rgs_[i]].push_back(i);
+ }
+
+ void increment() {
+ if (k_ <= 1) {
+ is_end_ = true;
+ return;
+ }
+ uint i = n_ - 1;
+ while (rgs_[i] + 1 > max_[i] || rgs_[i] + 1 >= k_) i--;
+ if (i == 0) {
+ is_end_ = true;
+ return;
+ }
+ rgs_[i]++;
+ uint mm = max_[i];
+ mm += (rgs_[i] >= mm);
+ max_[i + 1] = mm;
+ while (++i < n_) {
+ rgs_[i] = 0;
+ max_[i + 1] = mm;
+ }
+ uint p = k_;
+ if (mm < p) do {
+ max_[i] = p;
+ --i;
+ --p;
+ rgs_[i] = p;
+ } while (max_[i] < p);
+ update_value();
+ }
+
+ public:
+ Set_partition_iterator(const uint& n, const uint& k)
+ : value_(k), rgs_(n, 0), max_(n + 1), is_end_(n == 0), n_(n), k_(k) {
+ max_[0] = std::numeric_limits<uint>::max();
+ for (uint i = 0; i <= n - k; ++i) value_[0].push_back(i);
+ for (uint i = n - k + 1, j = 1; i < n; ++i, ++j) {
+ rgs_[i] = j;
+ value_[j].push_back(i);
+ }
+ for (uint i = 1; i <= n; i++) max_[i] = rgs_[i - 1] + 1;
+ update_value();
+ }
+
+ // Used for creating an end iterator
+ Set_partition_iterator() : is_end_(true), n_(0), k_(0) {}
+
+ void reinitialize() {
+ if (n_ > 0) is_end_ = false;
+ for (uint i = 0; i <= n_ - k_; ++i) rgs_[i] = 0;
+ for (uint i = n_ - k_ + 1, j = 1; i < n_; ++i, ++j) rgs_[i] = j;
+ for (uint i = 1; i <= n_; i++) max_[i] = rgs_[i - 1] + 1;
+ update_value();
+ }
+
+ private:
+ value_t value_; // the dereference value
+ std::vector<uint> rgs_; // restricted growth string
+ std::vector<uint> max_; // max_[i] = max(rgs_[0],...,rgs[i-1]) + 1
+ bool is_end_; // is true when the current permutation is the final one
+
+ uint n_;
+ uint k_;
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h
new file mode 100644
index 00000000..905d68d5
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Simplex_comparator.h
@@ -0,0 +1,54 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SIMPLEX_COMPARATOR_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SIMPLEX_COMPARATOR_H_
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \class Simplex_comparator
+ * \brief A comparator class for Permutahedral_representation.
+ * The comparison is in lexicographic order first on
+ * vertices and then on ordered partitions with sorted parts.
+ * The lexicographic order forces that any face is larger than
+ * a coface.
+ *
+ * \tparam Permutahedral_representation_ Needs to be
+ * Permutahedral_representation<Vertex_, Ordered_set_partition_>
+ *
+ * \ingroup coxeter_triangulation
+ */
+template <class Permutahedral_representation_>
+struct Simplex_comparator {
+ /** \brief Comparison between two permutahedral representations.
+ * Both permutahedral representations need to be valid and
+ * the vertices of both permutahedral representations need to be of the same size.
+ */
+ bool operator()(const Permutahedral_representation_& lhs, const Permutahedral_representation_& rhs) const {
+ if (lhs.vertex() < rhs.vertex()) return true;
+ if (lhs.vertex() > rhs.vertex()) return false;
+
+ if (lhs.partition().size() > rhs.partition().size()) return true;
+ if (lhs.partition().size() < rhs.partition().size()) return false;
+
+ if (lhs.partition() < rhs.partition()) return true;
+ if (lhs.partition() > rhs.partition()) return false;
+
+ return false;
+ }
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
new file mode 100644
index 00000000..c43effc8
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Size_range.h
@@ -0,0 +1,73 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_SIZE_RANGE_H_
+#define PERMUTAHEDRAL_REPRESENTATION_SIZE_RANGE_H_
+
+#include <cstdlib> // for std::size_t
+
+#include <boost/range/iterator_range.hpp>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Auxiliary iterator class for sizes of parts in an ordered set partition.
+ */
+template <class T_it>
+class Size_iterator
+ : public boost::iterator_facade<Size_iterator<T_it>, std::size_t const, boost::forward_traversal_tag> {
+ friend class boost::iterator_core_access;
+
+ private:
+ bool equal(Size_iterator const& other) const { return (is_end_ && other.is_end_); }
+
+ std::size_t const& dereference() const { return value_; }
+
+ void increment() {
+ if (++t_it_ == t_end_) {
+ is_end_ = true;
+ return;
+ }
+ value_ = t_it_->size() - 1;
+ }
+
+ public:
+ Size_iterator(const T_it& t_begin, const T_it& t_end) : t_it_(t_begin), t_end_(t_end), is_end_(t_begin == t_end) {
+ if (!is_end_) value_ = t_it_->size() - 1;
+ }
+
+ private:
+ T_it t_it_, t_end_;
+ bool is_end_;
+ std::size_t value_;
+};
+
+template <class T>
+class Size_range {
+ const T& t_;
+
+ public:
+ typedef Size_iterator<typename T::const_iterator> iterator;
+
+ Size_range(const T& t) : t_(t) {}
+
+ std::size_t operator[](std::size_t i) const { return t_[i].size() - 1; }
+
+ iterator begin() const { return iterator(t_.begin(), t_.end()); }
+
+ iterator end() const { return iterator(t_.end(), t_.end()); }
+};
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h
new file mode 100644
index 00000000..47120689
--- /dev/null
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/face_from_indices.h
@@ -0,0 +1,66 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#ifndef PERMUTAHEDRAL_REPRESENTATION_FACE_FROM_INDICES_H_
+#define PERMUTAHEDRAL_REPRESENTATION_FACE_FROM_INDICES_H_
+
+#include <cstdlib> // for std::size_t
+#include <algorithm>
+
+namespace Gudhi {
+
+namespace coxeter_triangulation {
+
+/** \brief Computes the permutahedral representation of a face of a given simplex
+ * and a range of the vertex indices that compose the face.
+ *
+ * \tparam Permutahedral_representation has to be Permutahedral_representation
+ * \tparam Index_range is a range of unsigned integers taking values in 0,...,k,
+ * where k is the dimension of the simplex.
+ *
+ * @param[in] simplex Input simplex.
+ * @param[in] indices Input range of indices.
+ */
+template <class Permutahedral_representation, class Index_range>
+Permutahedral_representation face_from_indices(const Permutahedral_representation& simplex,
+ const Index_range& indices) {
+ using range_index = typename Index_range::value_type;
+ using Ordered_set_partition = typename Permutahedral_representation::OrderedSetPartition;
+ using Part = typename Ordered_set_partition::value_type;
+ using part_index = typename Part::value_type;
+ Permutahedral_representation value;
+ std::size_t d = simplex.vertex().size();
+ value.vertex() = simplex.vertex();
+ std::size_t k = indices.size() - 1;
+ value.partition().resize(k + 1);
+ std::size_t l = simplex.partition().size() - 1;
+ for (std::size_t h = 1; h < k + 1; h++)
+ for (range_index i = indices[h - 1]; i < indices[h]; i++)
+ for (part_index j : simplex.partition()[i]) value.partition()[h - 1].push_back(j);
+ for (range_index i = indices[k]; i < l + 1; i++)
+ for (part_index j : simplex.partition()[i]) value.partition()[k].push_back(j);
+ for (range_index i = 0; i < indices[0]; i++)
+ for (part_index j : simplex.partition()[i]) {
+ if (j != d)
+ value.vertex()[j]++;
+ else
+ for (std::size_t l = 0; l < d; l++) value.vertex()[l]--;
+ value.partition()[k].push_back(j);
+ }
+ // sort the values in each part (probably not needed)
+ for (auto& part : value.partition()) std::sort(part.begin(), part.end());
+ return value;
+}
+
+} // namespace coxeter_triangulation
+
+} // namespace Gudhi
+
+#endif
diff --git a/src/Coxeter_triangulation/test/CMakeLists.txt b/src/Coxeter_triangulation/test/CMakeLists.txt
new file mode 100644
index 00000000..74ded91e
--- /dev/null
+++ b/src/Coxeter_triangulation/test/CMakeLists.txt
@@ -0,0 +1,30 @@
+project(Coxeter_triangulation_test)
+
+include(GUDHI_boost_test)
+
+if (NOT EIGEN3_VERSION VERSION_LESS 3.1.0)
+ add_executable ( Coxeter_triangulation_permutahedral_representation_test perm_rep_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_permutahedral_representation_test)
+
+ add_executable ( Coxeter_triangulation_freudenthal_triangulation_test freud_triang_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_freudenthal_triangulation_test)
+
+ add_executable ( Coxeter_triangulation_functions_test function_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_functions_test)
+
+ # because of random_orthogonal_matrix inclusion
+ if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ add_executable ( Coxeter_triangulation_random_orthogonal_matrix_function_test random_orthogonal_matrix_function_test.cpp )
+ target_link_libraries(Coxeter_triangulation_random_orthogonal_matrix_function_test ${CGAL_LIBRARY})
+ gudhi_add_boost_test(Coxeter_triangulation_random_orthogonal_matrix_function_test)
+ endif()
+
+ add_executable ( Coxeter_triangulation_oracle_test oracle_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_oracle_test)
+
+ add_executable ( Coxeter_triangulation_manifold_tracing_test manifold_tracing_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_manifold_tracing_test)
+
+ add_executable ( Coxeter_triangulation_cell_complex_test cell_complex_test.cpp )
+ gudhi_add_boost_test(Coxeter_triangulation_cell_complex_test)
+endif() \ No newline at end of file
diff --git a/src/Coxeter_triangulation/test/cell_complex_test.cpp b/src/Coxeter_triangulation/test/cell_complex_test.cpp
new file mode 100644
index 00000000..4f7f3ec5
--- /dev/null
+++ b/src/Coxeter_triangulation/test/cell_complex_test.cpp
@@ -0,0 +1,59 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "cell_complex"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Debug_utils.h>
+#include <gudhi/IO/output_debug_traces_to_html.h>
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Function_torus_in_R3.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+#include <gudhi/Coxeter_triangulation/Cell_complex/Cell_complex.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(cell_complex) {
+ double radius = 1.1111;
+ Function_torus_in_R3 fun_torus(radius, 3 * radius);
+ Eigen::VectorXd seed = fun_torus.seed();
+ Function_Sm_in_Rd fun_bound(2.5 * radius, 2, seed);
+
+ auto oracle = make_oracle(fun_torus, fun_bound);
+ double lambda = 0.2;
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+ cox_tr.change_matrix(lambda * cox_tr.matrix());
+
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ using Out_simplex_map = typename MT::Out_simplex_map;
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, interior_simplex_map, boundary_simplex_map);
+
+ std::size_t intr_d = oracle.amb_d() - oracle.cod_d();
+ Cell_complex<Out_simplex_map> cell_complex(intr_d);
+ cell_complex.construct_complex(interior_simplex_map, boundary_simplex_map);
+
+ std::size_t interior_sc_map_size0 = cell_complex.interior_simplex_cell_map(0).size();
+ std::size_t interior_sc_map_size1 = cell_complex.interior_simplex_cell_map(1).size();
+ std::size_t interior_sc_map_size2 = cell_complex.interior_simplex_cell_map(2).size();
+ std::size_t boundary_sc_map_size0 = cell_complex.boundary_simplex_cell_map(0).size();
+ std::size_t boundary_sc_map_size1 = cell_complex.boundary_simplex_cell_map(1).size();
+ BOOST_CHECK(interior_simplex_map.size() == interior_sc_map_size0);
+ BOOST_CHECK(boundary_sc_map_size0 - boundary_sc_map_size1 == 0);
+ BOOST_CHECK(interior_sc_map_size0 - interior_sc_map_size1 + interior_sc_map_size2 == 0);
+}
diff --git a/src/Coxeter_triangulation/test/freud_triang_test.cpp b/src/Coxeter_triangulation/test/freud_triang_test.cpp
new file mode 100644
index 00000000..2cf8f00e
--- /dev/null
+++ b/src/Coxeter_triangulation/test/freud_triang_test.cpp
@@ -0,0 +1,114 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "freudenthal_triangulation"
+#include <boost/test/unit_test.hpp>
+
+#include <gudhi/Unitary_tests_utils.h>
+#include <gudhi/Freudenthal_triangulation.h>
+#include <gudhi/Coxeter_triangulation.h>
+
+BOOST_AUTO_TEST_CASE(freudenthal_triangulation) {
+ // Point location check
+ typedef std::vector<double> Point;
+ typedef Gudhi::coxeter_triangulation::Freudenthal_triangulation<> FK_triangulation;
+ typedef typename FK_triangulation::Simplex_handle Simplex_handle;
+ typedef typename FK_triangulation::Vertex_handle Vertex_handle;
+ typedef typename Simplex_handle::OrderedSetPartition Ordered_set_partition;
+ typedef typename Ordered_set_partition::value_type Part;
+
+ FK_triangulation tr(3);
+
+ // Point location check
+ {
+ Point point({3, -1, 0});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -1, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 1, 2, 3})}));
+ }
+
+ {
+ Point point({3.5, -1.5, 0.5});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 1, 2}), Part({3})}));
+ }
+
+ {
+ Point point({3.5, -1.8, 0.5});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0, 2}), Part({1}), Part({3})}));
+ }
+
+ {
+ Point point({3.5, -1.8, 0.3});
+ Simplex_handle s = tr.locate_point(point);
+ BOOST_CHECK(s.vertex() == Vertex_handle({3, -2, 0}));
+ BOOST_CHECK(s.partition() == Ordered_set_partition({Part({0}), Part({2}), Part({1}), Part({3})}));
+ }
+
+ // Dimension check
+ BOOST_CHECK(tr.dimension() == 3);
+ // Matrix check
+ Eigen::MatrixXd default_matrix = Eigen::MatrixXd::Identity(3, 3);
+ BOOST_CHECK(tr.matrix() == default_matrix);
+ // Vector check
+ Eigen::MatrixXd default_offset = Eigen::VectorXd::Zero(3);
+ BOOST_CHECK(tr.offset() == default_offset);
+
+ // Barycenter check
+ Point point({3.5, -1.8, 0.3});
+ Simplex_handle s = tr.locate_point(point);
+ Eigen::Vector3d barycenter_cart = Eigen::Vector3d::Zero();
+ for (auto v : s.vertex_range())
+ for (std::size_t i = 0; i < v.size(); i++) barycenter_cart(i) += v[i];
+ barycenter_cart /= 4.; // simplex is three-dimensional
+ Eigen::Vector3d barycenter = tr.barycenter(s);
+ for (std::size_t i = 0; (long int)i < barycenter.size(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(barycenter(i), barycenter_cart(i), 1e-7);
+
+ // Barycenter check for twice the scale
+ s = tr.locate_point(point, 2);
+ barycenter_cart = Eigen::Vector3d::Zero();
+ for (auto v : s.vertex_range())
+ for (std::size_t i = 0; i < v.size(); i++) barycenter_cart(i) += v[i];
+ barycenter_cart /= 3.; // simplex is now a two-dimensional face
+ barycenter_cart /= 2.; // scale
+ barycenter = tr.barycenter(s, 2);
+ for (std::size_t i = 0; (long int)i < barycenter.size(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(barycenter(i), barycenter_cart(i), 1e-7);
+
+ // Matrix and offset change check
+ Eigen::MatrixXd new_matrix(3, 3);
+ new_matrix << 1, 0, 0, -1, 1, 0, -1, 0, 1;
+ Eigen::Vector3d new_offset(1.5, 1, 0.5);
+ tr.change_matrix(new_matrix);
+ tr.change_offset(new_offset);
+
+ BOOST_CHECK(tr.matrix() == new_matrix);
+ BOOST_CHECK(tr.offset() == new_offset);
+}
+
+#ifdef GUDHI_DEBUG
+BOOST_AUTO_TEST_CASE(freudenthal_triangulation_exceptions_in_debug_mode) {
+ // Point location check
+ typedef Gudhi::coxeter_triangulation::Freudenthal_triangulation<> FK_triangulation;
+
+ BOOST_CHECK_THROW (FK_triangulation tr(3, Eigen::MatrixXd::Identity(3, 3), Eigen::VectorXd::Zero(4)),
+ std::invalid_argument);
+
+ FK_triangulation tr(3);
+ // Point of dimension 4
+ std::vector<double> point({3.5, -1.8, 0.3, 4.1});
+ BOOST_CHECK_THROW (tr.locate_point(point), std::invalid_argument);
+}
+#endif
diff --git a/src/Coxeter_triangulation/test/function_test.cpp b/src/Coxeter_triangulation/test/function_test.cpp
new file mode 100644
index 00000000..43dbcb75
--- /dev/null
+++ b/src/Coxeter_triangulation/test/function_test.cpp
@@ -0,0 +1,158 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+// workaround for the annoying boost message in boost 1.69
+#define BOOST_PENDING_INTEGER_LOG2_HPP
+#include <boost/integer/integer_log2.hpp>
+// end workaround
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "function"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Function_affine_plane_in_Rd.h>
+#include <gudhi/Functions/Constant_function.h>
+#include <gudhi/Functions/Function_chair_in_R3.h>
+#include <gudhi/Functions/Function_torus_in_R3.h>
+#include <gudhi/Functions/Function_whitney_umbrella_in_R3.h>
+#include <gudhi/Functions/Function_lemniscate_revolution_in_R3.h>
+#include <gudhi/Functions/Function_iron_in_R3.h>
+#include <gudhi/Functions/Function_moment_curve_in_Rd.h>
+#include <gudhi/Functions/Embed_in_Rd.h>
+#include <gudhi/Functions/Translate.h>
+#include <gudhi/Functions/Linear_transformation.h>
+#include <gudhi/Functions/Negation.h>
+#include <gudhi/Functions/Cartesian_product.h>
+#include <gudhi/Functions/PL_approximation.h>
+
+#include <gudhi/Coxeter_triangulation.h>
+
+#include <string>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
+template <class Function>
+void test_function(const Function& fun) {
+ Eigen::VectorXd seed = fun.seed();
+ Eigen::VectorXd res_seed = fun(fun.seed());
+ BOOST_CHECK(seed.size() == (long int)fun.amb_d());
+ BOOST_CHECK(res_seed.size() == (long int)fun.cod_d());
+ for (std::size_t i = 0; i < fun.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(res_seed(i), 0., 1e-10);
+}
+
+BOOST_AUTO_TEST_CASE(function) {
+ {
+ // the sphere testing part
+ std::size_t m = 3, d = 5;
+ Eigen::VectorXd center(d);
+ center << 2, 1.5, -0.5, 4.5, -1;
+ double radius = 5;
+ typedef Function_Sm_in_Rd Function_sphere;
+ Function_sphere fun_sphere(radius, m, d, center);
+ test_function(fun_sphere);
+ }
+ {
+ // the affine plane testing part
+ std::size_t m = 0, d = 5;
+ Eigen::MatrixXd normal_matrix = Eigen::MatrixXd::Zero(d, d - m);
+ for (std::size_t i = 0; i < d - m; ++i) normal_matrix(i, i) = 1;
+ typedef Function_affine_plane_in_Rd Function_plane;
+ Function_plane fun_plane(normal_matrix);
+ test_function(fun_plane);
+ }
+ {
+ // the constant function testing part
+ std::size_t k = 2, d = 5;
+ auto x = Eigen::VectorXd::Constant(k, 1);
+ Constant_function fun_const(d, k, x);
+ Eigen::VectorXd res_zero = fun_const(Eigen::VectorXd::Zero(d));
+ for (std::size_t i = 0; i < k; ++i) GUDHI_TEST_FLOAT_EQUALITY_CHECK(res_zero(i), x(i), 1e-10);
+ }
+ {
+ // the chair function
+ Function_chair_in_R3 fun_chair;
+ test_function(fun_chair);
+ }
+ {
+ // the torus function
+ Function_torus_in_R3 fun_torus;
+ test_function(fun_torus);
+ }
+ {
+ // the whitney umbrella function
+ Function_whitney_umbrella_in_R3 fun_umbrella;
+ test_function(fun_umbrella);
+ }
+ {
+ // the lemniscate revolution function
+ Function_lemniscate_revolution_in_R3 fun_lemniscate;
+ test_function(fun_lemniscate);
+ }
+ {
+ // the iron function
+ Function_iron_in_R3 fun_iron;
+ test_function(fun_iron);
+ }
+ {
+ Function_moment_curve_in_Rd fun_moment_curve(3, 5);
+ test_function(fun_moment_curve);
+ }
+ {
+ // function embedding
+ Function_iron_in_R3 fun_iron;
+ auto fun_embed = make_embedding(fun_iron, 5);
+ test_function(fun_iron);
+
+ // function translation
+ Eigen::VectorXd off = Eigen::VectorXd::Random(5);
+ auto fun_trans = translate(fun_embed, off);
+ test_function(fun_trans);
+
+ // function linear transformation
+ Eigen::MatrixXd matrix = Eigen::MatrixXd::Random(5, 5);
+ BOOST_CHECK(matrix.determinant() != 0.);
+ auto fun_lin = make_linear_transformation(fun_trans, matrix);
+ test_function(fun_lin);
+
+ // function negative
+ auto fun_neg = negation(fun_lin);
+ test_function(fun_neg);
+
+ // function product
+ typedef Function_Sm_in_Rd Function_sphere;
+ Function_sphere fun_sphere(1, 1);
+ auto fun_prod = make_product_function(fun_sphere, fun_sphere, fun_sphere);
+ test_function(fun_prod);
+
+ // function PL approximation
+ Coxeter_triangulation<> cox_tr(6);
+ typedef Coxeter_triangulation<>::Vertex_handle Vertex_handle;
+ auto fun_pl = make_pl_approximation(fun_prod, cox_tr);
+ Vertex_handle v0 = Vertex_handle(cox_tr.dimension(), 0);
+ Eigen::VectorXd x0 = cox_tr.cartesian_coordinates(v0);
+ Eigen::VectorXd value0 = fun_prod(x0);
+ Eigen::VectorXd pl_value0 = fun_pl(x0);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(value0(i), pl_value0(i), 1e-10);
+ Vertex_handle v1 = v0;
+ v1[0] += 1;
+ Eigen::VectorXd x1 = cox_tr.cartesian_coordinates(v1);
+ Eigen::VectorXd value1 = fun_prod(x1);
+ Eigen::VectorXd pl_value1 = fun_pl(x1);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++) GUDHI_TEST_FLOAT_EQUALITY_CHECK(value1(i), pl_value1(i), 1e-10);
+ Eigen::VectorXd pl_value_mid = fun_pl(0.5 * x0 + 0.5 * x1);
+ for (std::size_t i = 0; i < fun_pl.cod_d(); i++)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(0.5 * value0(i) + 0.5 * value1(i), pl_value_mid(i), 1e-10);
+ }
+}
diff --git a/src/Coxeter_triangulation/test/manifold_tracing_test.cpp b/src/Coxeter_triangulation/test/manifold_tracing_test.cpp
new file mode 100644
index 00000000..63497f5a
--- /dev/null
+++ b/src/Coxeter_triangulation/test/manifold_tracing_test.cpp
@@ -0,0 +1,62 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "manifold_tracing"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <iostream>
+
+#include <gudhi/Coxeter_triangulation.h>
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+#include <gudhi/Manifold_tracing.h>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(manifold_tracing) {
+ // manifold without boundary
+ Function_Sm_in_Rd fun_sph(5.1111, 2);
+ auto oracle = make_oracle(fun_sph);
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+
+ using MT = Manifold_tracing<Coxeter_triangulation<> >;
+ Eigen::VectorXd seed = fun_sph.seed();
+ std::vector<Eigen::VectorXd> seed_points(1, seed);
+ typename MT::Out_simplex_map out_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle, out_simplex_map);
+
+ for (auto si_pair : out_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d());
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "out_simplex_map.size() = " << out_simplex_map.size() << "\n";
+ BOOST_CHECK(out_simplex_map.size() == 1118);
+
+ // manifold with boundary
+ Function_Sm_in_Rd fun_boundary(3.0, 2, fun_sph.seed());
+ auto oracle_with_boundary = make_oracle(fun_sph, fun_boundary);
+ typename MT::Out_simplex_map interior_simplex_map, boundary_simplex_map;
+ manifold_tracing_algorithm(seed_points, cox_tr, oracle_with_boundary, interior_simplex_map, boundary_simplex_map);
+ for (auto si_pair : interior_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d());
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "interior_simplex_map.size() = " << interior_simplex_map.size() << "\n";
+ BOOST_CHECK(interior_simplex_map.size() == 96);
+ for (auto si_pair : boundary_simplex_map) {
+ BOOST_CHECK(si_pair.first.dimension() == oracle.function().cod_d() + 1);
+ BOOST_CHECK(si_pair.second.size() == (long int)oracle.function().amb_d());
+ }
+ std::clog << "boundary_simplex_map.size() = " << boundary_simplex_map.size() << "\n";
+ BOOST_CHECK(boundary_simplex_map.size() == 54);
+}
diff --git a/src/Coxeter_triangulation/test/oracle_test.cpp b/src/Coxeter_triangulation/test/oracle_test.cpp
new file mode 100644
index 00000000..ed2042f5
--- /dev/null
+++ b/src/Coxeter_triangulation/test/oracle_test.cpp
@@ -0,0 +1,56 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "oracle"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <string>
+
+#include <gudhi/Implicit_manifold_intersection_oracle.h>
+
+#include <gudhi/Functions/Function_Sm_in_Rd.h>
+#include <gudhi/Functions/Cartesian_product.h>
+
+#include <gudhi/Coxeter_triangulation.h>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
+BOOST_AUTO_TEST_CASE(oracle) {
+ Function_Sm_in_Rd fun_sph(5.1111, 2);
+ auto oracle = make_oracle(fun_sph);
+ Coxeter_triangulation<> cox_tr(oracle.amb_d());
+ // cox_tr.change_offset(Eigen::VectorXd::Random(oracle.amb_d()));
+
+ Eigen::VectorXd seed = fun_sph.seed();
+ auto s = cox_tr.locate_point(seed);
+
+ std::size_t num_intersected_edges = 0;
+ for (auto f : s.face_range(oracle.cod_d())) {
+ auto qr = oracle.intersects(f, cox_tr);
+ if (qr.success) num_intersected_edges++;
+ auto vertex_it = f.vertex_range().begin();
+ Eigen::Vector3d p1 = cox_tr.cartesian_coordinates(*vertex_it++);
+ Eigen::Vector3d p2 = cox_tr.cartesian_coordinates(*vertex_it++);
+ BOOST_CHECK(vertex_it == f.vertex_range().end());
+ Eigen::MatrixXd m(3, 3);
+ if (qr.success) {
+ m.col(0) = qr.intersection;
+ m.col(1) = p1;
+ m.col(2) = p2;
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(m.determinant(), 0.0, 1e-10);
+ }
+ }
+ BOOST_CHECK(num_intersected_edges == 3 || num_intersected_edges == 4);
+}
diff --git a/src/Coxeter_triangulation/test/perm_rep_test.cpp b/src/Coxeter_triangulation/test/perm_rep_test.cpp
new file mode 100644
index 00000000..a668fc66
--- /dev/null
+++ b/src/Coxeter_triangulation/test/perm_rep_test.cpp
@@ -0,0 +1,61 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "permutahedral_representation"
+#include <boost/test/unit_test.hpp>
+
+#include <gudhi/Permutahedral_representation.h>
+
+BOOST_AUTO_TEST_CASE(permutahedral_representation) {
+ typedef std::vector<int> Vertex;
+ typedef std::vector<std::size_t> Part;
+ typedef std::vector<Part> Partition;
+ typedef Gudhi::coxeter_triangulation::Permutahedral_representation<Vertex, Partition> Simplex_handle;
+ Vertex v0(10, 0);
+ Partition omega = {Part({5}), Part({2}), Part({3, 7}), Part({4, 9}), Part({0, 6, 8}), Part({1, 10})};
+ Simplex_handle s(v0, omega);
+
+ // Dimension check
+ BOOST_CHECK(s.dimension() == 5);
+
+ // Vertex number check
+ std::vector<Vertex> vertices;
+ for (auto& v : s.vertex_range()) vertices.push_back(v);
+ BOOST_CHECK(vertices.size() == 6);
+
+ // Facet number check
+ std::vector<Simplex_handle> facets;
+ for (auto& f : s.facet_range()) facets.push_back(f);
+ BOOST_CHECK(facets.size() == 6);
+
+ // Face of dim 3 number check
+ std::vector<Simplex_handle> faces3;
+ for (auto& f : s.face_range(3)) faces3.push_back(f);
+ BOOST_CHECK(faces3.size() == 15);
+
+ // Cofacet number check
+ std::vector<Simplex_handle> cofacets;
+ for (auto& f : s.cofacet_range()) cofacets.push_back(f);
+ BOOST_CHECK(cofacets.size() == 12);
+
+ // Is face check
+ Vertex v1(10, 0);
+ Partition omega1 = {Part({5}), Part({0, 1, 2, 3, 4, 6, 7, 8, 9, 10})};
+ Simplex_handle s1(v1, omega1);
+ Vertex v2(10, 0);
+ v2[1] = -1;
+ Partition omega2 = {Part({1}), Part({5}), Part({2}), Part({3, 7}), Part({4, 9}), Part({0, 6, 8}), Part({10})};
+ Simplex_handle s2(v2, omega2);
+ BOOST_CHECK(s.is_face_of(s));
+ BOOST_CHECK(s1.is_face_of(s));
+ BOOST_CHECK(!s2.is_face_of(s));
+ BOOST_CHECK(s.is_face_of(s2));
+}
diff --git a/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp b/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp
new file mode 100644
index 00000000..84178741
--- /dev/null
+++ b/src/Coxeter_triangulation/test/random_orthogonal_matrix_function_test.cpp
@@ -0,0 +1,36 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Siargey Kachanovich
+ *
+ * Copyright (C) 2019 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "random_orthogonal_matrix_function"
+#include <boost/test/unit_test.hpp>
+#include <gudhi/Unitary_tests_utils.h>
+
+#include <gudhi/Functions/random_orthogonal_matrix.h>
+
+#include <string>
+
+#include <random>
+#include <cstdlib>
+
+using namespace Gudhi::coxeter_triangulation;
+
+// this test is separated as it requires CGAL
+BOOST_AUTO_TEST_CASE(random_orthogonal_matrix_function) {
+ // random orthogonal matrix
+ Eigen::MatrixXd matrix = random_orthogonal_matrix(5);
+ Eigen::MatrixXd id_matrix = matrix.transpose() * matrix;
+ for (std::size_t i = 0; i < 5; ++i)
+ for (std::size_t j = 0; j < 5; ++j)
+ if (i == j)
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(id_matrix(i, j), 1.0, 1e-10);
+ else
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(id_matrix(i, j), 0.0, 1e-10);
+}
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index 49e781bd..ae8db1a3 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -1,4 +1,4 @@
-# Doxyfile 1.8.6
+# Doxyfile 1.8.13
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
-PROJECT_NAME = "GUDHI"
+PROJECT_NAME = "@CMAKE_PROJECT_NAME@"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
@@ -46,10 +46,10 @@ PROJECT_NUMBER = "@GUDHI_VERSION@"
PROJECT_BRIEF = "C++ library for Topological Data Analysis (TDA) and Higher Dimensional Geometry Understanding."
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
PROJECT_LOGO =
@@ -58,9 +58,9 @@ PROJECT_LOGO =
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
-OUTPUT_DIRECTORY = "doc/"
+OUTPUT_DIRECTORY =
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
# will distribute the generated files over these directories. Enabling this
# option can be useful when feeding doxygen a huge amount of source files, where
@@ -70,6 +70,14 @@ OUTPUT_DIRECTORY = "doc/"
CREATE_SUBDIRS = NO
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
# The OUTPUT_LANGUAGE tag is used to specify the language in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all constant output in the proper language.
@@ -85,14 +93,14 @@ CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
# The default value is: YES.
BRIEF_MEMBER_DESC = YES
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
# description of a member or function before the detailed description
#
# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
@@ -127,7 +135,7 @@ ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
# shortest path that makes the file name unique will be used
# The default value is: YES.
@@ -153,7 +161,8 @@ STRIP_FROM_PATH =
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
-STRIP_FROM_INC_PATH = include concept
+STRIP_FROM_INC_PATH = include \
+ concept
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful is your file systems doesn't
@@ -197,9 +206,9 @@ MULTILINE_CPP_IS_BRIEF = NO
INHERIT_DOCS = YES
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
# The default value is: NO.
SEPARATE_MEMBER_PAGES = NO
@@ -261,11 +270,14 @@ OPTIMIZE_OUTPUT_VHDL = NO
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C.
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the later case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
-# Note For files without extension you can use no_extension as a placeholder.
+# Note: For files without extension you can use no_extension as a placeholder.
#
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
@@ -282,10 +294,19 @@ EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 0.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 0
+
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
# The default value is: YES.
AUTOLINK_SUPPORT = YES
@@ -325,13 +346,20 @@ SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
+# tag is set to YES then doxygen will reuse the documentation of the first
# member in the group (if any) for the other members of the group. By default
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = NO
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
@@ -390,7 +418,7 @@ LOOKUP_CACHE_SIZE = 0
# Build related configuration options
#---------------------------------------------------------------------------
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
# documentation are documented, even if no documentation was available. Private
# class members and static file members will be hidden unless the
# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
@@ -400,35 +428,35 @@ LOOKUP_CACHE_SIZE = 0
EXTRACT_ALL = NO
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
EXTRACT_PACKAGE = NO
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
# only classes defined in header files are included. Does not have any effect
# for Java sources.
# The default value is: YES.
EXTRACT_LOCAL_CLASSES = NO
-# This flag is only useful for Objective-C code. When set to YES local methods,
+# This flag is only useful for Objective-C code. If set to YES, local methods,
# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
# included.
# The default value is: NO.
@@ -453,21 +481,21 @@ HIDE_UNDOC_MEMBERS = YES
# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
# The default value is: NO.
HIDE_UNDOC_CLASSES = YES
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
+# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
+# documentation blocks found inside the body of a function. If set to NO, these
# blocks will be appended to the function's detailed documentation block.
# The default value is: NO.
@@ -481,7 +509,7 @@ HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = NO
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
+# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
@@ -490,12 +518,19 @@ INTERNAL_DOCS = NO
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
+# their full class and namespace scopes in the documentation. If set to YES, the
# scope will be hidden.
# The default value is: NO.
HIDE_SCOPE_NAMES = NO
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
# the files that are included by a file in the documentation of that file.
# The default value is: YES.
@@ -523,14 +558,14 @@ INLINE_INFO = YES
# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
+# name. If set to NO, the members will appear in declaration order.
# The default value is: YES.
SORT_MEMBER_DOCS = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
+# name. If set to NO, the members will appear in declaration order. Note that
# this will also influence the order of the classes in the class list.
# The default value is: NO.
@@ -575,27 +610,25 @@ SORT_BY_SCOPE_NAME = NO
STRICT_PROTO_MATCHING = NO
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
# The default value is: YES.
GENERATE_TODOLIST = NO
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
# The default value is: YES.
GENERATE_TESTLIST = NO
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
# list. This list is created by putting \bug commands in the documentation.
# The default value is: YES.
GENERATE_BUGLIST = NO
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
# the deprecated list. This list is created by putting \deprecated commands in
# the documentation.
# The default value is: YES.
@@ -620,8 +653,8 @@ ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
# The default value is: YES.
SHOW_USED_FILES = YES
@@ -669,12 +702,11 @@ LAYOUT_FILE =
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. Do not use file names with spaces, bibtex cannot handle them. See
-# also \cite for info how to create references.
+# search path. See also \cite for info how to create references.
-CITE_BIB_FILES = biblio/bibliography.bib \
- biblio/how_to_cite_cgal.bib \
- biblio/how_to_cite_gudhi.bib
+CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \
+ @CMAKE_SOURCE_DIR@/biblio/how_to_cite_cgal.bib \
+ @CMAKE_SOURCE_DIR@/biblio/how_to_cite_gudhi.bib
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
@@ -688,7 +720,7 @@ CITE_BIB_FILES = biblio/bibliography.bib \
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
# this implies that the warnings are on.
#
# Tip: Turn warnings on while writing the documentation.
@@ -696,7 +728,7 @@ QUIET = NO
WARNINGS = YES
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
# will automatically be disabled.
# The default value is: YES.
@@ -713,12 +745,18 @@ WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
@@ -742,10 +780,10 @@ WARN_LOGFILE =
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# Note: If this tag is empty the current directory is searched.
-INPUT =
+INPUT = @CMAKE_SOURCE_DIR@
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -758,14 +796,30 @@ INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-#FILE_PATTERNS =
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
+
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.md \
+ *.mm \
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
@@ -780,13 +834,14 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
-EXCLUDE = data/ \
- example/ \
- GudhUI/ \
- cmake/ \
- python/ \
- ext/ \
- README.md
+EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \
+ @CMAKE_SOURCE_DIR@/ext/ \
+ @CMAKE_SOURCE_DIR@/README.md \
+ @CMAKE_SOURCE_DIR@/.github \
+ @CMAKE_CURRENT_BINARY_DIR@/new_gudhi_version_creation.md \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/GudhUI/ \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \
+ @GUDHI_DOXYGEN_SOURCE_PREFIX@/python/
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@@ -802,7 +857,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
-EXCLUDE_PATTERNS = */utilities/*/*.md
+EXCLUDE_PATTERNS = @GUDHI_DOXYGEN_SOURCE_PREFIX@/@GUDHI_DOXYGEN_UTILS_PATH@/*.md
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@@ -819,17 +874,16 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH = biblio/ \
- example/ \
- utilities/ \
- data/
+EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \
+ @CMAKE_SOURCE_DIR@/data/ \
+ @GUDHI_DOXYGEN_EXAMPLE_PATH@
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
-EXAMPLE_PATTERNS =
+EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
@@ -858,6 +912,10 @@ IMAGE_PATH = @GUDHI_DOXYGEN_IMAGE_PATH@
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
INPUT_FILTER =
@@ -867,11 +925,15 @@ INPUT_FILTER =
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
+# INPUT_FILTER) will also be used to filter the input files that are used for
# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
# The default value is: NO.
@@ -890,7 +952,7 @@ FILTER_SOURCE_PATTERNS =
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
-USE_MDFILE_AS_MAINPAGE = doc/common/main_page.md
+USE_MDFILE_AS_MAINPAGE = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/main_page.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
@@ -931,7 +993,7 @@ REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
# link to the documentation.
# The default value is: YES.
@@ -978,6 +1040,25 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse-libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@@ -1008,7 +1089,7 @@ IGNORE_PREFIX =
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
# The default value is: YES.
GENERATE_HTML = YES
@@ -1046,7 +1127,7 @@ HTML_FILE_EXTENSION = .html
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_HEADER = doc/common/header.html
+HTML_HEADER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/header.html
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
@@ -1056,7 +1137,7 @@ HTML_HEADER = doc/common/header.html
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_FOOTER = doc/common/footer.html
+HTML_FOOTER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
@@ -1068,15 +1149,17 @@ HTML_FOOTER = doc/common/footer.html
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
-HTML_STYLESHEET = doc/common/stylesheet.css
+HTML_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
-# defined cascading style sheet that is included after the standard style sheets
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
-# Doxygen will copy the style sheet file to the output directory. For an example
-# see the documentation.
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
@@ -1092,7 +1175,7 @@ HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
+# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
@@ -1123,8 +1206,9 @@ HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
@@ -1220,28 +1304,29 @@ GENERATE_HTMLHELP = NO
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
# doxygen will try to run the HTML help compiler on the generated index.hhp.
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
GENERATE_CHI = NO
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file.
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
@@ -1354,7 +1439,7 @@ DISABLE_INDEX = YES
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
@@ -1382,7 +1467,7 @@ ENUM_VALUES_PER_LINE = 4
TREEVIEW_WIDTH = 250
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
# external symbols imported via tag files in a separate window.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1411,7 +1496,7 @@ FORMULA_TRANSPARENT = YES
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want to formulas look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
@@ -1448,7 +1533,8 @@ MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
-MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+MATHJAX_EXTENSIONS = TeX/AMSmath \
+ TeX/AMSsymbols
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
@@ -1481,11 +1567,11 @@ SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using Javascript. There
-# are two flavours of web server based searching depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools. See
-# the section "External Indexing and Searching" for details.
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
@@ -1497,7 +1583,7 @@ SERVER_BASED_SEARCH = NO
# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
# search results.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/).
#
@@ -1510,7 +1596,7 @@ EXTERNAL_SEARCH = NO
# The SEARCHENGINE_URL should point to a search engine hosted by a web server
# which will return the search results when EXTERNAL_SEARCH is enabled.
#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
+# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
@@ -1548,7 +1634,7 @@ EXTRA_SEARCH_MAPPINGS =
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
@@ -1579,7 +1665,7 @@ LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
@@ -1597,13 +1683,18 @@ COMPACT_LATEX = NO
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
-EXTRA_PACKAGES = amsfonts amsmath amssymb
+EXTRA_PACKAGES = amsfonts \
+ amsmath \
+ amssymb
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
@@ -1613,23 +1704,36 @@ EXTRA_PACKAGES = amsfonts amsmath amssymb
#
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
-# replace them by respectively the title of the page, the current date and time,
-# only the current date, the version number of doxygen, the project name (see
-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer.
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
#
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
# directory. Note that the files will be copied as-is; there are no commands or
@@ -1647,8 +1751,8 @@ LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
# higher quality PDF documentation.
# The default value is: YES.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -1689,11 +1793,19 @@ LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
# RTF output is optimized for Word 97 and may not look too pretty with other RTF
# readers/editors.
# The default value is: NO.
@@ -1708,7 +1820,7 @@ GENERATE_RTF = NO
RTF_OUTPUT = rtf
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
# documents. This may be useful for small projects and may help to save some
# trees in general.
# The default value is: NO.
@@ -1745,11 +1857,21 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
# classes and files.
# The default value is: NO.
@@ -1773,6 +1895,13 @@ MAN_OUTPUT = man
MAN_EXTENSION = .3
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
# will generate one additional man file for each entity documented in the real
# man page(s). These additional files only source the real man page, but without
@@ -1786,7 +1915,7 @@ MAN_LINKS = NO
# Configuration options related to the XML output
#---------------------------------------------------------------------------
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
# captures the structure of the code including all documentation.
# The default value is: NO.
@@ -1800,7 +1929,7 @@ GENERATE_XML = NO
XML_OUTPUT = xml
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
# of the XML output.
@@ -1813,7 +1942,7 @@ XML_PROGRAMLISTING = YES
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
# that can be used to generate PDF.
# The default value is: NO.
@@ -1827,14 +1956,23 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
@@ -1843,7 +1981,7 @@ GENERATE_AUTOGEN_DEF = NO
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
# file that captures the structure of the code including all documentation.
#
# Note that this feature is still experimental and incomplete at the moment.
@@ -1851,7 +1989,7 @@ GENERATE_AUTOGEN_DEF = NO
GENERATE_PERLMOD = NO
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
# output from the Perl module output.
# The default value is: NO.
@@ -1859,9 +1997,9 @@ GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
+# understand what is going on. On the other hand, if this tag is set to NO, the
# size of the Perl module output will be much smaller and Perl will parse it
# just the same.
# The default value is: YES.
@@ -1881,14 +2019,14 @@ PERLMOD_MAKEVAR_PREFIX =
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.
ENABLE_PREPROCESSING = YES
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
# performed. Macro expansion can be done in a controlled way by setting
# EXPAND_ONLY_PREDEF to YES.
# The default value is: NO.
@@ -1904,7 +2042,7 @@ MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
@@ -1946,9 +2084,9 @@ PREDEFINED = protected=private
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
-# all uppercase name, and do not end with a semicolon. Such function macros are
-# typically used for boiler-plate code, and will confuse the parser if not
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
# removed.
# The default value is: YES.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
@@ -1968,7 +2106,7 @@ SKIP_FUNCTION_MACROS = YES
# where loc1 and loc2 can be relative or absolute paths or URLs. See the
# section "Linking to external documentation" for more information about the use
# of tag files.
-# Note: Each tag file must have an unique name (where the name does NOT include
+# Note: Each tag file must have a unique name (where the name does NOT include
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
@@ -1980,20 +2118,21 @@ TAGFILES =
GENERATE_TAGFILE =
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
# The default value is: NO.
ALLEXTERNALS = NO
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = YES
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
@@ -2010,7 +2149,7 @@ PERL_PATH = /usr/bin/perl
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
# NO turns the diagrams off. Note that this option also works with HAVE_DOT
# disabled, but it is recommended to install and use dot, since it yields more
@@ -2035,7 +2174,7 @@ MSCGEN_PATH =
DIA_PATH =
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
+# If set to YES the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
@@ -2046,7 +2185,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
-# The default value is: NO.
+# The default value is: YES.
HAVE_DOT = YES
@@ -2060,7 +2199,7 @@ HAVE_DOT = YES
DOT_NUM_THREADS = 0
-# When you want a differently looking font n the dot files that doxygen
+# When you want a differently looking font in the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
# sure dot is able to find the font, which can be done by putting it in a
# standard location or by setting the DOTFONTPATH environment variable or by
@@ -2108,7 +2247,7 @@ COLLABORATION_GRAPH = NO
GROUP_GRAPHS = YES
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.
# The default value is: NO.
@@ -2160,7 +2299,8 @@ INCLUDED_BY_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2171,7 +2311,8 @@ CALL_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2194,11 +2335,17 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
-# Possible values are: png, jpg, gif and svg.
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
@@ -2241,6 +2388,24 @@ MSCFILE_DIRS =
DIAFILE_DIRS =
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
+# configuration file for plantuml.
+
+PLANTUML_CFG_FILE =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes
# larger than this value, doxygen will truncate the graph, which is visualized
@@ -2277,7 +2442,7 @@ MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
# this, this feature is disabled by default.
@@ -2294,7 +2459,7 @@ DOT_MULTI_TARGETS = YES
GENERATE_LEGEND = YES
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
# files that are used to generate the various graphs.
# The default value is: YES.
# This tag requires that the tag HAVE_DOT is set to YES.
diff --git a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
index f9441b24..a6098860 100644
--- a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
+++ b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
@@ -53,7 +53,7 @@ namespace cover_complex {
* covering the height function (coordinate 2),
* which are then refined into their connected components using the triangulation of the .OFF file.
*
- * \include Nerve_GIC/Nerve.cpp
+ * \include Nerve.cpp
*
* When launching:
*
@@ -62,7 +62,7 @@ namespace cover_complex {
*
* the program output is:
*
- * \include Nerve_GIC/Nerve.txt
+ * \include Nerve.txt
*
* The program also writes a file ../../data/points/human_sc.txt. The first three lines in this file are the location
* of the input point cloud and the function used to compute the cover.
@@ -96,7 +96,7 @@ namespace cover_complex {
* comes from the triangulation of the human shape. Note that the resulting simplicial complex is in dimension 3
* in this example.
*
- * \include Nerve_GIC/VoronoiGIC.cpp
+ * \include VoronoiGIC.cpp
*
* When launching:
*
@@ -129,7 +129,7 @@ namespace cover_complex {
* with automatic resolution and gain. Note that automatic threshold, resolution and gain
* can be computed as well for the Nerve.
*
- * \include Nerve_GIC/CoordGIC.cpp
+ * \include CoordGIC.cpp
*
* When launching:
*
@@ -152,7 +152,7 @@ namespace cover_complex {
* The function is now the first eigenfunction given by PCA, whose values
* are written in a file (lucky_cat_PCA1). Threshold, resolution and gain are automatically selected as before.
*
- * \include Nerve_GIC/FuncGIC.cpp
+ * \include FuncGIC.cpp
*
* When launching:
*
diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
index b4f9fd2c..a3613d0d 100644
--- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
+++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
@@ -131,7 +131,7 @@ namespace persistent_cohomology {
We provide several example files: run these examples with -h for details on their use, and read the README file.
-\li <a href="_rips_complex_2rips_persistence_8cpp-example.html">
+\li <a href="rips_persistence_8cpp-example.html">
Rips_complex/rips_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its persistence
diagram.
\code $> ./rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3 \endcode
@@ -144,11 +144,11 @@ diagram.
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
+\li <a href="rips_multifield_persistence_8cpp-example.html">
Persistent_cohomology/rips_multifield_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its
persistence diagram with a family of field coefficients.
-\li <a href="_rips_complex_2rips_distance_matrix_persistence_8cpp-example.html">
+\li <a href="rips_distance_matrix_persistence_8cpp-example.html">
Rips_complex/rips_distance_matrix_persistence.cpp</a> computes the Rips complex of a distance matrix and
outputs its persistence diagram.
@@ -158,7 +158,7 @@ Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_rips_complex_2rips_correlation_matrix_persistence_8cpp-example.html">
+\li <a href="rips_correlation_matrix_persistence_8cpp-example.html">
Rips_complex/rips_correlation_matrix_persistence.cpp</a>
computes the Rips complex of a correlation matrix and outputs its persistence diagram.
@@ -169,7 +169,7 @@ Please refer to data/correlation_matrix/lower_triangular_correlation_matrix.csv
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
+\li <a href="alpha_complex_3d_persistence_8cpp-example.html">
Alpha_complex/alpha_complex_3d_persistence.cpp</a> computes the persistent homology with
\f$\mathbb{Z}/2\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_3d_persistence ../../data/points/tore3D_300.off -p 2 -m 0.45 \endcode
@@ -235,7 +235,7 @@ Note that the lengths of the sides of the periodic cuboid have to be the same.<b
3 2 36.8838 inf
3 3 58.6783 inf \endcode
-\li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
+\li <a href="alpha_complex_persistence_8cpp-example.html">
Alpha_complex/alpha_complex_persistence.cpp</a> computes the persistent homology with
\f$\mathbb{Z}/p\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_persistence -r 32 -p 2 -m 0.45 ../../data/points/tore3D_300.off \endcode
@@ -248,7 +248,7 @@ Simplex_tree dim: 3
More details on the <a href="../../alphacomplex/">Alpha complex utilities</a> dedicated page.
-\li <a href="_persistent_cohomology_2plain_homology_8cpp-example.html">
+\li <a href="plain_homology_8cpp-example.html">
Persistent_cohomology/plain_homology.cpp</a> computes the plain homology of a simple simplicial complex without
filtration values.
diff --git a/src/Persistent_cohomology/example/CMakeLists.txt b/src/Persistent_cohomology/example/CMakeLists.txt
index c68c6524..d66954d7 100644
--- a/src/Persistent_cohomology/example/CMakeLists.txt
+++ b/src/Persistent_cohomology/example/CMakeLists.txt
@@ -11,7 +11,7 @@ if (TBB_FOUND)
target_link_libraries(persistence_from_simple_simplex_tree ${TBB_LIBRARIES})
endif()
add_test(NAME Persistent_cohomology_example_from_simple_simplex_tree COMMAND $<TARGET_FILE:persistence_from_simple_simplex_tree>
- "1" "0")
+ "2" "0")
if(TARGET Boost::program_options)
add_executable(rips_persistence_step_by_step rips_persistence_step_by_step.cpp)
@@ -40,9 +40,9 @@ if(TARGET Boost::program_options)
target_link_libraries(persistence_from_file ${TBB_LIBRARIES})
endif()
add_test(NAME Persistent_cohomology_example_from_file_3_2_0 COMMAND $<TARGET_FILE:persistence_from_file>
- "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/bunny_5000_complex.fsc" "-p" "2" "-m" "0")
+ "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/Klein_bottle_complex.fsc" "-p" "2" "-m" "0")
add_test(NAME Persistent_cohomology_example_from_file_3_3_100 COMMAND $<TARGET_FILE:persistence_from_file>
- "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/bunny_5000_complex.fsc" "-p" "3" "-m" "100")
+ "${CMAKE_SOURCE_DIR}/data/filtered_simplicial_complex/Klein_bottle_complex.fsc" "-p" "3" "-m" "100")
endif()
if(GMP_FOUND)
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index d34ee07d..d428e497 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -100,7 +100,7 @@ class Persistent_cohomology {
ds_rank_(num_simplices_), // union-find
ds_parent_(num_simplices_), // union-find
ds_repr_(num_simplices_, NULL), // union-find -> annotation vectors
- dsets_(&ds_rank_[0], &ds_parent_[0]), // union-find
+ dsets_(ds_rank_.data(), ds_parent_.data()), // union-find
cam_(), // collection of annotation vectors
zero_cocycles_(), // union-find -> Simplex_key of creator for 0-homology
transverse_idx_(), // key -> row
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
index 0673625c..f442b632 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology/Field_Zp.h
@@ -13,6 +13,7 @@
#include <utility>
#include <vector>
+#include <stdexcept>
namespace Gudhi {
@@ -33,15 +34,28 @@ class Field_Zp {
}
void init(int charac) {
- assert(charac > 0); // division by zero + non negative values
Prime = charac;
+
+ // Check that the provided prime is less than the maximum allowed as int, calculation below, and 'plus_times_equal' function : 46337 ; i.e (max_prime-1)*max_prime <= INT_MAX
+ if(Prime > 46337)
+ throw std::invalid_argument("Maximum homology_coeff_field allowed value is 46337");
+
+ // Check for primality
+ if (Prime <= 1)
+ throw std::invalid_argument("homology_coeff_field must be a prime number");
+
inverse_.clear();
inverse_.reserve(charac);
inverse_.push_back(0);
for (int i = 1; i < Prime; ++i) {
int inv = 1;
- while (((inv * i) % Prime) != 1)
+ int mult = inv * i;
+ while ( (mult % Prime) != 1) {
++inv;
+ if(mult == Prime)
+ throw std::invalid_argument("homology_coeff_field must be a prime number");
+ mult = inv * i;
+ }
inverse_.push_back(inv);
}
}
diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
index fe3f8517..ea41a8aa 100644
--- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
+++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test.cpp
@@ -21,7 +21,7 @@ using namespace boost::unit_test;
typedef Simplex_tree<> typeST;
-std::string test_rips_persistence(int coefficient, int min_persistence) {
+std::string test_persistence(int coefficient, int min_persistence) {
// file is copied in CMakeLists.txt
std::ifstream simplex_tree_stream;
simplex_tree_stream.open("simplex_tree_file_for_unit_test.txt");
@@ -44,16 +44,16 @@ std::string test_rips_persistence(int coefficient, int min_persistence) {
Persistent_cohomology<Simplex_tree<>, Field_Zp> pcoh(st);
pcoh.init_coefficients( coefficient ); // initializes the coefficient field for homology
- // Check infinite rips
+ // Compute the persistent homology of the complex
pcoh.compute_persistent_cohomology( min_persistence ); // Minimal lifetime of homology feature to be recorded.
- std::ostringstream ossInfinite;
+ std::ostringstream ossPers;
- pcoh.output_diagram(ossInfinite);
- std::string strInfinite = ossInfinite.str();
- return strInfinite;
+ pcoh.output_diagram(ossPers);
+ std::string strPers = ossPers.str();
+ return strPers;
}
-void test_rips_persistence_in_dimension(int dimension) {
+void test_persistence_with_coeff_field(int coeff_field) {
std::string value0(" 0 0.02 1.12");
std::string value1(" 0 0.03 1.13");
std::string value2(" 0 0.04 1.14");
@@ -65,112 +65,104 @@ void test_rips_persistence_in_dimension(int dimension) {
std::string value8(" 0 0 inf" );
std::string value9(" 0 0.01 inf" );
- value0.insert(0,std::to_string(dimension));
- value1.insert(0,std::to_string(dimension));
- value2.insert(0,std::to_string(dimension));
- value3.insert(0,std::to_string(dimension));
- value4.insert(0,std::to_string(dimension));
- value5.insert(0,std::to_string(dimension));
- value6.insert(0,std::to_string(dimension));
- value7.insert(0,std::to_string(dimension));
- value8.insert(0,std::to_string(dimension));
- value9.insert(0,std::to_string(dimension));
+ value0.insert(0,std::to_string(coeff_field));
+ value1.insert(0,std::to_string(coeff_field));
+ value2.insert(0,std::to_string(coeff_field));
+ value3.insert(0,std::to_string(coeff_field));
+ value4.insert(0,std::to_string(coeff_field));
+ value5.insert(0,std::to_string(coeff_field));
+ value6.insert(0,std::to_string(coeff_field));
+ value7.insert(0,std::to_string(coeff_field));
+ value8.insert(0,std::to_string(coeff_field));
+ value9.insert(0,std::to_string(coeff_field));
std::clog << "********************************************************************" << std::endl;
- std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=0" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=0" << std::endl;
- std::string str_rips_persistence = test_rips_persistence(dimension, 0);
- std::clog << str_rips_persistence << std::endl;
+ std::string str_persistence = test_persistence(coeff_field, 0);
+ std::clog << str_persistence << std::endl;
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
std::clog << "********************************************************************" << std::endl;
- std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=1" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, 1);
-
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=1" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, 1);
+
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
std::clog << "********************************************************************" << std::endl;
- std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=2" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, 2);
-
- BOOST_CHECK(str_rips_persistence.find(value0) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value1) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value2) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=2" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, 2);
+
+ BOOST_CHECK(str_persistence.find(value0) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value1) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value2) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
std::clog << "********************************************************************" << std::endl;
- std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_SINGLE_FIELD DIM=" << dimension << " MIN_PERS=Inf" << std::endl;
-
- str_rips_persistence = test_rips_persistence(dimension, (std::numeric_limits<int>::max)());
-
- BOOST_CHECK(str_rips_persistence.find(value0) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value1) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value2) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value8) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value9) != std::string::npos); // Check found
- std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_SINGLE_FIELD COEFF_FIELD=" << coeff_field << " MIN_PERS=Inf" << std::endl;
+
+ str_persistence = test_persistence(coeff_field, (std::numeric_limits<int>::max)());
+
+ BOOST_CHECK(str_persistence.find(value0) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value1) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value2) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value8) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value9) != std::string::npos); // Check found
+ std::clog << "str_persistence=" << str_persistence << std::endl;
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_1 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_not_prime )
{
- test_rips_persistence_in_dimension(1);
+ for (auto non_prime : {0, 1, 4, 6})
+ BOOST_CHECK_THROW(test_persistence_with_coeff_field(non_prime), std::invalid_argument);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_2 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_prime )
{
- test_rips_persistence_in_dimension(2);
+ for (auto prime : {2, 3, 5, 11, 13})
+ test_persistence_with_coeff_field(prime);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_3 )
+BOOST_AUTO_TEST_CASE( persistent_cohomology_single_field_coeff_limit )
{
- test_rips_persistence_in_dimension(3);
+ BOOST_CHECK_THROW(test_persistence_with_coeff_field(46349), std::invalid_argument);
}
-BOOST_AUTO_TEST_CASE( rips_persistent_cohomology_single_field_dim_5 )
-{
- test_rips_persistence_in_dimension(5);
-}
-
-// TODO(VR): not working from 6
-// std::string str_rips_persistence = test_rips_persistence(6, 0);
-// TODO(VR): division by zero
-// std::string str_rips_persistence = test_rips_persistence(0, 0);
-
/** SimplexTree minimal options to test the limits.
*
 * Maximum number of simplices to compute persistence is <CODE>std::numeric_limits<std::uint8_t>::max()</CODE> = 255.*/
diff --git a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
index 3602aa09..c6c0bfaf 100644
--- a/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
+++ b/src/Persistent_cohomology/test/persistent_cohomology_unit_test_multi_field.cpp
@@ -21,7 +21,7 @@ using namespace boost::unit_test;
typedef Simplex_tree<> typeST;
-std::string test_rips_persistence(int min_coefficient, int max_coefficient, double min_persistence) {
+std::string test_persistence(int min_coefficient, int max_coefficient, double min_persistence) {
// file is copied in CMakeLists.txt
std::ifstream simplex_tree_stream;
simplex_tree_stream.open("simplex_tree_file_for_multi_field_unit_test.txt");
@@ -44,17 +44,17 @@ std::string test_rips_persistence(int min_coefficient, int max_coefficient, doub
Persistent_cohomology<Simplex_tree<>, Multi_field> pcoh(st);
pcoh.init_coefficients(min_coefficient, max_coefficient); // initializes the coefficient field for homology
- // Check infinite rips
+ // Compute the persistent homology of the complex
pcoh.compute_persistent_cohomology(min_persistence); // Minimal lifetime of homology feature to be recorded.
- std::ostringstream ossRips;
- pcoh.output_diagram(ossRips);
+ std::ostringstream ossPers;
+ pcoh.output_diagram(ossPers);
- std::string strRips = ossRips.str();
- return strRips;
+ std::string strPers = ossPers.str();
+ return strPers;
}
-void test_rips_persistence_in_dimension(int min_dimension, int max_dimension) {
+void test_persistence_with_coeff_field(int min_coefficient, int max_coefficient) {
 // there are 2 disconnected components
std::string value0(" 0 0.25 inf");
std::string value1(" 1 0.4 inf");
@@ -69,47 +69,59 @@ void test_rips_persistence_in_dimension(int min_dimension, int max_dimension) {
std::string value7(" 2 0.4 inf");
std::clog << "********************************************************************" << std::endl;
- std::clog << "TEST OF RIPS_PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_DIM=" << min_dimension << " MAX_DIM=" << max_dimension << " MIN_PERS=0" << std::endl;
+ std::clog << "TEST OF PERSISTENT_COHOMOLOGY_MULTI_FIELD MIN_COEFF=" << min_coefficient << " MAX_COEFF=" << max_coefficient << " MIN_PERS=0" << std::endl;
- std::string str_rips_persistence = test_rips_persistence(min_dimension, max_dimension, 0.0);
- std::clog << "str_rips_persistence=" << str_rips_persistence << std::endl;
+ std::string str_persistence = test_persistence(min_coefficient, max_coefficient, 0.0);
+ std::clog << "str_persistence=" << str_persistence << std::endl;
- BOOST_CHECK(str_rips_persistence.find(value0) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value1) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value2) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value0) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value1) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value2) != std::string::npos); // Check found
- if ((min_dimension < 2) && (max_dimension < 2)) {
- BOOST_CHECK(str_rips_persistence.find(value3) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value4) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value5) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value6) != std::string::npos); // Check found
- BOOST_CHECK(str_rips_persistence.find(value7) != std::string::npos); // Check found
+ if ((min_coefficient < 2) && (max_coefficient < 2)) {
+ BOOST_CHECK(str_persistence.find(value3) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value4) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value5) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value6) != std::string::npos); // Check found
+ BOOST_CHECK(str_persistence.find(value7) != std::string::npos); // Check found
} else {
- BOOST_CHECK(str_rips_persistence.find(value3) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value4) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value5) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value6) == std::string::npos); // Check not found
- BOOST_CHECK(str_rips_persistence.find(value7) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value3) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value4) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value5) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value6) == std::string::npos); // Check not found
+ BOOST_CHECK(str_persistence.find(value7) == std::string::npos); // Check not found
}
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_1_2) {
- test_rips_persistence_in_dimension(0, 1);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_0) {
+ test_persistence_with_coeff_field(0, 0);
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_2_3) {
- test_rips_persistence_in_dimension(1, 3);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_1) {
+ test_persistence_with_coeff_field(0, 1);
}
-BOOST_AUTO_TEST_CASE(rips_persistent_cohomology_multi_field_dim_1_5) {
- test_rips_persistence_in_dimension(1, 5);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_0_6) {
+ test_persistence_with_coeff_field(0, 6);
}
-// TODO(VR): not working from 6
-// std::string str_rips_persistence = test_rips_persistence(6, 0);
-// TODO(VR): division by zero
-// std::string str_rips_persistence = test_rips_persistence(0, 0);
-// TODO(VR): is result OK of :
-// test_rips_persistence_in_dimension(3, 4);
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_2) {
+ test_persistence_with_coeff_field(1, 2);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_3) {
+ test_persistence_with_coeff_field(1, 3);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_1_5) {
+ test_persistence_with_coeff_field(1, 5);
+}
+
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_2_3) {
+ test_persistence_with_coeff_field(2, 3);
+}
+BOOST_AUTO_TEST_CASE(persistent_cohomology_multi_field_coeff_3_4) {
+ test_persistence_with_coeff_field(3, 4);
+}
diff --git a/src/Rips_complex/doc/Intro_rips_complex.h b/src/Rips_complex/doc/Intro_rips_complex.h
index b2840686..3888ec8f 100644
--- a/src/Rips_complex/doc/Intro_rips_complex.h
+++ b/src/Rips_complex/doc/Intro_rips_complex.h
@@ -64,7 +64,7 @@ namespace rips_complex {
* And so on for simplex (0,1,2,3).
*
* If the Rips_complex interfaces are not detailed enough for your need, please refer to
- * <a href="_persistent_cohomology_2rips_persistence_step_by_step_8cpp-example.html">
+ * <a href="rips_persistence_step_by_step_8cpp-example.html">
* rips_persistence_step_by_step.cpp</a> example, where the constructions of the graph and
* the Simplex_tree are more detailed.
*
@@ -111,7 +111,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_points.cpp
+ * \include example_one_skeleton_rips_from_points.cpp
*
* When launching (Rips maximal distance between 2 points is 12.0, is expanded
* until dimension 1 - one skeleton graph in other words):
@@ -121,7 +121,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_for_doc.txt
+ * \include one_skeleton_rips_for_doc.txt
*
* \subsection ripsoffexample Example from OFF file
*
@@ -132,7 +132,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the Rips complex.
*
- * \include Rips_complex/example_rips_complex_from_off_file.cpp
+ * \include example_rips_complex_from_off_file.cpp
*
* When launching:
*
@@ -141,7 +141,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/full_skeleton_rips_for_doc.txt
+ * \include full_skeleton_rips_for_doc.txt
*
*
* \subsection sparseripspointscloudexample Example of a sparse Rips from a point cloud
@@ -149,7 +149,7 @@ namespace rips_complex {
* This example builds the full sparse Rips of a set of 2D Euclidean points, then prints some minimal
* information about the complex.
*
- * \include Rips_complex/example_sparse_rips.cpp
+ * \include example_sparse_rips.cpp
*
* When launching:
*
@@ -172,7 +172,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp
+ * \include example_one_skeleton_rips_from_distance_matrix.cpp
*
* When launching (Rips maximal distance between 2 points is 1.0, is expanded until dimension 1 - one skeleton graph
 * in other words):
@@ -182,7 +182,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_for_doc.txt
+ * \include one_skeleton_rips_for_doc.txt
*
* \subsection ripscsvdistanceexample Example from a distance matrix read in a csv file
*
@@ -192,7 +192,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the Rips complex.
*
- * \include Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp
+ * \include example_rips_complex_from_csv_distance_matrix_file.cpp
*
* When launching:
*
@@ -201,7 +201,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/full_skeleton_rips_for_doc.txt
+ * \include full_skeleton_rips_for_doc.txt
*
*
* \section ripscorrelationematrix Correlation matrix
@@ -213,7 +213,7 @@ namespace rips_complex {
*
* Then, it is asked to display information about the simplicial complex.
*
- * \include Rips_complex/example_one_skeleton_rips_from_correlation_matrix.cpp
+ * \include example_one_skeleton_rips_from_correlation_matrix.cpp
*
* When launching:
*
@@ -222,7 +222,7 @@ namespace rips_complex {
*
* the program output is:
*
- * \include Rips_complex/one_skeleton_rips_from_correlation_matrix_for_doc.txt
+ * \include one_skeleton_rips_from_correlation_matrix_for_doc.txt
*
* All the other constructions discussed for Rips complex for distance matrix can be also performed for Rips complexes
* construction from correlation matrices.
diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
index a5501004..7ae7b317 100644
--- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
+++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h
@@ -15,12 +15,71 @@
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/choose_n_farthest_points.h>
-#include <boost/graph/adjacency_list.hpp>
+#include <boost/graph/graph_traits.hpp>
#include <boost/range/metafunctions.hpp>
+#include <boost/iterator/counting_iterator.hpp>
#include <vector>
namespace Gudhi {
+namespace rips_complex {
+// A custom graph class, because boost::adjacency_list does not conveniently allow to choose vertex descriptors
+template <class Vertex_handle, class Filtration_value>
+struct Graph {
+ typedef std::vector<Vertex_handle> VList;
+ typedef std::vector<std::tuple<Vertex_handle, Vertex_handle, Filtration_value>> EList;
+ typedef typename VList::const_iterator vertex_iterator;
+ typedef boost::counting_iterator<std::size_t> edge_iterator;
+ VList vlist;
+ EList elist;
+};
+template <class Vertex_handle, class Filtration_value>
+void add_vertex(Vertex_handle v, Graph<Vertex_handle, Filtration_value>&g) { g.vlist.push_back(v); }
+template <class Vertex_handle, class Filtration_value>
+void add_edge(Vertex_handle u, Vertex_handle v, Filtration_value f, Graph<Vertex_handle, Filtration_value>&g) { g.elist.emplace_back(u, v, f); }
+template <class Vertex_handle, class Filtration_value>
+std::size_t num_vertices(Graph<Vertex_handle, Filtration_value> const&g) { return g.vlist.size(); }
+template <class Vertex_handle, class Filtration_value>
+std::size_t num_edges(Graph<Vertex_handle, Filtration_value> const&g) { return g.elist.size(); }
+template <class Vertex_handle, class Filtration_value, class Iter = typename Graph<Vertex_handle, Filtration_value>::vertex_iterator>
+std::pair<Iter, Iter>
+vertices(Graph<Vertex_handle, Filtration_value> const&g) {
+ return { g.vlist.begin(), g.vlist.end() };
+}
+template <class Vertex_handle, class Filtration_value>
+std::pair<boost::counting_iterator<std::size_t>, boost::counting_iterator<std::size_t>>
+edges(Graph<Vertex_handle, Filtration_value> const&g) {
+ typedef boost::counting_iterator<std::size_t> I;
+ return { I(0), I(g.elist.size()) };
+}
+template <class Vertex_handle, class Filtration_value>
+Vertex_handle source(std::size_t e, Graph<Vertex_handle, Filtration_value> const&g) { return std::get<0>(g.elist[e]); }
+template <class Vertex_handle, class Filtration_value>
+Vertex_handle target(std::size_t e, Graph<Vertex_handle, Filtration_value> const&g) { return std::get<1>(g.elist[e]); }
+template <class Vertex_handle, class Filtration_value>
+Filtration_value get(vertex_filtration_t, Graph<Vertex_handle, Filtration_value> const&, Vertex_handle) { return 0; }
+template <class Vertex_handle, class Filtration_value>
+Filtration_value get(edge_filtration_t, Graph<Vertex_handle, Filtration_value> const&g, std::size_t e) { return std::get<2>(g.elist[e]); }
+} // namespace rips_complex
+} // namespace Gudhi
+namespace boost {
+template <class Vertex_handle, class Filtration_value>
+struct graph_traits<Gudhi::rips_complex::Graph<Vertex_handle, Filtration_value>> {
+ typedef Gudhi::rips_complex::Graph<Vertex_handle, Filtration_value> G;
+ struct traversal_category : vertex_list_graph_tag, edge_list_graph_tag {};
+ typedef Vertex_handle vertex_descriptor;
+ typedef typename G::vertex_iterator vertex_iterator;
+ typedef std::size_t vertices_size_type;
+ typedef std::size_t edge_descriptor;
+ typedef typename G::edge_iterator edge_iterator;
+ typedef std::size_t edges_size_type;
+ typedef directed_tag directed_category;
+ typedef disallow_parallel_edge_tag edge_parallel_category;
+};
+// Etc, since we don't expose this graph to the world, we know we are not going to query property_traits for instance.
+}
+
+namespace Gudhi {
namespace rips_complex {
@@ -45,12 +104,8 @@ template <typename Filtration_value>
class Sparse_rips_complex {
private:
// TODO(MG): use a different graph where we know we can safely insert in parallel.
- typedef typename boost::adjacency_list<boost::vecS, boost::vecS, boost::directedS,
- boost::property<vertex_filtration_t, Filtration_value>,
- boost::property<edge_filtration_t, Filtration_value>>
- Graph;
-
typedef int Vertex_handle;
+ typedef rips_complex::Graph<Vertex_handle, Filtration_value> Graph;
public:
/** \brief Sparse_rips_complex constructor from a list of points.
@@ -63,10 +118,11 @@ class Sparse_rips_complex {
*
*/
template <typename RandomAccessPointRange, typename Distance>
- Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double epsilon, Filtration_value mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value maxi=std::numeric_limits<Filtration_value>::infinity())
+ Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double const epsilon, Filtration_value const mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value const maxi=std::numeric_limits<Filtration_value>::infinity())
: epsilon_(epsilon) {
GUDHI_CHECK(epsilon > 0, "epsilon must be positive");
auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); };
+ // TODO: stop choose_n_farthest_points once it reaches mini or 0?
subsampling::choose_n_farthest_points(dist_fun, boost::irange<Vertex_handle>(0, boost::size(points)), -1, -1,
std::back_inserter(sorted_points), std::back_inserter(params));
compute_sparse_graph(dist_fun, epsilon, mini, maxi);
@@ -83,7 +139,7 @@ class Sparse_rips_complex {
* @param[in] maxi Maximal filtration value. Ignore anything above this scale.
*/
template <typename DistanceMatrix>
- Sparse_rips_complex(const DistanceMatrix& distance_matrix, double epsilon, Filtration_value mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value maxi=std::numeric_limits<Filtration_value>::infinity())
+ Sparse_rips_complex(const DistanceMatrix& distance_matrix, double const epsilon, Filtration_value const mini=-std::numeric_limits<Filtration_value>::infinity(), Filtration_value const maxi=std::numeric_limits<Filtration_value>::infinity())
: Sparse_rips_complex(boost::irange<Vertex_handle>(0, boost::size(distance_matrix)),
[&](Vertex_handle i, Vertex_handle j) { return (i==j) ? 0 : (i<j) ? distance_matrix[j][i] : distance_matrix[i][j]; },
epsilon, mini, maxi) {}
@@ -99,7 +155,7 @@ class Sparse_rips_complex {
*
*/
template <typename SimplicialComplexForRips>
- void create_complex(SimplicialComplexForRips& complex, int dim_max) {
+ void create_complex(SimplicialComplexForRips& complex, int const dim_max) {
GUDHI_CHECK(complex.num_vertices() == 0,
std::invalid_argument("Sparse_rips_complex::create_complex - simplicial complex is not empty"));
@@ -108,17 +164,17 @@ class Sparse_rips_complex {
complex.expansion(dim_max);
return;
}
- const int n = boost::size(params);
- std::vector<Filtration_value> lambda(n);
+ const std::size_t n = num_vertices(graph_);
+ std::vector<Filtration_value> lambda(max_v + 1);
// lambda[original_order]=params[sorted_order]
- for(int i=0;i<n;++i)
+ for(std::size_t i=0;i<n;++i)
lambda[sorted_points[i]] = params[i];
double cst = epsilon_ * (1 - epsilon_) / 2;
auto block = [cst,&complex,&lambda](typename SimplicialComplexForRips::Simplex_handle sh){
auto filt = complex.filtration(sh);
- auto mini = filt * cst;
+ auto min_f = filt * cst;
for(auto v : complex.simplex_vertex_range(sh)){
- if(lambda[v] < mini)
+ if(lambda[v] < min_f)
return true; // v died before this simplex could be born
}
return false;
@@ -129,32 +185,34 @@ class Sparse_rips_complex {
private:
// PointRange must be random access.
template <typename Distance>
- void compute_sparse_graph(Distance& dist, double epsilon, Filtration_value mini, Filtration_value maxi) {
+ void compute_sparse_graph(Distance& dist, double const epsilon, Filtration_value const mini, Filtration_value const maxi) {
const auto& points = sorted_points; // convenience alias
- const int n = boost::size(points);
+ std::size_t n = boost::size(points);
double cst = epsilon * (1 - epsilon) / 2;
- graph_.~Graph();
- new (&graph_) Graph(n);
- // for(auto v : vertices(g)) // doesn't work :-(
- typename boost::graph_traits<Graph>::vertex_iterator v_i, v_e;
- for (std::tie(v_i, v_e) = vertices(graph_); v_i != v_e; ++v_i) {
- auto v = *v_i;
- // This whole loop might not be necessary, leave it until someone investigates if it is safe to remove.
- put(vertex_filtration_t(), graph_, v, 0);
+ max_v = -1; // Useful for the size of the map lambda.
+ for (std::size_t i = 0; i < n; ++i) {
+ if ((params[i] < mini || params[i] <= 0) && i != 0) break;
+ // The parameter of the first point is not very meaningful, it is supposed to be infinite,
+ // but if the type does not support it...
+ // It would be better to do this reduction of the number of points earlier, around choose_n_farthest_points.
+ add_vertex(points[i], graph_);
+ max_v = std::max(max_v, points[i]);
}
+ n = num_vertices(graph_);
// TODO(MG):
// - make it parallel
// - only test near-enough neighbors
- for (int i = 0; i < n; ++i) {
+ for (std::size_t i = 0; i < n; ++i) {
auto&& pi = points[i];
auto li = params[i];
- if (li < mini) break;
- for (int j = i + 1; j < n; ++j) {
+ // If we inserted all the points, points with multiplicity would get connected to their first representative,
+ // no need to handle the redundant ones in the outer loop.
+ // if (li <= 0 && i != 0) break;
+ for (std::size_t j = i + 1; j < n; ++j) {
auto&& pj = points[j];
auto d = dist(pi, pj);
auto lj = params[j];
- if (lj < mini) break;
GUDHI_CHECK(lj <= li, "Bad furthest point sorting");
Filtration_value alpha;
@@ -178,6 +236,7 @@ class Sparse_rips_complex {
Graph graph_;
double epsilon_;
+ Vertex_handle max_v;
// Because of the arbitrary split between constructor and create_complex
// sorted_points[sorted_order]=original_order
std::vector<Vertex_handle> sorted_points;
diff --git a/src/Simplex_tree/doc/Intro_simplex_tree.h b/src/Simplex_tree/doc/Intro_simplex_tree.h
index 800879fe..ef8dec91 100644
--- a/src/Simplex_tree/doc/Intro_simplex_tree.h
+++ b/src/Simplex_tree/doc/Intro_simplex_tree.h
@@ -39,10 +39,10 @@ namespace Gudhi {
* \subsubsection filteredcomplexessimplextreeexamples Examples
*
* Here is a list of simplex tree examples :
- * \li <a href="_simplex_tree_2simple_simplex_tree_8cpp-example.html">
+ * \li <a href="simple_simplex_tree_8cpp-example.html">
* Simplex_tree/simple_simplex_tree.cpp</a> - Simple simplex tree construction and basic function use.
*
- * \li <a href="_simplex_tree_2simplex_tree_from_cliques_of_graph_8cpp-example.html">
+ * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
* Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a> - Simplex tree construction from cliques of graph read in
* a file.
*
@@ -54,11 +54,11 @@ Expand the simplex tree in 3.8e-05 s.
Information of the Simplex Tree:
Number of vertices = 10 Number of simplices = 98 \endcode
*
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
+ * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
* Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a> - Simplex tree is computed and displayed
* from a 3D alpha complex (Requires CGAL, GMP and GMPXX to be installed).
*
- * \li <a href="_simplex_tree_2graph_expansion_with_blocker_8cpp-example.html">
+ * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
* Simplex_tree/graph_expansion_with_blocker.cpp</a> - Simple simplex tree construction from a one-skeleton graph with
* a simple blocker expansion method.
*
diff --git a/src/Simplex_tree/example/CMakeLists.txt b/src/Simplex_tree/example/CMakeLists.txt
index 73b2c6f9..81d352fc 100644
--- a/src/Simplex_tree/example/CMakeLists.txt
+++ b/src/Simplex_tree/example/CMakeLists.txt
@@ -29,7 +29,7 @@ if(GMP_FOUND AND NOT CGAL_VERSION VERSION_LESS 4.11.0)
target_link_libraries(Simplex_tree_example_alpha_shapes_3_from_off ${TBB_LIBRARIES})
endif()
add_test(NAME Simplex_tree_example_alpha_shapes_3_from_off COMMAND $<TARGET_FILE:Simplex_tree_example_alpha_shapes_3_from_off>
- "${CMAKE_SOURCE_DIR}/data/points/bunny_5000.off")
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_1307.off")
endif()
diff --git a/src/Simplex_tree/example/README b/src/Simplex_tree/example/README
deleted file mode 100644
index a9498173..00000000
--- a/src/Simplex_tree/example/README
+++ /dev/null
@@ -1,73 +0,0 @@
-To build the example, run in a Terminal:
-
-cd /path-to-gudhi/
-cmake .
-cd /path-to-example/
-make
-
-
-Example of use :
-
-*** Simple simplex tree construction
-
-./Simplex_tree_example_simple_simplex_tree
-
-********************************************************************
-EXAMPLE OF SIMPLE INSERTION
- * INSERT 0
- + 0 INSERTED
- * INSERT 1
- + 1 INSERTED
- * INSERT (0,1)
- + (0,1) INSERTED
- * INSERT 2
- + 2 INSERTED
- * INSERT (2,0)
- + (2,0) INSERTED
- * INSERT (2,1)
- + (2,1) INSERTED
- * INSERT (2,1,0)
- + (2,1,0) INSERTED
- * INSERT 3
- + 3 INSERTED
- * INSERT (3,0)
- + (3,0) INSERTED
- * INSERT 0 (already inserted)
- - 0 NOT INSERTED
- * INSERT (2,1,0) (already inserted)
- - (2,1,0) NOT INSERTED
-********************************************************************
-* The complex contains 9 simplices
- - dimension 2 - filtration 0.4
-* Iterator on Simplices in the filtration, with [filtration value]:
- [0.1] 0
- [0.1] 1
- [0.1] 2
- [0.1] 3
- [0.2] 1 0
- [0.2] 2 0
- [0.2] 2 1
- [0.2] 3 0
- [0.3] 2 1 0
-
-*** Simplex tree construction with Z/2Z coefficients on weighted graph Klein bottle file:
-
-./Simplex_tree_example_from_cliques_of_graph ../../../data/points/Klein_bottle_complex.txt 2
-Insert the 1-skeleton in the simplex tree in 0 s.
-Expand the simplex tree in 0 s.
-Information of the Simplex Tree:
- Number of vertices = 10 Number of simplices = 82
-
-with Z/3Z coefficients:
-
-./Simplex_tree_example_from_cliques_of_graph ../../../data/points/Klein_bottle_complex.txt 3
-
-Insert the 1-skeleton in the simplex tree in 0 s.
-Expand the simplex tree in 0 s.
-Information of the Simplex Tree:
- Number of vertices = 10 Number of simplices = 106
-
-*** Simplex_tree computed and displayed from a 3D alpha complex:
- [ Requires CGAL, GMP and GMPXX to be installed]
-
-./Simplex_tree_example_alpha_shapes_3_from_off ../../../data/points/bunny_5000
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 85d6c3b0..85790baf 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -1060,8 +1060,8 @@ class Simplex_tree {
*
* Inserts all vertices and edges given by a OneSkeletonGraph.
* OneSkeletonGraph must be a model of
- * <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/EdgeListGraph.html">boost::EdgeListGraph</a>
- * and <a href="http://www.boost.org/doc/libs/1_65_1/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
+ * <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
+ * and <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
*
* The vertex filtration value is accessible through the property tag
* vertex_filtration_t.
@@ -1081,7 +1081,10 @@ class Simplex_tree {
// the simplex tree must be empty
assert(num_simplices() == 0);
- if (boost::num_vertices(skel_graph) == 0) {
+ // is there a better way to let the compiler know that we don't mean Simplex_tree::num_vertices?
+ using boost::num_vertices;
+
+ if (num_vertices(skel_graph) == 0) {
return;
}
if (num_edges(skel_graph) == 0) {
@@ -1090,18 +1093,18 @@ class Simplex_tree {
dimension_ = 1;
}
- root_.members_.reserve(boost::num_vertices(skel_graph));
+ root_.members_.reserve(num_vertices(skel_graph));
typename boost::graph_traits<OneSkeletonGraph>::vertex_iterator v_it,
v_it_end;
- for (std::tie(v_it, v_it_end) = boost::vertices(skel_graph); v_it != v_it_end;
+ for (std::tie(v_it, v_it_end) = vertices(skel_graph); v_it != v_it_end;
++v_it) {
root_.members_.emplace_hint(
root_.members_.end(), *v_it,
- Node(&root_, boost::get(vertex_filtration_t(), skel_graph, *v_it)));
+ Node(&root_, get(vertex_filtration_t(), skel_graph, *v_it)));
}
std::pair<typename boost::graph_traits<OneSkeletonGraph>::edge_iterator,
- typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = boost::edges(skel_graph);
+ typename boost::graph_traits<OneSkeletonGraph>::edge_iterator> boost_edges = edges(skel_graph);
// boost_edges.first is the equivalent to boost_edges.begin()
// boost_edges.second is the equivalent to boost_edges.end()
for (; boost_edges.first != boost_edges.second; boost_edges.first++) {
@@ -1123,7 +1126,7 @@ class Simplex_tree {
}
sh->second.children()->members().emplace(v,
- Node(sh->second.children(), boost::get(edge_filtration_t(), skel_graph, edge)));
+ Node(sh->second.children(), get(edge_filtration_t(), skel_graph, edge)));
}
}
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
index 653a63fd..0fd56c67 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker.h
@@ -52,8 +52,7 @@ when \f$ \tau \neq \sigma\f$ we say that \f$ \tau\f$ is a proper-face of \f$ \si
An abstract simplicial complex is a set of simplices that contains all the faces of its simplices.
The 1-skeleton of a simplicial complex (or its graph) consists of its elements of dimension lower than 2.
- *\image html "ds_representation.png" "Skeleton-blocker representation" width=20cm
-
+\image html "ds_representation.png" "Skeleton-blocker representation"
To encode, a simplicial complex, one can encodes all its simplices.
In case when this number gets too large,
@@ -73,11 +72,7 @@ For instance, the numbers of blockers is depicted for random 3-dimensional spher
in next figure. Storing the graph and blockers of such simplicial complexes is much compact in this case than storing
their simplices.
-
- *\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres" width=10cm
-
-
-
+\image html "blockers_curve.png" "Number of blockers of random triangulations of 3-spheres"
\section API
diff --git a/src/Spatial_searching/doc/Intro_spatial_searching.h b/src/Spatial_searching/doc/Intro_spatial_searching.h
index 30805570..81c5a3aa 100644
--- a/src/Spatial_searching/doc/Intro_spatial_searching.h
+++ b/src/Spatial_searching/doc/Intro_spatial_searching.h
@@ -36,7 +36,7 @@ namespace spatial_searching {
*
* This example generates 500 random points, then performs all-near-neighbors searches, and queries for nearest and furthest neighbors using different methods.
*
- * \include Spatial_searching/example_spatial_searching.cpp
+ * \include example_spatial_searching.cpp
*
*/
/** @} */ // end defgroup spatial_searching
diff --git a/src/Spatial_searching/include/gudhi/Kd_tree_search.h b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
index a50a8537..6fb611f2 100644
--- a/src/Spatial_searching/include/gudhi/Kd_tree_search.h
+++ b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
@@ -139,7 +139,7 @@ class Kd_tree_search {
}
template <typename Coord_iterator>
- bool contains_point_given_as_coordinates(Coord_iterator pi, Coord_iterator CGAL_UNUSED) const {
+ bool contains_point_given_as_coordinates(Coord_iterator pi, Coord_iterator) const {
FT distance = 0;
auto ccci = traits.construct_cartesian_const_iterator_d_object();
auto ci = ccci(c);
diff --git a/src/Subsampling/doc/Intro_subsampling.h b/src/Subsampling/doc/Intro_subsampling.h
index 1c84fb2e..1c366fe6 100644
--- a/src/Subsampling/doc/Intro_subsampling.h
+++ b/src/Subsampling/doc/Intro_subsampling.h
@@ -32,20 +32,20 @@ namespace subsampling {
* squared distance between any two points
* is greater than or equal to 0.4.
*
- * \include Subsampling/example_sparsify_point_set.cpp
+ * \include example_sparsify_point_set.cpp
*
* \section farthestpointexamples Example: choose_n_farthest_points
*
* This example outputs a subset of 100 points obtained by Gonz&aacute;lez algorithm,
* starting with a random point.
*
- * \include Subsampling/example_choose_n_farthest_points.cpp
+ * \include example_choose_n_farthest_points.cpp
*
* \section randompointexamples Example: pick_n_random_points
*
* This example outputs a subset of 100 points picked randomly.
*
- * \include Subsampling/example_pick_n_random_points.cpp
+ * \include example_pick_n_random_points.cpp
*/
/** @} */ // end defgroup subsampling
diff --git a/src/Subsampling/include/gudhi/choose_n_farthest_points.h b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
index e6347d96..44c02df1 100644
--- a/src/Subsampling/include/gudhi/choose_n_farthest_points.h
+++ b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
@@ -42,7 +42,7 @@ enum : std::size_t {
* The iteration starts with the landmark `starting point` or, if `starting point==random_starting_point`,
* with a random landmark.
* It chooses `final_size` points from a random access range
- * `input_pts` (or the number of distinct points if `final_size` is larger)
+ * `input_pts` (or the number of input points if `final_size` is larger)
* and outputs them in the output iterator `output_it`. It also
* outputs the distance from each of those points to the set of previous
* points in `dist_it`.
@@ -88,34 +88,57 @@ void choose_n_farthest_points(Distance dist,
starting_point = dis(gen);
}
- std::size_t current_number_of_landmarks = 0; // counter for landmarks
- static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
// FIXME: don't hard-code the type as double. For Epeck_d, we also want to handle types that do not have an infinity.
- const double infty = std::numeric_limits<double>::infinity(); // infinity (see next entry)
- std::vector< double > dist_to_L(nb_points, infty); // vector of current distances to L from input_pts
+ static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
+
+ *output_it++ = input_pts[starting_point];
+ *dist_it++ = std::numeric_limits<double>::infinity();
+ if (final_size == 1) return;
+
+ std::vector<std::size_t> points(nb_points); // map from remaining points to indexes in input_pts
+ std::vector< double > dist_to_L(nb_points); // vector of current distances to L from points
+ for(std::size_t i = 0; i < nb_points; ++i) {
+ points[i] = i;
+ dist_to_L[i] = dist(input_pts[i], input_pts[starting_point]);
+ }
+ // The indirection through points makes the program a bit slower. Some alternatives:
+ // - the original code never removed points and counted on them not
+ // reappearing because of a self-distance of 0. This causes unnecessary
+ // computations when final_size is large. It also causes trouble if there are
+ // input points at distance 0 from each other.
+ // - copy input_pts and update the local copy when removing points.
std::size_t curr_max_w = starting_point;
- for (current_number_of_landmarks = 0; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
- // curr_max_w at this point is the next landmark
- *output_it++ = input_pts[curr_max_w];
- *dist_it++ = dist_to_L[curr_max_w];
+ for (std::size_t current_number_of_landmarks = 1; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
+ std::size_t latest_landmark = points[curr_max_w];
+ // To remove the latest landmark at index curr_max_w, replace it
+ // with the last point and reduce the length of the vector.
+ std::size_t last = points.size() - 1;
+ if (curr_max_w != last) {
+ points[curr_max_w] = points[last];
+ dist_to_L[curr_max_w] = dist_to_L[last];
+ }
+ points.pop_back();
+
+ // Update distances to L.
std::size_t i = 0;
- for (auto&& p : input_pts) {
- double curr_dist = dist(p, input_pts[curr_max_w]);
+ for (auto p : points) {
+ double curr_dist = dist(input_pts[p], input_pts[latest_landmark]);
if (curr_dist < dist_to_L[i])
dist_to_L[i] = curr_dist;
++i;
}
- // choose the next curr_max_w
- double curr_max_dist = 0; // used for defining the furhest point from L
- for (i = 0; i < dist_to_L.size(); i++)
+ // choose the next landmark
+ curr_max_w = 0;
+ double curr_max_dist = dist_to_L[curr_max_w]; // used for defining the furthest point from L
+ for (i = 1; i < points.size(); i++)
if (dist_to_L[i] > curr_max_dist) {
curr_max_dist = dist_to_L[i];
curr_max_w = i;
}
- // If all that remains are duplicates of points already taken, stop.
- if (curr_max_dist == 0) break;
+ *output_it++ = input_pts[points[curr_max_w]];
+ *dist_it++ = dist_to_L[curr_max_w];
}
}
diff --git a/src/Subsampling/test/test_choose_n_farthest_points.cpp b/src/Subsampling/test/test_choose_n_farthest_points.cpp
index 94793295..c384c61b 100644
--- a/src/Subsampling/test/test_choose_n_farthest_points.cpp
+++ b/src/Subsampling/test/test_choose_n_farthest_points.cpp
@@ -102,11 +102,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
BOOST_CHECK(distances[1] == 1);
landmarks.clear(); distances.clear();
- // Ignore duplicated points
+ // Accept duplicated points
points.emplace_back(point.begin(), point.end());
Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
- BOOST_CHECK(landmarks.size() == 2 && distances.size() == 2);
+ BOOST_CHECK(landmarks.size() == 3 && distances.size() == 3);
BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
BOOST_CHECK(distances[1] == 1);
+ BOOST_CHECK(distances[2] == 0);
landmarks.clear(); distances.clear();
}
diff --git a/src/Tangential_complex/benchmark/benchmark_tc.cpp b/src/Tangential_complex/benchmark/benchmark_tc.cpp
index e3b2a04f..6da1425f 100644
--- a/src/Tangential_complex/benchmark/benchmark_tc.cpp
+++ b/src/Tangential_complex/benchmark/benchmark_tc.cpp
@@ -704,7 +704,7 @@ int main() {
points = Gudhi::generate_points_on_torus_d<Kernel>(
num_points,
intrinsic_dim,
- param1 == "Y", // uniform
+ (param1 == "Y") ? "grid" : "random", // grid or random sample type
std::atof(param2.c_str())); // radius_noise_percentage
} else if (input == "generate_klein_bottle_3D") {
points = Gudhi::generate_points_on_klein_bottle_3D<Kernel>(
diff --git a/src/Tangential_complex/doc/Intro_tangential_complex.h b/src/Tangential_complex/doc/Intro_tangential_complex.h
index ce277185..cb8c6122 100644
--- a/src/Tangential_complex/doc/Intro_tangential_complex.h
+++ b/src/Tangential_complex/doc/Intro_tangential_complex.h
@@ -88,7 +88,7 @@ This example builds the Tangential complex of point set.
Note that the dimension of the kernel here is dynamic, which is slower, but more flexible:
the intrinsic and ambient dimensions does not have to be known at compile-time.
-\include Tangential_complex/example_basic.cpp
+\include example_basic.cpp
\section example_with_perturb Example with perturbation
@@ -97,7 +97,7 @@ by perturbing the positions of points involved in inconsistent simplices.
Note that the dimension of the kernel here is static, which is the best choice when the
dimensions are known at compile-time.
-\include Tangential_complex/example_with_perturb.cpp
+\include example_with_perturb.cpp
*/
/** @} */ // end defgroup tangential_complex
diff --git a/src/Toplex_map/benchmark/CMakeLists.txt b/src/Toplex_map/benchmark/CMakeLists.txt
index 2d58a156..6703d9d0 100644
--- a/src/Toplex_map/benchmark/CMakeLists.txt
+++ b/src/Toplex_map/benchmark/CMakeLists.txt
@@ -1,3 +1,7 @@
project(Toplex_map_benchmark)
add_executable(Toplex_map_benchmark benchmark_tm.cpp)
+
+if (TBB_FOUND)
+ target_link_libraries(Toplex_map_benchmark ${TBB_LIBRARIES})
+endif()
diff --git a/src/Witness_complex/doc/Witness_complex_doc.h b/src/Witness_complex/doc/Witness_complex_doc.h
index 202f4539..c66b106e 100644
--- a/src/Witness_complex/doc/Witness_complex_doc.h
+++ b/src/Witness_complex/doc/Witness_complex_doc.h
@@ -108,14 +108,14 @@ int main(int argc, char * const argv[]) {
Here is an example of constructing a strong witness complex filtration and computing persistence on it:
- \include Witness_complex/strong_witness_persistence.cpp
+ \include strong_witness_persistence.cpp
\section witnessexample3 Example3: Computing relaxed witness complex persistence from a distance matrix
In this example we compute the relaxed witness complex persistence from a given matrix of closest landmarks to each witness.
Each landmark is given as the couple (index, distance).
- \include Witness_complex/example_nearest_landmark_table.cpp
+ \include example_nearest_landmark_table.cpp
*/
diff --git a/src/cmake/modules/GUDHI_doxygen_target.cmake b/src/cmake/modules/GUDHI_doxygen_target.cmake
index 7a84c4e0..0f80b187 100644
--- a/src/cmake/modules/GUDHI_doxygen_target.cmake
+++ b/src/cmake/modules/GUDHI_doxygen_target.cmake
@@ -8,14 +8,47 @@ if(DOXYGEN_FOUND)
get_property(DOXYGEN_EXECUTABLE TARGET Doxygen::doxygen PROPERTY IMPORTED_LOCATION)
endif()
- add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${GUDHI_USER_VERSION_DIR}/Doxyfile
- WORKING_DIRECTORY ${GUDHI_USER_VERSION_DIR}
- COMMENT "Generating API documentation with Doxygen in ${GUDHI_USER_VERSION_DIR}/doc/html/" VERBATIM)
-
- if(TARGET user_version)
- # In dev version, doxygen target depends on user_version target. Not existing in user version
- add_dependencies(doxygen user_version)
+ message("++ Project = ${CMAKE_PROJECT_NAME}")
+ if (CMAKE_PROJECT_NAME STREQUAL "GUDHIdev")
+ # Set Doxyfile.in variables for the developer version
+ set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}/src")
+ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/")
+ set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/ \\ \n")
+ endif()
+ endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/common/doc")
+ set(GUDHI_DOXYGEN_UTILS_PATH "*/utilities")
+ endif()
+ if (CMAKE_PROJECT_NAME STREQUAL "GUDHI")
+ # Set Doxyfile.in variables for the user version
+ set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}")
+ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}")
+ set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}/ \\ \n")
+ endif()
+ endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
+ set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/common")
+ set(GUDHI_DOXYGEN_UTILS_PATH "utilities/*")
endif()
+
+ configure_file(${GUDHI_DOXYGEN_SOURCE_PREFIX}/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile" @ONLY)
+
+ add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMENT "Generating API documentation with Doxygen in 'html' directory" VERBATIM)
else()
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "cpp-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
endif()
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index e1566877..21c9d47b 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -6,6 +6,7 @@ find_package(Boost 1.56.0 QUIET OPTIONAL_COMPONENTS filesystem unit_test_framewo
if(NOT Boost_VERSION)
message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.")
endif(NOT Boost_VERSION)
+include_directories(${Boost_INCLUDE_DIRS})
find_package(GMP)
if(GMP_FOUND)
@@ -156,6 +157,8 @@ if( PYTHONINTERP_FOUND )
find_python_module("eagerpy")
find_python_module_no_version("hnswlib")
find_python_module("tensorflow")
+ find_python_module("sphinx_paramlinks")
+ find_python_module_no_version("python_docs_theme")
endif()
if(NOT GUDHI_PYTHON_PATH)
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index e4f39aae..9e76c3d9 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -14,14 +14,7 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
make_directory ${GUDHI_USER_VERSION_DIR}
COMMENT "user_version creation in ${GUDHI_USER_VERSION_DIR}")
-foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
- set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} doc/${GUDHI_MODULE}/ \\ \n")
-endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
-
-# Generate Doxyfile for Doxygen - cf. root CMakeLists.txt for explanation
-configure_file(${CMAKE_SOURCE_DIR}/src/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile" @ONLY)
-add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy ${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile ${GUDHI_USER_VERSION_DIR}/Doxyfile)
+file(COPY "${CMAKE_SOURCE_DIR}/src/Doxyfile.in" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
# Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation
string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
@@ -48,6 +41,8 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/src/GUDHIConfig.cmake.in ${GUDHI_USER_VERSION_DIR}/GUDHIConfig.cmake.in)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/CMakeGUDHIVersion.txt ${GUDHI_USER_VERSION_DIR}/CMakeGUDHIVersion.txt)
+add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy ${CMAKE_SOURCE_DIR}/src/Doxyfile.in ${GUDHI_USER_VERSION_DIR}/Doxyfile.in)
# As cython generates .cpp files in source, we have to copy all except cpp files from python directory
file(GLOB_RECURSE PYTHON_FILES ${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH}/*)
diff --git a/src/common/benchmark/CMakeLists.txt b/src/common/benchmark/CMakeLists.txt
index a3787d6e..26e4e6af 100644
--- a/src/common/benchmark/CMakeLists.txt
+++ b/src/common/benchmark/CMakeLists.txt
@@ -1,3 +1,7 @@
project(common_benchmark)
add_executable(Graph_simplicial_complex_benchmark Graph_simplicial_complex_benchmark.cpp)
+
+if (TBB_FOUND)
+ target_link_libraries(Graph_simplicial_complex_benchmark ${TBB_LIBRARIES})
+endif()
diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h
index 474f8699..879fb96a 100644
--- a/src/common/doc/examples.h
+++ b/src/common/doc/examples.h
@@ -1,96 +1,134 @@
-// List of GUDHI examples - Doxygen needs at least a file tag to analyse comments
-// In user_version, `find . -name "*.cpp"` in example and utilities folders
+// List of GUDHI examples and utils - Doxygen needs at least a file tag to analyse comments
+// Generated from scripts/cpp_examples_for_doxygen.py
/*! @file Examples
- * @example Alpha_complex/Alpha_complex_from_off.cpp
- * @example Alpha_complex/Alpha_complex_from_points.cpp
- * @example Bottleneck_distance/bottleneck_basic_example.cpp
- * @example Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp
- * @example Witness_complex/example_nearest_landmark_table.cpp
- * @example Witness_complex/example_witness_complex_off.cpp
- * @example Witness_complex/example_witness_complex_sphere.cpp
- * @example Witness_complex/example_strong_witness_complex_off.cpp
- * @example Simplex_tree/mini_simplex_tree.cpp
- * @example Simplex_tree/graph_expansion_with_blocker.cpp
- * @example Simplex_tree/simple_simplex_tree.cpp
- * @example Simplex_tree/simplex_tree_from_cliques_of_graph.cpp
- * @example Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp
- * @example Simplex_tree/cech_complex_cgal_mini_sphere_3d.cpp
- * @example Persistent_cohomology/plain_homology.cpp
- * @example Persistent_cohomology/persistence_from_file.cpp
- * @example Persistent_cohomology/rips_persistence_step_by_step.cpp
- * @example Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp
- * @example Persistent_cohomology/custom_persistence_sort.cpp
- * @example Persistent_cohomology/persistence_from_simple_simplex_tree.cpp
- * @example Persistent_cohomology/rips_multifield_persistence.cpp
- * @example Skeleton_blocker/Skeleton_blocker_from_simplices.cpp
- * @example Skeleton_blocker/Skeleton_blocker_iteration.cpp
- * @example Skeleton_blocker/Skeleton_blocker_link.cpp
- * @example Contraction/Garland_heckbert.cpp
- * @example Contraction/Rips_contraction.cpp
- * @example Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp
- * @example common/example_CGAL_3D_points_off_reader.cpp
- * @example common/example_vector_double_points_off_reader.cpp
- * @example common/example_CGAL_points_off_reader.cpp
- * @example Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp
- * @example Rips_complex/example_one_skeleton_rips_from_points.cpp
- * @example Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp
- * @example Rips_complex/example_rips_complex_from_off_file.cpp
- * @example Persistence_representations/persistence_intervals.cpp
- * @example Persistence_representations/persistence_vectors.cpp
- * @example Persistence_representations/persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_landscape_on_grid.cpp
- * @example Persistence_representations/persistence_landscape.cpp
- * @example Tangential_complex/example_basic.cpp
- * @example Tangential_complex/example_with_perturb.cpp
- * @example Subsampling/example_custom_distance.cpp
- * @example Subsampling/example_choose_n_farthest_points.cpp
- * @example Subsampling/example_sparsify_point_set.cpp
- * @example Subsampling/example_pick_n_random_points.cpp
- * @example Nerve_GIC/CoordGIC.cpp
- * @example Nerve_GIC/Nerve.cpp
- * @example Nerve_GIC/FuncGIC.cpp
- * @example Nerve_GIC/VoronoiGIC.cpp
- * @example Spatial_searching/example_spatial_searching.cpp
- * @example Alpha_complex/alpha_complex_3d_persistence.cpp
- * @example Alpha_complex/alpha_complex_persistence.cpp
- * @example Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp
- * @example Bottleneck_distance/bottleneck_distance.cpp
- * @example Witness_complex/weak_witness_persistence.cpp
- * @example Witness_complex/strong_witness_persistence.cpp
- * @example Bitmap_cubical_complex/cubical_complex_persistence.cpp
- * @example Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp
- * @example common/off_file_from_shape_generator.cpp
- * @example Rips_complex/rips_distance_matrix_persistence.cpp
- * @example Rips_complex/rips_persistence.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
- * @example Persistence_representations/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
- * @example Persistence_representations/persistence_intervals/compute_number_of_dominant_intervals.cpp
- * @example Persistence_representations/persistence_intervals/plot_persistence_Betti_numbers.cpp
- * @example Persistence_representations/persistence_intervals/plot_persistence_intervals.cpp
- * @example Persistence_representations/persistence_intervals/plot_histogram_of_intervals_lengths.cpp
- * @example Persistence_representations/persistence_intervals/compute_bottleneck_distance.cpp
- * @example Persistence_representations/persistence_heat_maps/create_pssk.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
- * @example Persistence_representations/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
- * @example Persistence_representations/persistence_heat_maps/average_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_heat_maps/plot_persistence_heat_map.cpp
- * @example Persistence_representations/persistence_heat_maps/create_persistence_heat_maps.cpp
- * @example Persistence_representations/persistence_vectors/plot_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/compute_distance_of_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/average_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/create_persistence_vectors.cpp
- * @example Persistence_representations/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
- * @example Persistence_representations/persistence_landscapes/average_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/compute_scalar_product_of_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/create_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/compute_distance_of_landscapes.cpp
- * @example Persistence_representations/persistence_landscapes/plot_landscapes.cpp
+ * \section Witness_complex_example_section Witness_complex
+ * @example strong_witness_persistence.cpp
+ * @example weak_witness_persistence.cpp
+ * @example example_witness_complex_off.cpp
+ * @example example_strong_witness_complex_off.cpp
+ * @example example_nearest_landmark_table.cpp
+ * @example example_witness_complex_sphere.cpp
+ * \section Contraction_example_section Contraction
+ * @example Rips_contraction.cpp
+ * @example Garland_heckbert.cpp
+ * \section Simplex_tree_example_section Simplex_tree
+ * @example mini_simplex_tree.cpp
+ * @example cech_complex_cgal_mini_sphere_3d.cpp
+ * @example graph_expansion_with_blocker.cpp
+ * @example simple_simplex_tree.cpp
+ * @example simplex_tree_from_cliques_of_graph.cpp
+ * @example example_alpha_shapes_3_simplex_tree_from_off_file.cpp
+ * \section Persistent_cohomology_example_section Persistent_cohomology
+ * @example custom_persistence_sort.cpp
+ * @example rips_persistence_step_by_step.cpp
+ * @example persistence_from_file.cpp
+ * @example rips_persistence_via_boundary_matrix.cpp
+ * @example plain_homology.cpp
+ * @example rips_multifield_persistence.cpp
+ * @example persistence_from_simple_simplex_tree.cpp
+ * \section Subsampling_example_section Subsampling
+ * @example example_sparsify_point_set.cpp
+ * @example example_choose_n_farthest_points.cpp
+ * @example example_custom_distance.cpp
+ * @example example_pick_n_random_points.cpp
+ * \section Toplex_map_example_section Toplex_map
+ * @example simple_toplex_map.cpp
+ * \section Collapse_example_section Collapse
+ * @example distance_matrix_edge_collapse_rips_persistence.cpp
+ * @example point_cloud_edge_collapse_rips_persistence.cpp
+ * @example edge_collapse_conserve_persistence.cpp
+ * @example edge_collapse_basic_example.cpp
+ * \section Cech_complex_example_section Cech_complex
+ * @example cech_persistence.cpp
+ * @example cech_complex_step_by_step.cpp
+ * @example cech_complex_example_from_points.cpp
+ * \section Bitmap_cubical_complex_example_section Bitmap_cubical_complex
+ * @example periodic_cubical_complex_persistence.cpp
+ * @example cubical_complex_persistence.cpp
+ * @example Random_bitmap_cubical_complex.cpp
+ * \section Coxeter_triangulation_example_section Coxeter_triangulation
+ * @example cell_complex_from_basic_circle_manifold.cpp
+ * @example manifold_tracing_flat_torus_with_boundary.cpp
+ * @example manifold_tracing_custom_function.cpp
+ * \section Nerve_GIC_example_section Nerve_GIC
+ * @example VoronoiGIC.cpp
+ * @example Nerve.cpp
+ * @example CoordGIC.cpp
+ * @example FuncGIC.cpp
+ * \section Tangential_complex_example_section Tangential_complex
+ * @example example_basic.cpp
+ * @example example_with_perturb.cpp
+ * \section Persistence_representations_example_section Persistence_representations
+ * @example persistence_vectors/create_persistence_vectors.cpp
+ * @example persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp
+ * @example persistence_vectors/plot_persistence_vectors.cpp
+ * @example persistence_vectors/average_persistence_vectors.cpp
+ * @example persistence_vectors/compute_distance_of_persistence_vectors.cpp
+ * @example persistence_landscapes_on_grid/average_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/create_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp
+ * @example persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp
+ * @example persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp
+ * @example persistence_intervals/plot_persistence_Betti_numbers.cpp
+ * @example persistence_intervals/compute_bottleneck_distance.cpp
+ * @example persistence_intervals/compute_number_of_dominant_intervals.cpp
+ * @example persistence_intervals/plot_histogram_of_intervals_lengths.cpp
+ * @example persistence_intervals/plot_persistence_intervals.cpp
+ * @example persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/create_pssk.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp
+ * @example persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp
+ * @example persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/plot_persistence_heat_map.cpp
+ * @example persistence_heat_maps/create_persistence_heat_maps.cpp
+ * @example persistence_heat_maps/average_persistence_heat_maps.cpp
+ * @example persistence_landscapes/compute_distance_of_landscapes.cpp
+ * @example persistence_landscapes/compute_scalar_product_of_landscapes.cpp
+ * @example persistence_landscapes/average_landscapes.cpp
+ * @example persistence_landscapes/plot_landscapes.cpp
+ * @example persistence_landscapes/create_landscapes.cpp
+ * @example persistence_landscape_on_grid.cpp
+ * @example persistence_intervals.cpp
+ * @example persistence_landscape.cpp
+ * @example persistence_vectors.cpp
+ * @example sliced_wasserstein.cpp
+ * @example persistence_heat_maps.cpp
+ * \section Spatial_searching_example_section Spatial_searching
+ * @example example_spatial_searching.cpp
+ * \section Bottleneck_distance_example_section Bottleneck_distance
+ * @example bottleneck_distance.cpp
+ * @example bottleneck_basic_example.cpp
+ * @example alpha_rips_persistence_bottleneck_distance.cpp
+ * \section common_example_section common
+ * @example off_file_from_shape_generator.cpp
+ * @example example_vector_double_points_off_reader.cpp
+ * @example example_CGAL_points_off_reader.cpp
+ * @example example_CGAL_3D_points_off_reader.cpp
+ * \section Alpha_complex_example_section Alpha_complex
+ * @example alpha_complex_3d_persistence.cpp
+ * @example alpha_complex_persistence.cpp
+ * @example Fast_alpha_complex_from_off.cpp
+ * @example Alpha_complex_3d_from_points.cpp
+ * @example Alpha_complex_from_off.cpp
+ * @example Weighted_alpha_complex_3d_from_points.cpp
+ * @example Weighted_alpha_complex_from_points.cpp
+ * @example Alpha_complex_from_points.cpp
+ * \section Skeleton_blocker_example_section Skeleton_blocker
+ * @example Skeleton_blocker_from_simplices.cpp
+ * @example Skeleton_blocker_link.cpp
+ * @example Skeleton_blocker_iteration.cpp
+ * \section Rips_complex_example_section Rips_complex
+ * @example rips_persistence.cpp
+ * @example rips_correlation_matrix_persistence.cpp
+ * @example sparse_rips_persistence.cpp
+ * @example rips_distance_matrix_persistence.cpp
+ * @example example_sparse_rips.cpp
+ * @example example_rips_complex_from_csv_distance_matrix_file.cpp
+ * @example example_one_skeleton_rips_from_correlation_matrix.cpp
+ * @example example_one_skeleton_rips_from_distance_matrix.cpp
+ * @example example_one_skeleton_rips_from_points.cpp
+ * @example example_rips_complex_from_off_file.cpp
*/
diff --git a/src/common/doc/header.html b/src/common/doc/header.html
index 9da20bbc..7c20478b 100644
--- a/src/common/doc/header.html
+++ b/src/common/doc/header.html
@@ -49,6 +49,7 @@ $extrastylesheet
<li><a href="/relatedprojects/">Related projects</a></li>
<li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
<li><a href="/inaction/">GUDHI in action</a></li>
+ <li><a href="/etymology/">Etymology</a></li>
</ul>
</li>
<li class="divider"></li>
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index c2e63a24..ef668dfb 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -30,6 +30,10 @@ make \endverbatim
* This action may require to be in the sudoer or administrator of the machine in function of the operating system and
* of <a href="https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html">CMAKE_INSTALL_PREFIX</a>.
*
+ * \note Python module will be compiled by the `make` command, but `make install` will not install it. Please refer to
+ * the <a href="https://gudhi.inria.fr/python/latest/installation.html#gudhi-python-module-installation">Python
+ * module installation documentation</a>.
+ *
* \subsection testsuites Test suites
* To test your build, run the following command in a terminal:
* \verbatim make test \endverbatim
@@ -40,11 +44,8 @@ make \endverbatim
* \subsection documentationgeneration Documentation
* To generate the documentation, <a target="_blank" href="http://www.doxygen.org/">Doxygen</a> is required.
* Run the following command in a terminal:
-\verbatim
-make doxygen
-# Documentation will be generated in the folder YYYY-MM-DD-hh-mm-ss_GUDHI_X.Y.Z/doc/html/
-# You can customize the directory name by calling `cmake -DUSER_VERSION_DIR=/my/custom/folder`
-\endverbatim
+ * \verbatim make doxygen \endverbatim
+ * Documentation will be generated in a folder named <code>html</code>.
*
* \subsection helloworld Hello world !
* The <a target="_blank" href="https://github.com/GUDHI/hello-gudhi-world">Hello world for GUDHI</a>
@@ -57,7 +58,7 @@ make doxygen
*
* The following example requires the <a target="_blank" href="http://gmplib.org/">GNU Multiple Precision Arithmetic
* Library</a> (GMP) and will not be built if GMP is not installed:
- * \li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
+ * \li <a href="rips_multifield_persistence_8cpp-example.html">
* Persistent_cohomology/rips_multifield_persistence.cpp</a>
*
* Having GMP version 4.2 or higher installed is recommended.
@@ -75,56 +76,58 @@ make doxygen
*
* The following examples/utilities require the <a target="_blank" href="http://www.cgal.org/">Computational Geometry Algorithms
* Library</a> (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed:
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
+ * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
* Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
+ * \li <a href="strong_witness_persistence_8cpp-example.html">
* Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
+ * \li <a href="weak_witness_persistence_8cpp-example.html">
* Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_strong_witness_complex_off_8cpp-example.html">
+ * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
* Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_off_8cpp-example.html">
+ * \li <a href="example_witness_complex_off_8cpp-example.html">
* Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_sphere_8cpp-example.html">
+ * \li <a href="example_witness_complex_sphere_8cpp-example.html">
* Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_off_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_points_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
+ * \li <a href="custom_persistence_sort_8cpp-example.html">
* Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_bottleneck_distance_2alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
+ * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
* Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="_bottleneck_distance_2bottleneck_basic_example_8cpp-example.html">
+ * \li <a href="bottleneck_basic_example_8cpp-example.html">
* Bottleneck_distance/bottleneck_basic_example.cpp</a>
- * \li <a href="_bottleneck_distance_2bottleneck_read_file_8cpp-example.html">
+ * \li <a href="bottleneck_distance_8cpp-example.html">
* Bottleneck_distance/bottleneck_distance.cpp</a>
- * \li <a href="_nerve__g_i_c_2_coord_g_i_c_8cpp-example.html">
+ * \li <a href="_coord_g_i_c_8cpp-example.html">
* Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_func_g_i_c_8cpp-example.html">
+ * \li <a href="_func_g_i_c_8cpp-example.html">
* Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_nerve_8cpp-example.html">
+ * \li <a href="_nerve_8cpp-example.html">
* Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_nerve__g_i_c_2_voronoi_g_i_c_8cpp-example.html">
+ * \li <a href="_voronoi_g_i_c_8cpp-example.html">
* Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="_spatial_searching_2example_spatial_searching_8cpp-example.html">
+ * \li <a href="example_spatial_searching_8cpp-example.html">
* Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
+ * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
* Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
+ * \li <a href="example_pick_n_random_points_8cpp-example.html">
* Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
+ * \li <a href="example_sparsify_point_set_8cpp-example.html">
* Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="_tangential_complex_2example_basic_8cpp-example.html">
+ * \li <a href="example_basic_8cpp-example.html">
* Tangential_complex/example_basic.cpp</a>
- * \li <a href="_tangential_complex_2example_with_perturb_8cpp-example.html">
+ * \li <a href="example_with_perturb_8cpp-example.html">
* Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_alpha_complex_2_weighted_alpha_complex_3d_from_points_8cpp-example.html">
+ * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
* Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_3d_persistence.cpp</a>
+ * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
+ * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
*
* \subsection eigen Eigen
* Some GUDHI modules (cf. \ref main_page "modules list"), and few examples require
@@ -133,42 +136,48 @@ make doxygen
*
* The following examples/utilities require the <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> and will not be
* built if Eigen is not installed:
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_off_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_points_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_alpha_complex_2_weighted_alpha_complex_3d_from_points_8cpp-example.html">
+ * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
* Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="_bottleneck_distance_2alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
+ * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
* Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
+ * \li <a href="custom_persistence_sort_8cpp-example.html">
* Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_spatial_searching_2example_spatial_searching_8cpp-example.html">
+ * \li <a href="example_spatial_searching_8cpp-example.html">
* Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="_subsampling_2example_choose_n_farthest_points_8cpp-example.html">
+ * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
* Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="_subsampling_2example_pick_n_random_points_8cpp-example.html">
+ * \li <a href="example_pick_n_random_points_8cpp-example.html">
* Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="_subsampling_2example_sparsify_point_set_8cpp-example.html">
+ * \li <a href="example_sparsify_point_set_8cpp-example.html">
* Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="_tangential_complex_2example_basic_8cpp-example.html">
+ * \li <a href="example_basic_8cpp-example.html">
* Tangential_complex/example_basic.cpp</a>
- * \li <a href="_tangential_complex_2example_with_perturb_8cpp-example.html">
+ * \li <a href="example_with_perturb_8cpp-example.html">
* Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
+ * \li <a href="strong_witness_persistence_8cpp-example.html">
* Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
+ * \li <a href="weak_witness_persistence_8cpp-example.html">
* Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_strong_witness_complex_off_8cpp-example.html">
+ * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
* Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_off_8cpp-example.html">
+ * \li <a href="example_witness_complex_off_8cpp-example.html">
* Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="_witness_complex_2example_witness_complex_sphere_8cpp-example.html">
+ * \li <a href="example_witness_complex_sphere_8cpp-example.html">
* Witness_complex/example_witness_complex_sphere.cpp</a>
+ * \li <a href="_coxeter_triangulation_2cell_complex_from_basic_circle_manifold_8cpp-example.html">
+ * Coxeter_triangulation/cell_complex_from_basic_circle_manifold.cpp</a>
+ * \li <a href="_coxeter_triangulation_2manifold_tracing_custom_function_8cpp-example.html">
+ * Coxeter_triangulation/manifold_tracing_custom_function.cpp</a>
+ * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
+ * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
*
* \subsection tbb Threading Building Blocks
* <a target="_blank" href="https://www.threadingbuildingblocks.org/">Intel&reg; TBB</a> lets you easily write parallel
@@ -178,74 +187,76 @@ make doxygen
* Having Intel&reg; TBB installed is recommended to parallelize and accelerate some GUDHI computations.
*
* The following examples/utilities are using Intel&reg; TBB if installed:
- * \li <a href="_alpha_complex_2_alpha_complex_from_off_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_off_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_2_alpha_complex_from_points_8cpp-example.html">
+ * \li <a href="_alpha_complex_from_points_8cpp-example.html">
* Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_3d_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_alpha_complex_2alpha_complex_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_persistence_8cpp-example.html">
* Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_bitmap_cubical_complex_8cpp-example.html">
+ * \li <a href="cubical_complex_persistence_8cpp-example.html">
* Bitmap_cubical_complex/cubical_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_bitmap_cubical_complex_periodic_boundary_conditions_8cpp-example.html">
+ * \li <a href="periodic_cubical_complex_persistence_8cpp-example.html">
* Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp</a>
- * \li <a href="_bitmap_cubical_complex_2_random_bitmap_cubical_complex_8cpp-example.html">
+ * \li <a href="_random_bitmap_cubical_complex_8cpp-example.html">
* Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp</a>
- * \li <a href="_nerve__g_i_c_2_coord_g_i_c_8cpp-example.html">
+ * \li <a href="_coord_g_i_c_8cpp-example.html">
* Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_func_g_i_c_8cpp-example.html">
+ * \li <a href="_func_g_i_c_8cpp-example.html">
* Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve__g_i_c_2_nerve_8cpp-example.html">
+ * \li <a href="_nerve_8cpp-example.html">
* Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_nerve__g_i_c_2_voronoi_g_i_c_8cpp-example.html">
+ * \li <a href="_voronoi_g_i_c_8cpp-example.html">
* Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="_simplex_tree_2simple_simplex_tree_8cpp-example.html">
+ * \li <a href="simple_simplex_tree_8cpp-example.html">
* Simplex_tree/simple_simplex_tree.cpp</a>
- * \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
+ * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
* Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="_simplex_tree_2simplex_tree_from_cliques_of_graph_8cpp-example.html">
+ * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
* Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a>
- * \li <a href="_simplex_tree_2graph_expansion_with_blocker_8cpp-example.html">
+ * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
* Simplex_tree/graph_expansion_with_blocker.cpp</a>
- * \li <a href="_persistent_cohomology_2alpha_complex_3d_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
* Persistent_cohomology/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2alpha_complex_persistence_8cpp-example.html">
+ * \li <a href="alpha_complex_persistence_8cpp-example.html">
* Persistent_cohomology/alpha_complex_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_persistence_via_boundary_matrix_8cpp-example.html">
+ * \li <a href="rips_persistence_via_boundary_matrix_8cpp-example.html">
* Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp</a>
- * \li <a href="_persistent_cohomology_2persistence_from_file_8cpp-example.html">
+ * \li <a href="persistence_from_file_8cpp-example.html">
* Persistent_cohomology/persistence_from_file.cpp</a>
- * \li <a href="_persistent_cohomology_2persistence_from_simple_simplex_tree_8cpp-example.html">
+ * \li <a href="persistence_from_simple_simplex_tree_8cpp-example.html">
* Persistent_cohomology/persistence_from_simple_simplex_tree.cpp</a>
- * \li <a href="_persistent_cohomology_2plain_homology_8cpp-example.html">
+ * \li <a href="plain_homology_8cpp-example.html">
* Persistent_cohomology/plain_homology.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_multifield_persistence_8cpp-example.html">
+ * \li <a href="rips_multifield_persistence_8cpp-example.html">
* Persistent_cohomology/rips_multifield_persistence.cpp</a>
- * \li <a href="_persistent_cohomology_2rips_persistence_step_by_step_8cpp-example.html">
+ * \li <a href="rips_persistence_step_by_step_8cpp-example.html">
* Persistent_cohomology/rips_persistence_step_by_step.cpp</a>
- * \li <a href="_persistent_cohomology_2custom_persistence_sort_8cpp-example.html">
+ * \li <a href="custom_persistence_sort_8cpp-example.html">
* Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="_rips_complex_2example_one_skeleton_rips_from_points_8cpp-example.html">
+ * \li <a href="example_one_skeleton_rips_from_points_8cpp-example.html">
* Rips_complex/example_one_skeleton_rips_from_points.cpp</a>
- * \li <a href="_rips_complex_2example_rips_complex_from_off_file_8cpp-example.html">
+ * \li <a href="example_rips_complex_from_off_file_8cpp-example.html">
* Rips_complex/example_rips_complex_from_off_file.cpp</a>
- * \li <a href="_rips_complex_2rips_distance_matrix_persistence_8cpp-example.html">
+ * \li <a href="rips_distance_matrix_persistence_8cpp-example.html">
* Rips_complex/rips_distance_matrix_persistence.cpp</a>
- * \li <a href="_rips_complex_2rips_persistence_8cpp-example.html">
+ * \li <a href="rips_persistence_8cpp-example.html">
* Rips_complex/rips_persistence.cpp</a>
- * \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
+ * \li <a href="strong_witness_persistence_8cpp-example.html">
* Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2weak_witness_persistence_8cpp-example.html">
+ * \li <a href="weak_witness_persistence_8cpp-example.html">
* Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="_witness_complex_2example_nearest_landmark_table_8cpp-example.html">
+ * \li <a href="example_nearest_landmark_table_8cpp-example.html">
* Witness_complex/example_nearest_landmark_table.cpp</a>
*
* \section Contributions Bug reports and contributions
- * Please help us improving the quality of the GUDHI library. You may report bugs or suggestions to:
- * \verbatim Contact: gudhi-users@lists.gforge.inria.fr \endverbatim
+ * Please help us improving the quality of the GUDHI library.
+ * You may <a href="https://github.com/GUDHI/gudhi-devel/issues">report bugs</a> or
+ * <a href="https://gudhi.inria.fr/contact/">contact us</a> for any suggestions.
*
- * GUDHI is open to external contributions. If you want to join our development team, please contact us.
+ * GUDHI is open to external contributions. If you want to join our development team, please take some time to read our
+ * <a href="https://github.com/GUDHI/gudhi-devel/blob/master/.github/CONTRIBUTING.md">contributing guide</a>.
*
*/
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index e19af537..17354179 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -135,7 +135,7 @@
</tr>
</table>
-## Filtrations and reconstructions {#FiltrationsReconstructions}
+## Filtrations
### Alpha complex
<table>
@@ -298,6 +298,32 @@
</tr>
</table>
+## Manifold reconstructions
+### Coxeter triangulation
+
+<table>
+ <tr>
+ <td width="35%" rowspan=2>
+ \image html "manifold_tracing_on_custom_function_example.png"
+ </td>
+ <td width="50%">
+ Coxeter triangulation module is designed to provide tools for constructing a piecewise-linear approximation of an
+ \f$m\f$-dimensional smooth manifold embedded in \f$ \mathbb{R}^d \f$ using an ambient triangulation.
+ </td>
+ <td width="15%">
+ <b>Author:</b> Siargey Kachanovich<br>
+ <b>Introduced in:</b> GUDHI 3.4.0<br>
+ <b>Copyright:</b> MIT [(LGPL v3)](../../licensing/)<br>
+ <b>Requires:</b> \ref eigen &ge; 3.1.0
+ </td>
+ </tr>
+ <tr>
+ <td colspan=2 height="25">
+ <b>User manual:</b> \ref coxeter_triangulation
+ </td>
+ </tr>
+</table>
+
### Tangential complex
<table>
diff --git a/src/common/include/gudhi/Points_3D_off_io.h b/src/common/include/gudhi/Points_3D_off_io.h
index 39b79c96..4f74fd4b 100644
--- a/src/common/include/gudhi/Points_3D_off_io.h
+++ b/src/common/include/gudhi/Points_3D_off_io.h
@@ -125,7 +125,7 @@ class Points_3D_off_visitor_reader {
* This example loads points from an OFF file and builds a vector of CGAL points in dimension 3.
* Then, it is asked to display the points.
*
- * @include common/example_CGAL_3D_points_off_reader.cpp
+ * @include example_CGAL_3D_points_off_reader.cpp
*
* When launching:
*
@@ -134,7 +134,7 @@ class Points_3D_off_visitor_reader {
*
* the program output is:
*
- * @include common/cgal3Doffreader_result.txt
+ * @include cgal3Doffreader_result.txt
*/
template<typename Point_3>
class Points_3D_off_reader {
diff --git a/src/common/include/gudhi/Points_off_io.h b/src/common/include/gudhi/Points_off_io.h
index 9dc40568..3aa8afd8 100644
--- a/src/common/include/gudhi/Points_off_io.h
+++ b/src/common/include/gudhi/Points_off_io.h
@@ -107,7 +107,7 @@ class Points_off_visitor_reader {
* This example loads points from an OFF file and builds a vector of points (vector of double).
* Then, it is asked to display the points.
*
- * \include common/example_vector_double_points_off_reader.cpp
+ * \include example_vector_double_points_off_reader.cpp
*
* When launching:
*
@@ -116,7 +116,7 @@ class Points_off_visitor_reader {
*
* the program outputs a file ../../data/points/alphacomplexdoc.off.txt:
*
- * \include common/vectordoubleoffreader_result.txt
+ * \include vectordoubleoffreader_result.txt
*/
template<typename Point_d>
class Points_off_reader {
diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h
index 9dd88ac4..25a7392d 100644
--- a/src/common/include/gudhi/random_point_generators.h
+++ b/src/common/include/gudhi/random_point_generators.h
@@ -18,6 +18,7 @@
#include <CGAL/version.h> // for CGAL_VERSION_NR
#include <vector> // for vector<>
+#include <boost/math/constants/constants.hpp> // for pi constant
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
@@ -149,6 +150,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_moment_curve(std::size_
template <typename Kernel/*, typename TC_basis*/>
std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t num_points, double R, double r,
bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -164,11 +167,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t nu
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
Point p = construct_point(k,
(R + r * std::cos(u)) * std::cos(v),
@@ -182,11 +185,13 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_3D(std::size_t nu
// "Private" function used by generate_points_on_torus_d
template <typename Kernel, typename OutputIterator>
-static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::size_t num_slices,
+static void generate_grid_points_on_torus_d(const Kernel &k, int dim, std::size_t num_slices,
OutputIterator out,
double radius_noise_percentage = 0.,
std::vector<typename Kernel::FT> current_point =
std::vector<typename Kernel::FT>()) {
+ using namespace boost::math::double_constants;
+
CGAL::Random rng;
int point_size = static_cast<int>(current_point.size());
if (point_size == 2 * dim) {
@@ -200,18 +205,20 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si
(100. + radius_noise_percentage) / 100.);
}
std::vector<typename Kernel::FT> cp2 = current_point;
- double alpha = 6.2832 * slice_idx / num_slices;
+ double alpha = two_pi * slice_idx / num_slices;
cp2.push_back(radius_noise_ratio * std::cos(alpha));
cp2.push_back(radius_noise_ratio * std::sin(alpha));
- generate_uniform_points_on_torus_d(
+ generate_grid_points_on_torus_d(
k, dim, num_slices, out, radius_noise_percentage, cp2);
}
}
}
template <typename Kernel>
-std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num_points, int dim, bool uniform = false,
+std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num_points, int dim, std::string sample = "random",
double radius_noise_percentage = 0.) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -219,9 +226,9 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num
std::vector<Point> points;
points.reserve(num_points);
- if (uniform) {
- std::size_t num_slices = (std::size_t)std::pow(num_points, 1. / dim);
- generate_uniform_points_on_torus_d(
+ if (sample == "grid") {
+ std::size_t num_slices = (std::size_t)std::pow(num_points + .5, 1. / dim); // add .5 to avoid rounding down with numerical approximations
+ generate_grid_points_on_torus_d(
k, dim, num_slices, std::back_inserter(points), radius_noise_percentage);
} else {
for (std::size_t i = 0; i < num_points;) {
@@ -234,7 +241,7 @@ std::vector<typename Kernel::Point_d> generate_points_on_torus_d(std::size_t num
std::vector<typename Kernel::FT> pt;
pt.reserve(dim * 2);
for (int curdim = 0; curdim < dim; ++curdim) {
- FT alpha = rng.get_double(0, 6.2832);
+ FT alpha = rng.get_double(0, two_pi);
pt.push_back(radius_noise_ratio * std::cos(alpha));
pt.push_back(radius_noise_ratio * std::sin(alpha));
}
@@ -357,6 +364,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_two_spheres_d(std::size
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std::size_t num_points,
double sphere_radius) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::FT FT;
typedef typename Kernel::Point_d Point;
Kernel k;
@@ -370,7 +379,7 @@ std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std:
for (std::size_t i = 0; i < num_points;) {
Point p_sphere = *generator++; // First 3 coords
- FT alpha = rng.get_double(0, 6.2832);
+ FT alpha = rng.get_double(0, two_pi);
std::vector<FT> pt(5);
pt[0] = k_coord(p_sphere, 0);
pt[1] = k_coord(p_sphere, 1);
@@ -388,6 +397,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_3sphere_and_circle(std:
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::size_t num_points, double a, double b,
bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -403,11 +414,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::si
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v);
Point p = construct_point(k,
@@ -424,6 +435,8 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_3D(std::si
template <typename Kernel>
std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_4D(std::size_t num_points, double a, double b,
double noise = 0., bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -439,11 +452,11 @@ std::vector<typename Kernel::Point_d> generate_points_on_klein_bottle_4D(std::si
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
Point p = construct_point(k,
(a + b * cos(v)) * cos(u) + (noise == 0. ? 0. : rng.get_double(0, noise)),
@@ -463,6 +476,8 @@ template <typename Kernel>
std::vector<typename Kernel::Point_d>
generate_points_on_klein_bottle_variant_5D(
std::size_t num_points, double a, double b, bool uniform = false) {
+ using namespace boost::math::double_constants;
+
typedef typename Kernel::Point_d Point;
typedef typename Kernel::FT FT;
Kernel k;
@@ -478,11 +493,11 @@ generate_points_on_klein_bottle_variant_5D(
if (uniform) {
std::size_t k1 = i / num_lines;
std::size_t k2 = i % num_lines;
- u = 6.2832 * k1 / num_lines;
- v = 6.2832 * k2 / num_lines;
+ u = two_pi * k1 / num_lines;
+ v = two_pi * k2 / num_lines;
} else {
- u = rng.get_double(0, 6.2832);
- v = rng.get_double(0, 6.2832);
+ u = rng.get_double(0, two_pi);
+ v = rng.get_double(0, two_pi);
}
FT x1 = (a + b * cos(v)) * cos(u);
FT x2 = (a + b * cos(v)) * sin(u);
diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h
index 0938f5c1..a1b104e2 100644
--- a/src/common/include/gudhi/reader_utils.h
+++ b/src/common/include/gudhi/reader_utils.h
@@ -14,7 +14,11 @@
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Debug_utils.h>
-#include <boost/function_output_iterator.hpp>
+#if BOOST_VERSION < 106600
+# include <boost/function_output_iterator.hpp>
+#else
+# include <boost/iterator/function_output_iterator.hpp>
+#endif
#include <boost/graph/adjacency_list.hpp>
#include <iostream>
diff --git a/src/common/test/test_distance_matrix_reader.cpp b/src/common/test/test_distance_matrix_reader.cpp
index 73be8104..92e899b8 100644
--- a/src/common/test/test_distance_matrix_reader.cpp
+++ b/src/common/test/test_distance_matrix_reader.cpp
@@ -57,7 +57,7 @@ BOOST_AUTO_TEST_CASE( full_square_distance_matrix )
{
Distance_matrix from_full_square;
// Read full_square_distance_matrix.csv file where the separator is the default one ';'
- from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv");
+ from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv", ';');
for (auto& i : from_full_square) {
for (auto j : i) {
std::clog << j << " ";
diff --git a/src/common/utilities/off_file_from_shape_generator.cpp b/src/common/utilities/off_file_from_shape_generator.cpp
index 6efef4fc..71ede434 100644
--- a/src/common/utilities/off_file_from_shape_generator.cpp
+++ b/src/common/utilities/off_file_from_shape_generator.cpp
@@ -135,7 +135,7 @@ int main(int argc, char **argv) {
if (dimension == 3)
points = Gudhi::generate_points_on_torus_3D<K>(points_number, dimension, radius, radius/2.);
else
- points = Gudhi::generate_points_on_torus_d<K>(points_number, dimension, true);
+ points = Gudhi::generate_points_on_torus_d<K>(points_number, dimension, "grid");
break;
case Data_shape::klein:
switch (dimension) {
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 5c1402a6..1314b444 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -14,13 +14,16 @@ function( add_GUDHI_PYTHON_lib THE_LIB )
endif(EXISTS ${THE_LIB})
endfunction( add_GUDHI_PYTHON_lib )
-function( add_GUDHI_PYTHON_lib_dir THE_LIB_DIR )
- # deals when it is not set - error on windows
- if(EXISTS ${THE_LIB_DIR})
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${THE_LIB_DIR}', " PARENT_SCOPE)
- else()
- message("add_GUDHI_PYTHON_lib_dir - '${THE_LIB_DIR}' does not exist")
- endif()
+function( add_GUDHI_PYTHON_lib_dir)
+ # Argument may be a list (specifically on windows with release/debug paths)
+ foreach(THE_LIB_DIR IN LISTS ARGN)
+ # deals when it is not set - error on windows
+ if(EXISTS ${THE_LIB_DIR})
+ set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${THE_LIB_DIR}', " PARENT_SCOPE)
+ else()
+ message("add_GUDHI_PYTHON_lib_dir - '${THE_LIB_DIR}' does not exist")
+ endif()
+ endforeach()
endfunction( add_GUDHI_PYTHON_lib_dir )
# THE_TEST is the python test file name (without .py extension) containing tests functions
@@ -41,13 +44,15 @@ function( add_gudhi_debug_info DEBUG_INFO )
endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
- if(PYBIND11_FOUND)
+ if(PYBIND11_FOUND AND CYTHON_FOUND)
add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
+ # PyBind11 modules
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ")
- endif()
- if(CYTHON_FOUND)
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets', ")
+
+ # Cython modules
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'simplex_tree', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'rips_complex', ")
@@ -106,6 +111,16 @@ if(PYTHONINTERP_FOUND)
if(TENSORFLOW_FOUND)
add_gudhi_debug_info("TensorFlow version ${TENSORFLOW_VERSION}")
endif()
+ if(SPHINX_FOUND)
+ add_gudhi_debug_info("Sphinx version ${SPHINX_VERSION}")
+ endif()
+ if(SPHINX_PARAMLINKS_FOUND)
+ add_gudhi_debug_info("Sphinx-paramlinks version ${SPHINX_PARAMLINKS_VERSION}")
+ endif()
+ if(PYTHON_DOCS_THEME_FOUND)
+ # Does not have a version number...
+ add_gudhi_debug_info("python_docs_theme found")
+ endif()
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ")
@@ -151,6 +166,7 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/_points', ")
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
@@ -163,6 +179,10 @@ if(PYTHONINTERP_FOUND)
endif ()
if(CGAL_FOUND)
+ if(NOT CGAL_VERSION VERSION_LESS 5.3.0)
+ # CGAL_HEADER_ONLY has been dropped for CGAL >= 5.3. Only the header-only version is supported.
+ set(CGAL_HEADER_ONLY True)
+ endif(NOT CGAL_VERSION VERSION_LESS 5.3.0)
# Add CGAL compilation args
if(CGAL_HEADER_ONLY)
add_gudhi_debug_info("CGAL header only version ${CGAL_VERSION}")
@@ -170,7 +190,7 @@ if(PYTHONINTERP_FOUND)
else(CGAL_HEADER_ONLY)
add_gudhi_debug_info("CGAL version ${CGAL_VERSION}")
add_GUDHI_PYTHON_lib("${CGAL_LIBRARY}")
- add_GUDHI_PYTHON_lib_dir("${CGAL_LIBRARIES_DIR}")
+ add_GUDHI_PYTHON_lib_dir(${CGAL_LIBRARIES_DIR})
message("** Add CGAL ${CGAL_LIBRARIES_DIR}")
# If CGAL is not header only, CGAL library may link with boost system,
if(CMAKE_BUILD_TYPE MATCHES Debug)
@@ -178,7 +198,7 @@ if(PYTHONINTERP_FOUND)
else()
add_GUDHI_PYTHON_lib("${Boost_SYSTEM_LIBRARY_RELEASE}")
endif()
- add_GUDHI_PYTHON_lib_dir("${Boost_LIBRARY_DIRS}")
+ add_GUDHI_PYTHON_lib_dir(${Boost_LIBRARY_DIRS})
message("** Add Boost ${Boost_LIBRARY_DIRS}")
endif(CGAL_HEADER_ONLY)
# GMP and GMPXX are not required, but if present, CGAL will link with them.
@@ -190,13 +210,13 @@ if(PYTHONINTERP_FOUND)
get_filename_component(GMP_LIBRARIES_DIR ${GMP_LIBRARIES} PATH)
message("GMP_LIBRARIES_DIR from GMP_LIBRARIES set to ${GMP_LIBRARIES_DIR}")
endif(NOT GMP_LIBRARIES_DIR)
- add_GUDHI_PYTHON_lib_dir("${GMP_LIBRARIES_DIR}")
+ add_GUDHI_PYTHON_lib_dir(${GMP_LIBRARIES_DIR})
message("** Add gmp ${GMP_LIBRARIES_DIR}")
if(GMPXX_FOUND)
add_gudhi_debug_info("GMPXX_LIBRARIES = ${GMPXX_LIBRARIES}")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMPXX', ")
add_GUDHI_PYTHON_lib("${GMPXX_LIBRARIES}")
- add_GUDHI_PYTHON_lib_dir("${GMPXX_LIBRARIES_DIR}")
+ add_GUDHI_PYTHON_lib_dir(${GMPXX_LIBRARIES_DIR})
message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
endif(GMPXX_FOUND)
endif(GMP_FOUND)
@@ -209,7 +229,7 @@ if(PYTHONINTERP_FOUND)
get_filename_component(MPFR_LIBRARIES_DIR ${MPFR_LIBRARIES} PATH)
message("MPFR_LIBRARIES_DIR from MPFR_LIBRARIES set to ${MPFR_LIBRARIES_DIR}")
endif(NOT MPFR_LIBRARIES_DIR)
- add_GUDHI_PYTHON_lib_dir("${MPFR_LIBRARIES_DIR}")
+ add_GUDHI_PYTHON_lib_dir(${MPFR_LIBRARIES_DIR})
message("** Add mpfr ${MPFR_LIBRARIES_DIR}")
endif(MPFR_FOUND)
endif(CGAL_FOUND)
@@ -230,14 +250,14 @@ if(PYTHONINTERP_FOUND)
if (TBB_FOUND AND WITH_GUDHI_USE_TBB)
add_gudhi_debug_info("TBB version ${TBB_INTERFACE_VERSION} found and used")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DGUDHI_USE_TBB', ")
- if(CMAKE_BUILD_TYPE MATCHES Debug)
+ if((CMAKE_BUILD_TYPE MATCHES Debug) AND TBB_DEBUG_LIBRARY)
add_GUDHI_PYTHON_lib("${TBB_DEBUG_LIBRARY}")
add_GUDHI_PYTHON_lib("${TBB_MALLOC_DEBUG_LIBRARY}")
else()
add_GUDHI_PYTHON_lib("${TBB_RELEASE_LIBRARY}")
add_GUDHI_PYTHON_lib("${TBB_MALLOC_RELEASE_LIBRARY}")
endif()
- add_GUDHI_PYTHON_lib_dir("${TBB_LIBRARY_DIRS}")
+ add_GUDHI_PYTHON_lib_dir(${TBB_LIBRARY_DIRS})
message("** Add tbb ${TBB_LIBRARY_DIRS}")
set(GUDHI_PYTHON_INCLUDE_DIRS "${GUDHI_PYTHON_INCLUDE_DIRS}'${TBB_INCLUDE_DIRS}', ")
endif()
@@ -262,9 +282,12 @@ if(PYTHONINTERP_FOUND)
file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera")
+ file(COPY "gudhi/datasets" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
+
# Some files for pip package
file(COPY "introduction.rst" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
+ file(COPY "pyproject.toml" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/")
add_custom_command(
OUTPUT gudhi.so
@@ -274,67 +297,74 @@ if(PYTHONINTERP_FOUND)
add_custom_target(python ALL DEPENDS gudhi.so
COMMENT "Do not forget to add ${CMAKE_CURRENT_BINARY_DIR}/ to your PYTHONPATH before using examples or tests")
- install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)")
-
+ # Path separator management for windows
+ if (WIN32)
+ set(GUDHI_PYTHON_PATH_ENV "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR};$ENV{PYTHONPATH}")
+ else(WIN32)
+ set(GUDHI_PYTHON_PATH_ENV "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:$ENV{PYTHONPATH}")
+ endif(WIN32)
# Documentation generation is available through sphinx - requires all modules
# Make it first as sphinx test is by far the longest test which is nice when testing in parallel
if(SPHINX_PATH)
- if(MATPLOTLIB_FOUND)
- if(NUMPY_FOUND)
- if(SCIPY_FOUND)
- if(SKLEARN_FOUND)
- if(OT_FOUND)
- if(PYBIND11_FOUND)
- if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
- # User warning - Sphinx is a static pages generator, and configured to work fine with user_version
- # Images and biblio warnings because not found on developper version
- if (GUDHI_PYTHON_PATH STREQUAL "src/python")
- set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
- endif()
- # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
- add_custom_target(sphinx
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
- DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
- COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
-
- add_test(NAME sphinx_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
-
- # Set missing or not modules
- set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
- else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
+ if(SPHINX_PARAMLINKS_FOUND)
+ if(PYTHON_DOCS_THEME_FOUND)
+ if(MATPLOTLIB_FOUND)
+ if(NUMPY_FOUND)
+ if(SCIPY_FOUND)
+ if(SKLEARN_FOUND)
+ if(OT_FOUND)
+ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
+ # User warning - Sphinx is a static pages generator, and configured to work fine with user_version
+ # Images and biblio warnings because not found on developper version
+ if (GUDHI_PYTHON_PATH STREQUAL "src/python")
+ set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
+ endif()
+ # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
+ add_custom_target(sphinx
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
+ COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
+ add_test(NAME sphinx_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
+ # Set missing or not modules
+ set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
+ else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ else(OT_FOUND)
+ message("++ Python documentation module will not be compiled because POT was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- else(PYBIND11_FOUND)
- message("++ Python documentation module will not be compiled because pybind11 was not found")
+ endif(OT_FOUND)
+ else(SKLEARN_FOUND)
+ message("++ Python documentation module will not be compiled because scikit-learn was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(PYBIND11_FOUND)
- else(OT_FOUND)
- message("++ Python documentation module will not be compiled because POT was not found")
+ endif(SKLEARN_FOUND)
+ else(SCIPY_FOUND)
+ message("++ Python documentation module will not be compiled because scipy was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(OT_FOUND)
- else(SKLEARN_FOUND)
- message("++ Python documentation module will not be compiled because scikit-learn was not found")
+ endif(SCIPY_FOUND)
+ else(NUMPY_FOUND)
+ message("++ Python documentation module will not be compiled because numpy was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SKLEARN_FOUND)
- else(SCIPY_FOUND)
- message("++ Python documentation module will not be compiled because scipy was not found")
+ endif(NUMPY_FOUND)
+ else(MATPLOTLIB_FOUND)
+ message("++ Python documentation module will not be compiled because matplotlib was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(SCIPY_FOUND)
- else(NUMPY_FOUND)
- message("++ Python documentation module will not be compiled because numpy was not found")
+ endif(MATPLOTLIB_FOUND)
+ else(PYTHON_DOCS_THEME_FOUND)
+ message("++ Python documentation module will not be compiled because python-docs-theme was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NUMPY_FOUND)
- else(MATPLOTLIB_FOUND)
- message("++ Python documentation module will not be compiled because matplotlib was not found")
+ endif(PYTHON_DOCS_THEME_FOUND)
+ else(SPHINX_PARAMLINKS_FOUND)
+ message("++ Python documentation module will not be compiled because sphinxcontrib-paramlinks was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(MATPLOTLIB_FOUND)
+ endif(SPHINX_PARAMLINKS_FOUND)
else(SPHINX_PATH)
message("++ Python documentation module will not be compiled because sphinx and sphinxcontrib-bibtex were not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
@@ -346,13 +376,13 @@ if(PYTHONINTERP_FOUND)
# Bottleneck and Alpha
add_test(NAME alpha_rips_persistence_bottleneck_distance_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_rips_persistence_bottleneck_distance.py"
-f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -t 0.15 -d 3)
# Tangential
add_test(NAME tangential_complex_plain_homology_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/tangential_complex_plain_homology_from_off_file_example.py"
--no-diagram -i 2 -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
@@ -361,13 +391,13 @@ if(PYTHONINTERP_FOUND)
# Witness complex
add_test(NAME euclidean_strong_witness_complex_diagram_persistence_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py"
--no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
add_test(NAME euclidean_witness_complex_diagram_persistence_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py"
--no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 1.0 -n 20 -d 2)
@@ -379,12 +409,10 @@ if(PYTHONINTERP_FOUND)
# Bottleneck
add_test(NAME bottleneck_basic_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/bottleneck_basic_example.py")
- if (PYBIND11_FOUND)
- add_gudhi_py_test(test_bottleneck_distance)
- endif()
+ add_gudhi_py_test(test_bottleneck_distance)
# Cover complex
file(COPY ${CMAKE_SOURCE_DIR}/data/points/human.off DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
@@ -392,26 +420,26 @@ if(PYTHONINTERP_FOUND)
file(COPY ${CMAKE_SOURCE_DIR}/data/points/COIL_database/lucky_cat_PCA1 DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_test(NAME cover_complex_nerve_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/nerve_of_a_covering.py"
-f human.off -c 2 -r 10 -g 0.3)
add_test(NAME cover_complex_coordinate_gic_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/coordinate_graph_induced_complex.py"
-f human.off -c 0 -v)
add_test(NAME cover_complex_functional_gic_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/functional_graph_induced_complex.py"
-o lucky_cat.off
-f lucky_cat_PCA1 -v)
add_test(NAME cover_complex_voronoi_gic_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/voronoi_graph_induced_complex.py"
-f human.off -n 700 -v)
@@ -422,13 +450,17 @@ if(PYTHONINTERP_FOUND)
# Alpha
add_test(NAME alpha_complex_from_points_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_points_example.py")
+ add_test(NAME alpha_complex_from_generated_points_on_sphere_example_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
+ ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_generated_points_on_sphere_example.py")
add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_diagram_persistence_from_off_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 0.6)
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
add_gudhi_py_test(test_alpha_complex)
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
@@ -436,18 +468,21 @@ if(PYTHONINTERP_FOUND)
# Euclidean witness
add_gudhi_py_test(test_euclidean_witness_complex)
+ # Datasets generators
+ add_gudhi_py_test(test_datasets_generators) # TODO separate full python datasets generators in another test file independent from CGAL ?
+
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Cubical
add_test(NAME periodic_cubical_complex_barcode_persistence_from_perseus_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py"
--no-barcode -f ${CMAKE_SOURCE_DIR}/data/bitmap/CubicalTwoSphere.txt)
add_test(NAME random_cubical_complex_persistence_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/random_cubical_complex_persistence_example.py"
10 10 10)
@@ -456,19 +491,19 @@ if(PYTHONINTERP_FOUND)
# Rips
add_test(NAME rips_complex_diagram_persistence_from_distance_matrix_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py"
- --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3)
+ --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3)
add_test(NAME rips_complex_diagram_persistence_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_off_file_example.py
--no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -e 0.25 -d 3)
add_test(NAME rips_complex_from_points_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_from_points_example.py)
add_gudhi_py_test(test_rips_complex)
@@ -476,7 +511,7 @@ if(PYTHONINTERP_FOUND)
# Simplex tree
add_test(NAME simplex_tree_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py)
add_gudhi_py_test(test_simplex_tree)
@@ -485,7 +520,7 @@ if(PYTHONINTERP_FOUND)
# Witness
add_test(NAME witness_complex_from_nearest_landmark_table_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/witness_complex_from_nearest_landmark_table.py)
add_gudhi_py_test(test_witness_complex)
@@ -494,14 +529,14 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_reader_utils)
# Wasserstein
- if(OT_FOUND AND PYBIND11_FOUND)
+ if(OT_FOUND)
# EagerPy dependency because of enable_autodiff=True
if(EAGERPY_FOUND)
add_gudhi_py_test(test_wasserstein_distance)
endif()
+
add_gudhi_py_test(test_wasserstein_barycenter)
- endif()
- if(OT_FOUND)
+
if(TORCH_FOUND AND TENSORFLOW_FOUND AND EAGERPY_FOUND)
add_gudhi_py_test(test_wasserstein_with_tensors)
endif()
@@ -522,7 +557,7 @@ if(PYTHONINTERP_FOUND)
endif()
# Tomato
- if(SCIPY_FOUND AND SKLEARN_FOUND AND PYBIND11_FOUND)
+ if(SCIPY_FOUND AND SKLEARN_FOUND)
add_gudhi_py_test(test_tomato)
endif()
@@ -539,10 +574,10 @@ if(PYTHONINTERP_FOUND)
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
- else(CYTHON_FOUND)
- message("++ Python module will not be compiled because cython was not found")
+ else(PYBIND11_FOUND AND CYTHON_FOUND)
+ message("++ Python module will not be compiled because cython and/or pybind11 was/were not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(CYTHON_FOUND)
+ endif(PYBIND11_FOUND AND CYTHON_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/python/doc/_templates/layout.html b/src/python/doc/_templates/layout.html
index cd40a51b..e074b6c7 100644
--- a/src/python/doc/_templates/layout.html
+++ b/src/python/doc/_templates/layout.html
@@ -194,6 +194,7 @@
<li><a href="/relatedprojects/">Related projects</a></li>
<li><a href="/theyaretalkingaboutus/">They are talking about us</a></li>
<li><a href="/inaction/">GUDHI in action</a></li>
+ <li><a href="/etymology/">Etymology</a></li>
</ul>
</li>
<li class="divider"></li>
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index fffcb3db..96e267ef 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -163,7 +163,10 @@ As the squared radii computed by CGAL are an approximation, it might happen that
:math:`\alpha^2` values do not quite define a proper filtration (i.e. non-decreasing with
respect to inclusion).
We fix that up by calling :func:`~gudhi.SimplexTree.make_filtration_non_decreasing` (cf.
-`C++ version <http://gudhi.gforge.inria.fr/doc/latest/index.html>`_).
+`C++ version <https://gudhi.inria.fr/doc/latest/class_gudhi_1_1_simplex__tree.html>`_).
+
+.. note::
+ This is not the case with the `exact` version, which is why it is not called in that case.
Prune above given filtration value
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/src/python/doc/conf.py b/src/python/doc/conf.py
index b06baf9c..e69e2751 100755
--- a/src/python/doc/conf.py
+++ b/src/python/doc/conf.py
@@ -120,15 +120,12 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'classic'
+html_theme = 'python_docs_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
- "sidebarbgcolor": "#A1ADCD",
- "sidebartextcolor": "black",
- "sidebarlinkcolor": "#334D5C",
"body_max_width": "100%",
}
diff --git a/src/python/doc/datasets_generators.inc b/src/python/doc/datasets_generators.inc
new file mode 100644
index 00000000..8d169275
--- /dev/null
+++ b/src/python/doc/datasets_generators.inc
@@ -0,0 +1,14 @@
+.. table::
+ :widths: 30 40 30
+
+ +-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
+ | .. figure:: | Datasets generators (points). | :Authors: Hind Montassif |
+ | img/sphere_3d.png | | |
+ | | | :Since: GUDHI 3.5.0 |
+ | | | |
+ | | | :License: MIT (`LGPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `CGAL <installation.html#cgal>`_ |
+ +-----------------------------------+--------------------------------------------+--------------------------------------------------------------------------------------+
+ | * :doc:`datasets_generators` |
+ +-----------------------------------+-----------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/datasets_generators.rst b/src/python/doc/datasets_generators.rst
new file mode 100644
index 00000000..260c3882
--- /dev/null
+++ b/src/python/doc/datasets_generators.rst
@@ -0,0 +1,105 @@
+
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+===========================
+Datasets generators manual
+===========================
+
+We provide the generation of different customizable datasets to use as inputs for Gudhi complexes and data structures.
+
+
+Points generators
+------------------
+
+The module **points** enables the generation of random points on a sphere, random points on a torus and as a grid.
+
+Points on sphere
+^^^^^^^^^^^^^^^^
+
+The function **sphere** enables the generation of random i.i.d. points uniformly on a (d-1)-sphere in :math:`R^d`.
+The user should provide the number of points to be generated on the sphere :code:`n_samples` and the ambient dimension :code:`ambient_dim`.
+The :code:`radius` of the sphere is optional and is equal to **1** by default.
+Only random points generation is currently available.
+
+The generated points are given as an array of shape :math:`(n\_samples, ambient\_dim)`.
+
+Example
+"""""""
+
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+ from gudhi import AlphaComplex
+
+ # Generate 50 points on a sphere in R^2
+ gen_points = points.sphere(n_samples = 50, ambient_dim = 2, radius = 1, sample = "random")
+
+ # Create an alpha complex from the generated points
+ alpha_complex = AlphaComplex(points = gen_points)
+
+.. autofunction:: gudhi.datasets.generators.points.sphere
+
+Points on a flat torus
+^^^^^^^^^^^^^^^^^^^^^^
+
+You can also generate points on a torus.
+
+Two functions are available and give the same output: the first one depends on **CGAL** and the second does not and consists of full python code.
+
+In addition, two sample types are provided: you can either generate i.i.d. points on a d-torus in :math:`R^{2d}` *randomly* or on a *grid*.
+
+First function: **ctorus**
+"""""""""""""""""""""""""""
+
+The user should provide the number of points to be generated on the torus :code:`n_samples`, and the dimension :code:`dim` of the torus on which points would be generated in :math:`R^{2dim}`.
+The :code:`sample` argument is optional and is set to **'random'** by default.
+In this case, the returned generated points would be an array of shape :math:`(n\_samples, 2*dim)`.
+Otherwise, if set to **'grid'**, the points are generated on a grid and would be given as an array of shape:
+
+.. math::
+
+ ( ⌊n\_samples^{1 \over {dim}}⌋^{dim}, 2*dim )
+
+**Note 1:** The first dimension of the output array is rounded down to the closest perfect :math:`dim^{th}` power.
+
+**Note 2:** This version is recommended when the user wishes to use **'grid'** as sample type, or **'random'** with a relatively small number of samples (~ less than 150).
+
+Example
+"""""""
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+
+ # Generate 50 points randomly on a torus in R^6
+ gen_points = points.ctorus(n_samples = 50, dim = 3)
+
+ # Generate 27 points on a torus as a grid in R^6
+ gen_points = points.ctorus(n_samples = 50, dim = 3, sample = 'grid')
+
+.. autofunction:: gudhi.datasets.generators.points.ctorus
+
+Second function: **torus**
+"""""""""""""""""""""""""""
+
+The user should provide the number of points to be generated on the torus :code:`n_samples` and the dimension :code:`dim` of the torus on which points would be generated in :math:`R^{2dim}`.
+The :code:`sample` argument is optional and is set to **'random'** by default.
+The other allowed value of sample type is **'grid'**.
+
+**Note:** This version is recommended when the user wishes to use **'random'** as sample type with a large number of samples and a low dimension.
+
+Example
+"""""""
+.. code-block:: python
+
+ from gudhi.datasets.generators import points
+
+ # Generate 50 points randomly on a torus in R^6
+ gen_points = points.torus(n_samples = 50, dim = 3)
+
+ # Generate 27 points on a torus as a grid in R^6
+ gen_points = points.torus(n_samples = 50, dim = 3, sample = 'grid')
+
+
+.. autofunction:: gudhi.datasets.generators.points.torus
diff --git a/src/python/doc/examples.rst b/src/python/doc/examples.rst
index 76e5d4c7..1442f185 100644
--- a/src/python/doc/examples.rst
+++ b/src/python/doc/examples.rst
@@ -8,6 +8,7 @@ Examples
.. only:: builder_html
* :download:`alpha_complex_diagram_persistence_from_off_file_example.py <../example/alpha_complex_diagram_persistence_from_off_file_example.py>`
+ * :download:`alpha_complex_from_generated_points_on_sphere_example.py <../example/alpha_complex_from_generated_points_on_sphere_example.py>`
* :download:`alpha_complex_from_points_example.py <../example/alpha_complex_from_points_example.py>`
* :download:`alpha_rips_persistence_bottleneck_distance.py <../example/alpha_rips_persistence_bottleneck_distance.py>`
* :download:`bottleneck_basic_example.py <../example/bottleneck_basic_example.py>`
diff --git a/src/python/doc/img/sphere_3d.png b/src/python/doc/img/sphere_3d.png
new file mode 100644
index 00000000..70f3184f
--- /dev/null
+++ b/src/python/doc/img/sphere_3d.png
Binary files differ
diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst
index 040e57a4..2d7921ae 100644
--- a/src/python/doc/index.rst
+++ b/src/python/doc/index.rst
@@ -91,3 +91,8 @@ Clustering
**********
.. include:: clustering.inc
+
+Datasets generators
+*******************
+
+.. include:: datasets_generators.inc
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 66efe45a..35c344e3 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -41,7 +41,7 @@ there.
The library uses c++14 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.56.0,
`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.5 to generate makefiles,
-`NumPy <http://numpy.org>`_, `Cython <https://www.cython.org/>`_ and
+`NumPy <http://numpy.org>`_ :math:`\geq` 1.15.0, `Cython <https://www.cython.org/>`_ and
`pybind11 <https://github.com/pybind/pybind11>`_ to compile
the GUDHI Python module.
It is a multi-platform library and compiles on Linux, Mac OSX and Visual
@@ -99,20 +99,14 @@ Or install it definitely in your Python packages folder:
.. code-block:: bash
cd /path-to-gudhi/build/python
- # May require sudo or administrator privileges
- make install
+ python setup.py install # add --user to the command if you do not have the permission
+ # Or 'pip install .'
.. note::
- :code:`make install` is only a
- `CMake custom targets <https://cmake.org/cmake/help/latest/command/add_custom_target.html>`_
- to shortcut :code:`python setup.py install` command.
It does not take into account :code:`CMAKE_INSTALL_PREFIX`.
- But one can use :code:`python setup.py install ...` specific options in the python directory:
-
-.. code-block:: bash
-
- python setup.py install --prefix /home/gudhi # Install in /home/gudhi directory
+ But one can use
+ `alternate location installation <https://docs.python.org/3/install/#alternate-installation>`_.
Test suites
===========
@@ -200,8 +194,10 @@ A complete configuration would be :
Documentation
=============
-To build the documentation, `sphinx-doc <http://www.sphinx-doc.org>`_ and
-`sphinxcontrib-bibtex <https://sphinxcontrib-bibtex.readthedocs.io>`_ are
+To build the documentation, `sphinx-doc <http://www.sphinx-doc.org>`_,
+`sphinxcontrib-bibtex <https://sphinxcontrib-bibtex.readthedocs.io>`_,
+`sphinxcontrib-paramlinks <https://github.com/sqlalchemyorg/sphinx-paramlinks>`_ and
+`python-docs-theme <https://github.com/python/python-docs-theme>`_ are
required. As the documentation is auto-tested, `CGAL`_, `Eigen`_,
`Matplotlib`_, `NumPy`_, `POT`_, `Scikit-learn`_ and `SciPy`_ are
also mandatory to build the documentation.
@@ -363,7 +359,7 @@ Python Optimal Transport
------------------------
The :doc:`Wasserstein distance </wasserstein_distance_user>`
-module requires `POT <https://pot.readthedocs.io/>`_, a library that provides
+module requires `POT <https://pythonot.github.io/>`_, a library that provides
several solvers for optimization problems related to Optimal Transport.
PyTorch
@@ -402,8 +398,9 @@ TensorFlow
Bug reports and contributions
*****************************
-Please help us improving the quality of the GUDHI library. You may report bugs or suggestions to:
-
- Contact: gudhi-users@lists.gforge.inria.fr
+Please help us improve the quality of the GUDHI library.
+You may `report bugs <https://github.com/GUDHI/gudhi-devel/issues>`_ or
+`contact us <https://gudhi.inria.fr/contact/>`_ for any suggestions.
-GUDHI is open to external contributions. If you want to join our development team, please contact us.
+GUDHI is open to external contributions. If you want to join our development team, please take some time to read our
+`contributing guide <https://github.com/GUDHI/gudhi-devel/blob/master/.github/CONTRIBUTING.md>`_.
diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index 9ffc2759..76eb1469 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -44,7 +44,7 @@ Basic example
*************
This example computes the 1-Wasserstein distance from 2 persistence diagrams with Euclidean ground metric.
-Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values.
+Note that persistence diagrams must be submitted as (n x 2) numpy arrays.
.. testcode::
@@ -67,14 +67,16 @@ We can also have access to the optimal matching by letting `matching=True`.
It is encoded as a list of indices (i,j), meaning that the i-th point in X
is mapped to the j-th point in Y.
An index of -1 represents the diagonal.
+It handles essential parts (points with infinite coordinates). However if the cardinalities of the essential parts differ,
+any matching has a cost +inf and thus can be considered to be optimal. In such a case, the function returns `(np.inf, None)`.
.. testcode::
import gudhi.wasserstein
import numpy as np
- dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
- dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]])
+ dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974], [3, np.inf]])
+ dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1], [4, np.inf]])
cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, matching=True, order=1, internal_p=2)
message_cost = "Wasserstein distance value = %.2f" %cost
@@ -90,16 +92,31 @@ An index of -1 represents the diagonal.
for j in dgm2_to_diagonal:
print("point %s in dgm2 is matched to the diagonal" %j)
-The output is:
+ # An example where essential part cardinalities differ
+ dgm3 = np.array([[1, 2], [0, np.inf]])
+ dgm4 = np.array([[1, 2], [0, np.inf], [1, np.inf]])
+ cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm3, dgm4, matching=True, order=1, internal_p=2)
+ print("\nSecond example:")
+ print("cost:", cost)
+ print("matchings:", matchings)
+
+
+The output is:
.. testoutput::
- Wasserstein distance value = 2.15
+ Wasserstein distance value = 3.15
point 0 in dgm1 is matched to point 0 in dgm2
point 1 in dgm1 is matched to point 2 in dgm2
+ point 3 in dgm1 is matched to point 3 in dgm2
point 2 in dgm1 is matched to the diagonal
point 1 in dgm2 is matched to the diagonal
+ Second example:
+ cost: inf
+ matchings: None
+
+
Barycenters
-----------
@@ -181,4 +198,4 @@ Tutorial
This
`notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-Barycenters-of-persistence-diagrams.ipynb>`_
-presents the concept of barycenter, or Fréchet mean, of a family of persistence diagrams. \ No newline at end of file
+presents the concept of barycenter, or Fréchet mean, of a family of persistence diagrams.
diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
index 1e0273b3..fe03be31 100755
--- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
@@ -25,12 +25,12 @@ parser = argparse.ArgumentParser(
description="AlphaComplex creation from " "points read in a OFF file.",
epilog="Example: "
"example/alpha_complex_diagram_persistence_from_off_file_example.py "
- "-f ../data/points/tore3D_300.off -a 0.6"
+ "-f ../data/points/tore3D_300.off"
"- Constructs a alpha complex with the "
"points from the given OFF file.",
)
parser.add_argument("-f", "--file", type=str, required=True)
-parser.add_argument("-a", "--max_alpha_square", type=float, default=0.5)
+parser.add_argument("-a", "--max_alpha_square", type=float, required=False)
parser.add_argument("-b", "--band", type=float, default=0.0)
parser.add_argument(
"--no-diagram",
@@ -47,21 +47,20 @@ with open(args.file, "r") as f:
print("##############################################################")
print("AlphaComplex creation from points read in a OFF file")
- message = "AlphaComplex with max_edge_length=" + repr(args.max_alpha_square)
- print(message)
-
alpha_complex = gudhi.AlphaComplex(off_file=args.file)
- simplex_tree = alpha_complex.create_simplex_tree(
- max_alpha_square=args.max_alpha_square
- )
+ if args.max_alpha_square is not None:
+ print("with max_edge_length=", args.max_alpha_square)
+ simplex_tree = alpha_complex.create_simplex_tree(
+ max_alpha_square=args.max_alpha_square
+ )
+ else:
+ simplex_tree = alpha_complex.create_simplex_tree()
- message = "Number of simplices=" + repr(simplex_tree.num_simplices())
- print(message)
+ print("Number of simplices=", simplex_tree.num_simplices())
diag = simplex_tree.persistence()
- print("betti_numbers()=")
- print(simplex_tree.betti_numbers())
+ print("betti_numbers()=", simplex_tree.betti_numbers())
if args.no_diagram == False:
import matplotlib.pyplot as plot
diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py
new file mode 100644
index 00000000..3558077e
--- /dev/null
+++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+from gudhi.datasets.generators import _points
+from gudhi import AlphaComplex
+
+
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Hind Montassif
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+__author__ = "Hind Montassif"
+__copyright__ = "Copyright (C) 2021 Inria"
+__license__ = "MIT"
+
+print("#####################################################################")
+print("AlphaComplex creation from generated points on sphere")
+
+
+gen_points = _points.sphere(n_samples = 50, ambient_dim = 2, radius = 1, sample = "random")
+
+# Create an alpha complex
+alpha_complex = AlphaComplex(points = gen_points)
+simplex_tree = alpha_complex.create_simplex_tree()
+
+result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
+ repr(simplex_tree.num_simplices()) + ' simplices - ' + \
+ repr(simplex_tree.num_vertices()) + ' vertices.'
+print(result_str)
+
diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py
index 465632eb..5d5ca66a 100755
--- a/src/python/example/alpha_complex_from_points_example.py
+++ b/src/python/example/alpha_complex_from_points_example.py
@@ -19,7 +19,7 @@ __license__ = "MIT"
print("#####################################################################")
print("AlphaComplex creation from points")
alpha_complex = AlphaComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]])
-simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=60.0)
+simplex_tree = alpha_complex.create_simplex_tree()
if simplex_tree.find([0, 1]):
print("[0, 1] Found !!")
diff --git a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
index 236d085d..8a9cc857 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
@@ -21,11 +21,12 @@ parser = argparse.ArgumentParser(
description="RipsComplex creation from " "a distance matrix read in a csv file.",
epilog="Example: "
"example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py "
- "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3"
+ "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3"
"- Constructs a Rips complex with the "
"distance matrix from the given csv file.",
)
parser.add_argument("-f", "--file", type=str, required=True)
+parser.add_argument("-s", "--separator", type=str, required=True)
parser.add_argument("-e", "--max_edge_length", type=float, default=0.5)
parser.add_argument("-d", "--max_dimension", type=int, default=1)
parser.add_argument("-b", "--band", type=float, default=0.0)
@@ -44,7 +45,7 @@ print("RipsComplex creation from distance matrix read in a csv file")
message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length)
print(message)
-distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file)
+distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file, separator=args.separator)
rips_complex = gudhi.RipsComplex(
distance_matrix=distance_matrix, max_edge_length=args.max_edge_length
)
diff --git a/src/python/gudhi/clustering/tomato.py b/src/python/gudhi/clustering/tomato.py
index fbba3cc8..d0e9995c 100644
--- a/src/python/gudhi/clustering/tomato.py
+++ b/src/python/gudhi/clustering/tomato.py
@@ -271,7 +271,7 @@ class Tomato:
l = self.max_weight_per_cc_.min()
r = self.max_weight_per_cc_.max()
if self.diagram_.size > 0:
- plt.plot(self.diagram_[:, 0], self.diagram_[:, 1], "ro")
+ plt.plot(self.diagram_[:, 0], self.diagram_[:, 1], "o", color="red")
l = min(l, self.diagram_[:, 1].min())
r = max(r, self.diagram_[:, 0].max())
if l == r:
@@ -283,7 +283,7 @@ class Tomato:
l, r = -1.0, 1.0
plt.plot([l, r], [l, r])
plt.plot(
- self.max_weight_per_cc_, numpy.full(self.max_weight_per_cc_.shape, 1.1 * l - 0.1 * r), "ro", color="green"
+ self.max_weight_per_cc_, numpy.full(self.max_weight_per_cc_.shape, 1.1 * l - 0.1 * r), "o", color="green"
)
plt.show()
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index 28fbe3af..8e244bb8 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -35,7 +35,7 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except+
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
vector[int] betti_numbers() nogil
@@ -147,7 +147,7 @@ cdef class CubicalComplex:
:func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -169,7 +169,7 @@ cdef class CubicalComplex:
"""This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -281,4 +281,8 @@ cdef class CubicalComplex:
launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np.array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
diff --git a/src/python/gudhi/datasets/__init__.py b/src/python/gudhi/datasets/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/__init__.py
diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/__init__.py
diff --git a/src/python/gudhi/datasets/generators/_points.cc b/src/python/gudhi/datasets/generators/_points.cc
new file mode 100644
index 00000000..82fea25b
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/_points.cc
@@ -0,0 +1,121 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Hind Montassif
+ *
+ * Copyright (C) 2021 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <gudhi/random_point_generators.h>
+#include <gudhi/Debug_utils.h>
+
+#include <CGAL/Epick_d.h>
+
+namespace py = pybind11;
+
+
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern;
+
+py::array_t<double> generate_points_on_sphere(size_t n_samples, int ambient_dim, double radius, std::string sample) {
+
+ if (sample != "random") {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ py::array_t<double> points({n_samples, (size_t)ambient_dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ GUDHI_CHECK(n_samples == buf.shape[0], "Py array first dimension not matching n_samples on sphere");
+ GUDHI_CHECK(ambient_dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension");
+
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, ambient_dim, radius);
+ }
+
+ for (size_t i = 0; i < n_samples; i++)
+ for (int j = 0; j < ambient_dim; j++)
+ ptr[i*ambient_dim+j] = points_generated[i][j];
+
+ return points;
+}
+
+py::array_t<double> generate_points_on_torus(size_t n_samples, int dim, std::string sample) {
+
+ if ( (sample != "random") && (sample != "grid")) {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_torus_d<Kern>(n_samples, dim, sample);
+ }
+
+ size_t npoints = points_generated.size();
+
+ GUDHI_CHECK(2*dim == points_generated[0].size(), "Py array second dimension not matching the double torus dimension");
+
+ py::array_t<double> points({npoints, (size_t)2*dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ for (size_t i = 0; i < npoints; i++)
+ for (int j = 0; j < 2*dim; j++)
+ ptr[i*(2*dim)+j] = points_generated[i][j];
+
+ return points;
+}
+
+PYBIND11_MODULE(_points, m) {
+ m.attr("__license__") = "LGPL v3";
+
+ m.def("sphere", &generate_points_on_sphere,
+ py::arg("n_samples"), py::arg("ambient_dim"),
+ py::arg("radius") = 1., py::arg("sample") = "random",
+ R"pbdoc(
+ Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+ :param ambient_dim: The ambient dimension d.
+ :type ambient_dim: integer
+ :param radius: The radius. Default value is `1.`.
+ :type radius: float
+ :param sample: The sample type. Default and only available value is `"random"`.
+ :type sample: string
+ :returns: the generated points on a sphere.
+ )pbdoc");
+
+ m.def("ctorus", &generate_points_on_torus,
+ py::arg("n_samples"), py::arg("dim"), py::arg("sample") = "random",
+ R"pbdoc(
+ Generate random i.i.d. points on a d-torus in R^2d or as a grid
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+ :param dim: The dimension of the torus on which points would be generated in R^2*dim.
+ :type dim: integer
+ :param sample: The sample type. Available values are: `"random"` and `"grid"`. Default value is `"random"`.
+ :type sample: string
+ :returns: the generated points on a torus.
+
+ The shape of returned numpy array is:
+
+ If sample is 'random': (n_samples, 2*dim).
+
+ If sample is 'grid': (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ )pbdoc");
+}
diff --git a/src/python/gudhi/datasets/generators/points.py b/src/python/gudhi/datasets/generators/points.py
new file mode 100644
index 00000000..9bb2799d
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/points.py
@@ -0,0 +1,59 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+from ._points import ctorus
+from ._points import sphere
+
+def _generate_random_points_on_torus(n_samples, dim):
+
+ # Generate random angles of size n_samples*dim
+ alpha = 2*np.pi*np.random.rand(n_samples*dim)
+
+ # Based on angles, construct points of size n_samples*dim on a circle and reshape the result in a n_samples*2*dim array
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)]).reshape(-1, 2*dim)
+
+ return array_points
+
+def _generate_grid_points_on_torus(n_samples, dim):
+
+ # Generate points on a dim-torus as a grid
+ n_samples_grid = int((n_samples+.5)**(1./dim)) # add .5 to avoid rounding down with numerical approximations
+ alpha = np.linspace(0, 2*np.pi, n_samples_grid, endpoint=False)
+
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)])
+ array_points_idx = np.empty([n_samples_grid]*dim + [dim], dtype=int)
+ for i, x in enumerate(np.ix_(*([np.arange(n_samples_grid)]*dim))):
+ array_points_idx[...,i] = x
+ return array_points[array_points_idx].reshape(-1, 2*dim)
+
+def torus(n_samples, dim, sample='random'):
+ """
+    Generate points on a flat dim-torus in R^2*dim either randomly or on a grid
+
+ :param n_samples: The number of points to be generated.
+ :param dim: The dimension of the torus on which points would be generated in R^2*dim.
+ :param sample: The sample type of the generated points. Can be 'random' or 'grid'.
+ :returns: numpy array containing the generated points on a torus.
+
+ The shape of returned numpy array is:
+
+ If sample is 'random': (n_samples, 2*dim).
+
+ If sample is 'grid': (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ """
+ if sample == 'random':
+ # Generate points randomly
+ return _generate_random_points_on_torus(n_samples, dim)
+ elif sample == 'grid':
+ # Generate points on a grid
+ return _generate_grid_points_on_torus(n_samples, dim)
+ else:
+ raise ValueError("Sample type '{}' is not supported".format(sample))
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index d353d2af..6c21e902 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -32,7 +32,7 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>>":
Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
vector[int] betti_numbers() nogil
@@ -148,7 +148,7 @@ cdef class PeriodicCubicalComplex:
:func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -170,7 +170,7 @@ cdef class PeriodicCubicalComplex:
"""This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -280,4 +280,8 @@ cdef class PeriodicCubicalComplex:
launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np.array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
index 994be3b6..de5844f9 100644
--- a/src/python/gudhi/point_cloud/knn.py
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -8,6 +8,7 @@
# - YYYY/MM Author: Description of the modification
import numpy
+import warnings
# TODO: https://github.com/facebookresearch/faiss
@@ -111,7 +112,7 @@ class KNearestNeighbors:
nargs = {
k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}
}
- self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs)
+ self.nn = NearestNeighbors(n_neighbors=self.k, metric=self.metric, **nargs)
self.nn.fit(X)
if self.params["implementation"] == "hnsw":
@@ -257,6 +258,9 @@ class KNearestNeighbors:
if ef is not None:
self.graph.set_ef(ef)
neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"])
+ with warnings.catch_warnings():
+ if not(numpy.all(numpy.isfinite(distances))):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
# The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn,
# which returns a priority_queue, and then fills the return array backwards with top/pop on the queue.
if self.return_index:
@@ -290,6 +294,9 @@ class KNearestNeighbors:
if self.return_index:
if self.return_distance:
distances, neighbors = mat.Kmin_argKmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not(torch.isfinite(distances).all()):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
if p != numpy.inf:
distances = distances ** (1.0 / p)
return neighbors, distances
@@ -298,6 +305,9 @@ class KNearestNeighbors:
return neighbors
if self.return_distance:
distances = mat.Kmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not(torch.isfinite(distances).all()):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
if p != numpy.inf:
distances = distances ** (1.0 / p)
return distances
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index 84bc99a2..e883b5dd 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -6,6 +6,7 @@
#
# Modification(s):
# - 2020/06 Martin: ATOL integration
+# - 2021/11 Vincent Rouvreau: factorize _automatic_sample_range
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
@@ -45,10 +46,14 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
if np.isnan(np.array(self.im_range)).any():
- new_X = BirthPersistenceTransform().fit_transform(X)
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ try:
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
return self
def transform(self, X):
@@ -94,6 +99,28 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
"""
return self.fit_transform([diag])[0,:]
+def _automatic_sample_range(sample_range, X, y):
+ """
+ Compute and returns sample range from the persistence diagrams if one of the sample_range values is numpy.nan.
+
+ Parameters:
+ sample_range (a numpy array of 2 float): minimum and maximum of all piecewise-linear function domains, of
+ the form [x_min, x_max].
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ nan_in_range = np.isnan(sample_range)
+ if nan_in_range.any():
+ try:
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
+ [Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ return np.where(nan_in_range, np.array([mx, My]), sample_range)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
+ return sample_range
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
@@ -119,10 +146,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if self.nan_in_range.any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(self.nan_in_range, np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -218,10 +242,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -307,10 +328,7 @@ class BettiCurve(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -374,10 +392,7 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -396,9 +411,13 @@ class Entropy(BaseEstimator, TransformerMixin):
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
-
orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
- new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+ try:
+ new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ new_diagram = np.empty(shape = [0, 2])
if self.mode == "scalar":
ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
@@ -412,12 +431,11 @@ class Entropy(BaseEstimator, TransformerMixin):
max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
for k in range(min_idx, max_idx):
ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
- if self.normalized:
- ent = ent / np.linalg.norm(ent, ord=1)
- Xfit.append(np.reshape(ent,[1,-1]))
-
- Xfit = np.concatenate(Xfit, 0)
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+ Xfit = np.concatenate(Xfit, axis=0)
return Xfit
def __call__(self, diag):
@@ -478,7 +496,13 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
diagram, num_pts_in_diag = X[i], X[i].shape[0]
pers = 0.5 * (diagram[:,1]-diagram[:,0])
min_pers = np.minimum(pers,np.transpose(pers))
- distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ # Works fine with sklearn 1.0, but an ValueError exception is thrown on past versions
+ try:
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ distances = np.empty(shape = [0, 0])
vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
dim = min(len(vect), thresh)
Xfit[i, :dim] = vect[:dim]
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 000323af..006a24ed 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -44,7 +44,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
- Simplex_tree() nogil
+ Simplex_tree_interface_full_featured() nogil
double simplex_filtration(vector[int] simplex) nogil
void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
void initialize_filtration() nogil
@@ -78,7 +78,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[int] betti_numbers() nogil
vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index d7991417..c3720936 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -9,9 +9,8 @@
from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
-import numpy
-from numpy import array as np_array
-cimport simplex_tree
+import numpy as np
+cimport gudhi.simplex_tree
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -412,7 +411,7 @@ cdef class SimplexTree:
"""This function retrieves good values for extended persistence, and separate the diagrams into the Ordinary,
Relative, Extended+ and Extended- subdiagrams.
- :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11.
+ :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value (i.e., the absolute value of the difference between the
persistence diagram point coordinates) to take into account (strictly greater than min_persistence).
@@ -449,7 +448,7 @@ cdef class SimplexTree:
"""This function computes and returns the persistence of the simplicial complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number. Default value is 11.
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -472,7 +471,7 @@ cdef class SimplexTree:
when you do not want the list :func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number. Default value is 11.
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -542,7 +541,11 @@ cdef class SimplexTree:
function to be launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np_array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
def persistence_pairs(self):
"""This function returns a list of persistence birth and death simplices pairs.
@@ -583,8 +586,8 @@ cdef class SimplexTree:
"""
assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
gen = self.pcohptr.lower_star_generators()
- normal = [np_array(d).reshape(-1,2) for d in gen.first]
- infinite = [np_array(d) for d in gen.second]
+ normal = [np.array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np.array(d) for d in gen.second]
return (normal, infinite)
def flag_persistence_generators(self):
@@ -602,19 +605,19 @@ cdef class SimplexTree:
assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
gen = self.pcohptr.flag_generators()
if len(gen.first) == 0:
- normal0 = numpy.empty((0,3))
+ normal0 = np.empty((0,3))
normals = []
else:
l = iter(gen.first)
- normal0 = np_array(next(l)).reshape(-1,3)
- normals = [np_array(d).reshape(-1,4) for d in l]
+ normal0 = np.array(next(l)).reshape(-1,3)
+ normals = [np.array(d).reshape(-1,4) for d in l]
if len(gen.second) == 0:
- infinite0 = numpy.empty(0)
+ infinite0 = np.empty(0)
infinites = []
else:
l = iter(gen.second)
- infinite0 = np_array(next(l))
- infinites = [np_array(d).reshape(-1,2) for d in l]
+ infinite0 = np.array(next(l))
+ infinites = [np.array(d).reshape(-1,2) for d in l]
return (normal0, normals, infinite0, infinites)
def collapse_edges(self, nb_iterations = 1):
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index a9d1cdff..dc18806e 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -9,6 +9,7 @@
import numpy as np
import scipy.spatial.distance as sc
+import warnings
try:
import ot
@@ -70,6 +71,7 @@ def _perstot_autodiff(X, order, internal_p):
'''
return _dist_to_diag(X, internal_p).norms.lp(order)
+
def _perstot(X, order, internal_p, enable_autodiff):
'''
:param X: (n x 2) numpy.array (points of a given diagram).
@@ -79,6 +81,9 @@ def _perstot(X, order, internal_p, enable_autodiff):
transparent to automatic differentiation.
:type enable_autodiff: bool
:returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+
+ .. note::
+ Can be +inf if the diagram has an essential part (points with infinite coordinates).
'''
if enable_autodiff:
import eagerpy as ep
@@ -88,32 +93,163 @@ def _perstot(X, order, internal_p, enable_autodiff):
return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
-def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False):
+def _get_essential_parts(a):
'''
- :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
- (i.e. with infinite coordinate).
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
- a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
- the j-th point in Y, with the convention (-1) represents the diagonal.
- :param order: exponent for Wasserstein; Default value is 1.
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
- Default value is `np.inf`.
- :param enable_autodiff: If X and Y are torch.tensor or tensorflow.Tensor, make the computation
+ :param a: (n x 2) numpy.array (point of a diagram)
+ :returns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite
+ coordinates that can occur in a diagram, namely:
+ type0 : (-inf, finite)
+ type1 : (finite, +inf)
+ type2 : (-inf, +inf)
+ type3 : (-inf, -inf)
+ type4 : (+inf, +inf)
+ .. note::
+ For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x.
+ Note also that points with (+inf, -inf) are not handled (points (x,y) in dgm satisfy by assumption (y >= x)).
+
+ Finally, we consider that points with coordinates (-inf,-inf) and (+inf, +inf) belong to the diagonal.
+ '''
+ if len(a):
+ first_coord_finite = np.isfinite(a[:,0])
+ second_coord_finite = np.isfinite(a[:,1])
+ first_coord_infinite_positive = (a[:,0] == np.inf)
+ second_coord_infinite_positive = (a[:,1] == np.inf)
+ first_coord_infinite_negative = (a[:,0] == -np.inf)
+ second_coord_infinite_negative = (a[:,1] == -np.inf)
+
+ ess_first_type = np.where(second_coord_finite & first_coord_infinite_negative)[0] # coord (-inf, x)
+ ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0] # coord (x, +inf)
+ ess_third_type = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0] # coord (-inf, +inf)
+
+ ess_fourth_type = np.where(first_coord_infinite_negative & second_coord_infinite_negative)[0] # coord (-inf, -inf)
+ ess_fifth_type = np.where(first_coord_infinite_positive & second_coord_infinite_positive)[0] # coord (+inf, +inf)
+ return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type
+ else:
+ return [], [], [], [], []
+
+
+def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis):
+ '''
+ :param X: (n x 2) numpy.array (dgm points)
+ :param Y: (n x 2) numpy.array (dgm points)
+ :param idX: indices to consider for this one dimensional OT problem (in X)
+ :param idY: indices to consider for this one dimensional OT problem (in Y)
+ :param order: exponent for Wasserstein distance computation
+ :param axis: must be 0 or 1, correspond to the coordinate which is finite.
+ :returns: cost (float) and match for points with *one* infinite coordinate.
+
+ .. note::
+ Assume idX, idY come when calling _handle_essential_parts, thus have same length.
+ '''
+ u = X[idX, axis]
+ v = Y[idY, axis]
+
+ cost = np.sum(np.abs(np.sort(u) - np.sort(v))**(order)) # OT cost in 1D
+
+ sortidX = idX[np.argsort(u)]
+ sortidY = idY[np.argsort(v)]
+ # We return [i,j] sorted per value
+ match = list(zip(sortidX, sortidY))
+
+ return cost, match
+
+
+def _handle_essential_parts(X, Y, order):
+ '''
+ :param X: (n x 2) numpy array, first diagram.
+ :param Y: (n x 2) numpy array, second diagram.
+    :param order: Wasserstein order for cost computation.
+ :returns: cost and matching due to essential parts. If cost is +inf, matching will be set to None.
+ '''
+ ess_parts_X = _get_essential_parts(X)
+ ess_parts_Y = _get_essential_parts(Y)
+
+ # Treats the case of infinite cost (cardinalities of essential parts differ).
+    for u, v in list(zip(ess_parts_X, ess_parts_Y))[:3]: # ignore types 3 and 4 as they belong to the diagonal
+ if len(u) != len(v):
+ return np.inf, None
+
+ # Now we know each essential part has the same number of points in both diagrams.
+ # Handle type 0 and type 1 essential parts (those with one finite coordinates)
+ c1, m1 = _cost_and_match_essential_parts(X, Y, ess_parts_X[0], ess_parts_Y[0], axis=1, order=order)
+ c2, m2 = _cost_and_match_essential_parts(X, Y, ess_parts_X[1], ess_parts_Y[1], axis=0, order=order)
+
+ c = c1 + c2
+ m = m1 + m2
+
+    # Handle type 2 (coordinates (-inf,+inf), so we just align points)
+ m += list(zip(ess_parts_X[2], ess_parts_Y[2]))
+
+    # Handle types 3 and 4, considered as belonging to the diagonal so matched to (-1) with cost 0.
+ for z in ess_parts_X[3:]:
+ m += [(u, -1) for u in z] # points in X are matched to -1
+ for z in ess_parts_Y[3:]:
+        m += [(-1, v) for v in z]  # -1 is matched to points in Y
+
+ return c, np.array(m)
+
+
+def _finite_part(X):
+ '''
+ :param X: (n x 2) numpy array encoding a persistence diagram.
+ :returns: The finite part of a diagram `X` (points with finite coordinates).
+ '''
+ return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
+
+
+def _warn_infty(matching):
+ '''
+ Handle essential parts with different cardinalities. Warn the user about cost being infinite and (if
+ `matching=True`) about the returned matching being `None`.
+ '''
+ if matching:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf, and the returned matching is None.')
+ return np.inf, None
+ else:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf.')
+ return np.inf
+
+
+def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
+ keep_essential_parts=True):
+ '''
+    Compute the Wasserstein distance between persistence diagrams using Python Optimal Transport backend.
+ Diagrams can contain points with infinity coordinates (essential parts).
+ Points with (-inf,-inf) and (+inf,+inf) coordinates are considered as belonging to the diagonal.
+ If the distance between two diagrams is +inf (which happens if the cardinalities of essential
+ parts differ) and optimal matching is required, it will be set to ``None``.
+
+ :param X: The first diagram.
+ :type X: n x 2 numpy.array
+ :param Y: The second diagram.
+ :type Y: m x 2 numpy.array
+ :param matching: if ``True``, computes and returns the optimal matching between X and Y, encoded as
+ a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
+ the j-th point in Y, with the convention that (-1) represents the diagonal.
+ :param order: Wasserstein exponent q (1 <= q < infinity).
+ :type order: float
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
+ :type internal_p: float
+ :param enable_autodiff: If X and Y are ``torch.tensor`` or ``tensorflow.Tensor``, make the computation
transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
- with `matching=True`.
+ with ``matching=True`` and with ``keep_essential_parts=True``.
- .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+ .. note:: This considers the function defined on the coordinates of the off-diagonal finite points of X and Y
and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
:type enable_autodiff: bool
- :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+ :param keep_essential_parts: If ``False``, only considers the finite points in the diagrams.
+ Otherwise, include essential parts in cost and matching computation.
+ :type keep_essential_parts: bool
+ :returns: The Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
respect to the internal_p-norm as ground metric.
If matching is set to True, also returns the optimal matching between X and Y.
+ If cost is +inf, any matching is optimal and thus it returns `None` instead.
'''
+
+ # First step: handle empty diagrams
n = len(X)
m = len(Y)
- # handle empty diagrams
if n == 0:
if m == 0:
if not matching:
@@ -122,16 +258,45 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
else:
return 0., np.array([])
else:
- if not matching:
- return _perstot(Y, order, internal_p, enable_autodiff)
+ cost = _perstot(Y, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
else:
- return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)])
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[-1, j] for j in range(m)])
elif m == 0:
- if not matching:
- return _perstot(X, order, internal_p, enable_autodiff)
+ cost = _perstot(X, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
else:
- return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[i, -1] for i in range(n)])
+
+ # Check essential part and enable autodiff together
+ if enable_autodiff and keep_essential_parts:
+ warnings.warn('''enable_autodiff=True and keep_essential_parts=True are incompatible together.
+ keep_essential_parts is set to False: only points with finite coordinates are considered
+ in the following.
+ ''')
+ keep_essential_parts = False
+
+ # Second step: handle essential parts if needed.
+ if keep_essential_parts:
+ essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
+ if (essential_cost == np.inf):
+ return _warn_infty(matching) # Tells the user that cost is infty and matching (if True) is None.
+ # avoid computing transport cost between the finite parts if essential parts
+ # cardinalities do not match (saves time)
+ else:
+ essential_cost = 0
+ essential_matching = None
+
+ # Now the standard pipeline for finite parts
if enable_autodiff:
import eagerpy as ep
@@ -139,6 +304,12 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
Y_orig = ep.astensor(Y)
X = X_orig.numpy()
Y = Y_orig.numpy()
+
+ # Extract finite points of the diagrams.
+ X, Y = _finite_part(X), _finite_part(Y)
+ n = len(X)
+ m = len(Y)
+
M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
a[-1] = m
@@ -154,7 +325,10 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
# Now we turn to -1 points encoding the diagonal
match[:,0][match[:,0] >= n] = -1
match[:,1][match[:,1] >= m] = -1
- return ot_cost ** (1./order) , match
+ # Finally incorporate the essential part matching
+ if essential_matching is not None:
+ match = np.concatenate([match, essential_matching]) if essential_matching.size else match
+ return (ot_cost + essential_cost) ** (1./order) , match
if enable_autodiff:
P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
@@ -173,9 +347,9 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
return ep.concatenate(dists).norms.lp(order).raw
# We can also concatenate the 3 vectors to compute just one norm.
- # Comptuation of the otcost using the ot.emd2 library.
+    # Computation of the ot cost using the ot.emd2 library.
# Note: it is the Wasserstein distance to the power q.
# The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
ot_cost = ot.emd2(a, b, M, numItermax=2000000)
- return ot_cost ** (1./order)
+ return (ot_cost + essential_cost) ** (1./order)
diff --git a/src/python/pyproject.toml b/src/python/pyproject.toml
new file mode 100644
index 00000000..a9fb4985
--- /dev/null
+++ b/src/python/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools", "wheel", "numpy>=1.15.0", "cython", "pybind11"]
+build-backend = "setuptools.build_meta"
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index 98d058fc..23746998 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -41,10 +41,9 @@ for module in cython_modules:
libraries=libraries,
library_dirs=library_dirs,
include_dirs=include_dirs,
- runtime_library_dirs=runtime_library_dirs,
- cython_directives = {'language_level': str(sys.version_info[0])},))
+ runtime_library_dirs=runtime_library_dirs,))
-ext_modules = cythonize(ext_modules)
+ext_modules = cythonize(ext_modules, compiler_directives={'language_level': str(sys.version_info[0])})
for module in pybind11_modules:
my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
@@ -72,7 +71,7 @@ setup(
name = 'gudhi',
packages=find_packages(), # find_namespace_packages(include=["gudhi*"])
author='GUDHI Editorial Board',
- author_email='gudhi-contact@lists.gforge.inria.fr',
+ author_email='gudhi-contact@inria.fr',
version='@GUDHI_VERSION@',
url='https://gudhi.inria.fr/',
project_urls={
@@ -83,10 +82,10 @@ setup(
},
description='The Gudhi library is an open source library for ' \
'Computational Topology and Topological Data Analysis (TDA).',
+ data_files=[('.', ['./introduction.rst'])],
long_description_content_type='text/x-rst',
long_description=long_description,
ext_modules = ext_modules,
- install_requires = ['numpy >= 1.9',],
- setup_requires = ['cython','numpy >= 1.9','pybind11',],
+ install_requires = ['numpy >= 1.15.0',],
package_data={"": ["*.dll"], },
)
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index d0e4e9e8..29d559b3 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -174,3 +174,28 @@ def test_periodic_cofaces_of_persistence_pairs_when_pd_has_no_paired_birth_and_d
assert np.array_equal(pairs[1][0], np.array([0]))
assert np.array_equal(pairs[1][1], np.array([0, 1]))
assert np.array_equal(pairs[1][2], np.array([1]))
+
+def test_cubical_persistence_intervals_in_dimension():
+ cub = CubicalComplex(
+ dimensions=[3, 3],
+ top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ )
+ cub.compute_persistence()
+ H0 = cub.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[ 1., float("inf")]]))
+ assert cub.persistence_intervals_in_dimension(1).shape == (0, 2)
+
+def test_periodic_cubical_persistence_intervals_in_dimension():
+ cub = PeriodicCubicalComplex(
+ dimensions=[3, 3],
+ top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9],
+ periodic_dimensions = [True, True]
+ )
+ cub.compute_persistence()
+ H0 = cub.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[ 1., float("inf")]]))
+ H1 = cub.persistence_intervals_in_dimension(1)
+ assert np.array_equal(H1, np.array([[ 3., float("inf")], [ 7., float("inf")]]))
+ H2 = cub.persistence_intervals_in_dimension(2)
+ assert np.array_equal(H2, np.array([[ 9., float("inf")]]))
+ assert cub.persistence_intervals_in_dimension(3).shape == (0, 2)
diff --git a/src/python/test/test_datasets_generators.py b/src/python/test/test_datasets_generators.py
new file mode 100755
index 00000000..91ec4a65
--- /dev/null
+++ b/src/python/test/test_datasets_generators.py
@@ -0,0 +1,39 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Hind Montassif
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.datasets.generators import points
+
+import pytest
+
+def test_sphere():
+ assert points.sphere(n_samples = 10, ambient_dim = 2, radius = 1., sample = 'random').shape == (10, 2)
+
+ with pytest.raises(ValueError):
+ points.sphere(n_samples = 10, ambient_dim = 2, radius = 1., sample = 'other')
+
+def _basic_torus(impl):
+ assert impl(n_samples = 64, dim = 3, sample = 'random').shape == (64, 6)
+ assert impl(n_samples = 64, dim = 3, sample = 'grid').shape == (64, 6)
+
+ assert impl(n_samples = 10, dim = 4, sample = 'random').shape == (10, 8)
+
+ # Here 1**dim < n_samples < 2**dim, the output shape is therefore (1, 2*dim) = (1, 8), where shape[0] is rounded down to the closest perfect 'dim'th power
+ assert impl(n_samples = 10, dim = 4, sample = 'grid').shape == (1, 8)
+
+ with pytest.raises(ValueError):
+ impl(n_samples = 10, dim = 4, sample = 'other')
+
+def test_torus():
+ for torus_impl in [points.torus, points.ctorus]:
+ _basic_torus(torus_impl)
+ # Check that the two versions (torus and ctorus) generate the same output
+ assert points.ctorus(n_samples = 64, dim = 3, sample = 'random').all() == points.torus(n_samples = 64, dim = 3, sample = 'random').all()
+ assert points.ctorus(n_samples = 64, dim = 3, sample = 'grid').all() == points.torus(n_samples = 64, dim = 3, sample = 'grid').all()
+ assert points.ctorus(n_samples = 10, dim = 3, sample = 'grid').all() == points.torus(n_samples = 10, dim = 3, sample = 'grid').all()
diff --git a/src/python/test/test_dtm.py b/src/python/test/test_dtm.py
index 0a52279e..e46d616c 100755
--- a/src/python/test/test_dtm.py
+++ b/src/python/test/test_dtm.py
@@ -13,6 +13,7 @@ import numpy
import pytest
import torch
import math
+import warnings
def test_dtm_compare_euclidean():
@@ -87,3 +88,14 @@ def test_density():
assert density == pytest.approx(expected)
density = DTMDensity(weights=[0.5, 0.5], metric="neighbors", dim=1).fit_transform(distances)
assert density == pytest.approx(expected)
+
+def test_dtm_overflow_warnings():
+ pts = numpy.array([[10., 100000000000000000000000000000.], [1000., 100000000000000000000000000.]])
+
+ with warnings.catch_warnings(record=True) as w:
+ # TODO Test "keops" implementation as well when next version of pykeops (current is 1.5) is released (should fix the problem (cf. issue #543))
+ dtm = DistanceToMeasure(2, implementation="hnsw")
+ r = dtm.fit_transform(pts)
+ assert len(w) == 1
+ assert issubclass(w[0].category, RuntimeWarning)
+ assert "Overflow" in str(w[0].message)
diff --git a/src/python/test/test_reader_utils.py b/src/python/test/test_reader_utils.py
index 90da6651..e96e0569 100755
--- a/src/python/test/test_reader_utils.py
+++ b/src/python/test/test_reader_utils.py
@@ -30,7 +30,7 @@ def test_full_square_distance_matrix_csv_file():
test_file.write("0;1;2;3;\n1;0;4;5;\n2;4;0;6;\n3;5;6;0;")
test_file.close()
matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
- csv_file="full_square_distance_matrix.csv"
+ csv_file="full_square_distance_matrix.csv", separator=";"
)
assert matrix == [[], [1.0], [2.0, 4.0], [3.0, 5.0, 6.0]]
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index cda1a15b..93461f1e 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -3,9 +3,23 @@ import sys
import matplotlib.pyplot as plt
import numpy as np
import pytest
+import random
from sklearn.cluster import KMeans
+# Vectorization
+from gudhi.representations import (Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
+ TopologicalVector, PersistenceImage, Entropy)
+
+# Preprocessing
+from gudhi.representations import (BirthPersistenceTransform, Clamping, DiagramScaler, Padding, ProminentPoints, \
+ DiagramSelector)
+
+# Kernel
+from gudhi.representations import (PersistenceWeightedGaussianKernel, \
+ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
+ SlicedWassersteinKernel, PersistenceFisherKernel, WassersteinDistance)
+
def test_representations_examples():
# Disable graphics for testing purposes
@@ -98,3 +112,60 @@ def test_infinity():
assert c[1] == 0
assert c[7] == 3
assert c[9] == 2
+
+
+def test_preprocessing_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ assert not np.any(BirthPersistenceTransform()(empty_diag))
+ assert not np.any(Clamping().fit_transform(empty_diag))
+ assert not np.any(DiagramScaler()(empty_diag))
+ assert not np.any(Padding()(empty_diag))
+ assert not np.any(ProminentPoints()(empty_diag))
+ assert not np.any(DiagramSelector()(empty_diag))
+
+def pow(n):
+ return lambda x: np.power(x[1]-x[0],n)
+
+def test_vectorization_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ random_resolution = random.randint(50,100)*10 # between 500 and 1000
+ print("resolution = ", random_resolution)
+ lsc = Landscape(resolution=random_resolution)(empty_diag)
+ assert not np.any(lsc)
+ assert lsc.shape[0]%random_resolution == 0
+ slt = Silhouette(resolution=random_resolution, weight=pow(2))(empty_diag)
+ assert not np.any(slt)
+ assert slt.shape[0] == random_resolution
+ btc = BettiCurve(resolution=random_resolution)(empty_diag)
+ assert not np.any(btc)
+ assert btc.shape[0] == random_resolution
+ cpp = ComplexPolynomial(threshold=random_resolution, polynomial_type="T")(empty_diag)
+ assert not np.any(cpp)
+ assert cpp.shape[0] == random_resolution
+ tpv = TopologicalVector(threshold=random_resolution)(empty_diag)
+ assert tpv.shape[0] == random_resolution
+ assert not np.any(tpv)
+ prmg = PersistenceImage(resolution=[random_resolution,random_resolution])(empty_diag)
+ assert not np.any(prmg)
+ assert prmg.shape[0] == random_resolution * random_resolution
+ sce = Entropy(mode="scalar", resolution=random_resolution)(empty_diag)
+ assert not np.any(sce)
+ assert sce.shape[0] == 1
+ scv = Entropy(mode="vector", normalized=False, resolution=random_resolution)(empty_diag)
+ assert not np.any(scv)
+ assert scv.shape[0] == random_resolution
+
+def test_kernel_empty_diagrams():
+ empty_diag = np.empty(shape = [0, 2])
+ assert SlicedWassersteinDistance(num_directions=100)(empty_diag, empty_diag) == 0.
+ assert SlicedWassersteinKernel(num_directions=100, bandwidth=1.)(empty_diag, empty_diag) == 1.
+ assert WassersteinDistance(mode="hera", delta=0.0001)(empty_diag, empty_diag) == 0.
+ assert WassersteinDistance(mode="pot")(empty_diag, empty_diag) == 0.
+ assert BottleneckDistance(epsilon=.001)(empty_diag, empty_diag) == 0.
+ assert BottleneckDistance()(empty_diag, empty_diag) == 0.
+# PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))(empty_diag, empty_diag)
+# PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))(empty_diag, empty_diag)
+# PersistenceScaleSpaceKernel(bandwidth=1.)(empty_diag, empty_diag)
+# PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
+# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)(empty_diag, empty_diag)
+# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py
index b86e7498..a2f43a1b 100755
--- a/src/python/test/test_rips_complex.py
+++ b/src/python/test/test_rips_complex.py
@@ -133,3 +133,24 @@ def test_filtered_rips_from_distance_matrix():
assert simplex_tree.num_simplices() == 8
assert simplex_tree.num_vertices() == 4
+
+
+def test_sparse_with_multiplicity():
+ points = [
+ [3, 4],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [0.1, 2],
+ [3, 4.1],
+ ]
+ rips = RipsComplex(points=points, sparse=0.01)
+ simplex_tree = rips.create_simplex_tree(max_dimension=2)
+ assert simplex_tree.num_simplices() == 7
+ diag = simplex_tree.persistence()
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index a3eacaa9..31c46213 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -9,6 +9,7 @@
"""
from gudhi import SimplexTree, __GUDHI_USE_EIGEN3
+import numpy as np
import pytest
__author__ = "Vincent Rouvreau"
@@ -404,3 +405,46 @@ def test_boundaries_iterator():
with pytest.raises(RuntimeError):
list(st.get_boundaries([6])) # (6) does not exist
+
+def test_persistence_intervals_in_dimension():
+ # Here is our triangulation of a 2-torus - taken from https://dioscuri-tda.org/Paris_TDA_Tutorial_2021.html
+ # 0-----3-----4-----0
+ # | \ | \ | \ | \ |
+ # | \ | \ | \| \ |
+ # 1-----8-----7-----1
+ # | \ | \ | \ | \ |
+ # | \ | \ | \ | \ |
+ # 2-----5-----6-----2
+ # | \ | \ | \ | \ |
+ # | \ | \ | \ | \ |
+ # 0-----3-----4-----0
+ st = SimplexTree()
+ st.insert([0,1,8])
+ st.insert([0,3,8])
+ st.insert([3,7,8])
+ st.insert([3,4,7])
+ st.insert([1,4,7])
+ st.insert([0,1,4])
+ st.insert([1,2,5])
+ st.insert([1,5,8])
+ st.insert([5,6,8])
+ st.insert([6,7,8])
+ st.insert([2,6,7])
+ st.insert([1,2,7])
+ st.insert([0,2,3])
+ st.insert([2,3,5])
+ st.insert([3,4,5])
+ st.insert([4,5,6])
+ st.insert([0,4,6])
+ st.insert([0,2,6])
+ st.compute_persistence(persistence_dim_max=True)
+
+ H0 = st.persistence_intervals_in_dimension(0)
+ assert np.array_equal(H0, np.array([[ 0., float("inf")]]))
+ H1 = st.persistence_intervals_in_dimension(1)
+ assert np.array_equal(H1, np.array([[ 0., float("inf")], [ 0., float("inf")]]))
+ H2 = st.persistence_intervals_in_dimension(2)
+ assert np.array_equal(H2, np.array([[ 0., float("inf")]]))
+ # Test empty case
+ assert st.persistence_intervals_in_dimension(3).shape == (0, 2)
+ \ No newline at end of file
diff --git a/src/python/test/test_tomato.py b/src/python/test/test_tomato.py
index ecab03c4..c571f799 100755
--- a/src/python/test/test_tomato.py
+++ b/src/python/test/test_tomato.py
@@ -37,7 +37,7 @@ def test_tomato_1():
t = Tomato(metric="euclidean", graph_type="radius", r=4.7, k=4)
t.fit(a)
assert t.max_weight_per_cc_.size == 2
- assert np.array_equal(t.neighbors_, [[0, 1, 2], [0, 1, 2], [0, 1, 2], [3, 4, 5, 6], [3, 4, 5], [3, 4, 5], [3, 6]])
+ assert t.neighbors_ == [[0, 1, 2], [0, 1, 2], [0, 1, 2], [3, 4, 5, 6], [3, 4, 5], [3, 4, 5], [3, 6]]
t.plot_diagram()
t = Tomato(graph_type="radius", r=4.7, k=4, symmetrize_graph=True)
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index e3b521d6..3a004d77 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -5,25 +5,97 @@
Copyright (C) 2019 Inria
Modification(s):
+ - 2020/07 Théo Lacombe: Added tests about handling essential parts in diagrams.
- YYYY/MM Author: Description of the modification
"""
-from gudhi.wasserstein.wasserstein import _proj_on_diag
+from gudhi.wasserstein.wasserstein import _proj_on_diag, _finite_part, _handle_essential_parts, _get_essential_parts
+from gudhi.wasserstein.wasserstein import _warn_infty
from gudhi.wasserstein import wasserstein_distance as pot
from gudhi.hera import wasserstein_distance as hera
import numpy as np
import pytest
+
__author__ = "Theo Lacombe"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
+
def test_proj_on_diag():
dgm = np.array([[1., 1.], [1., 2.], [3., 5.]])
assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]])
empty = np.empty((0, 2))
assert np.array_equal(_proj_on_diag(empty), empty)
+
+def test_finite_part():
+ diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf],
+ [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]])
+ assert np.array_equal(_finite_part(diag), [[0, 1], [3, 5]])
+
+
+def test_handle_essential_parts():
+ diag1 = np.array([[0, 1], [3, 5],
+ [2, np.inf], [3, np.inf],
+ [-np.inf, 8], [-np.inf, 12],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag2 = np.array([[0, 2], [3, 5],
+ [2, np.inf], [4, np.inf],
+ [-np.inf, 8], [-np.inf, 11],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag3 = np.array([[0, 2], [3, 5],
+ [2, np.inf], [4, np.inf], [6, np.inf],
+ [-np.inf, 8], [-np.inf, 11],
+ [-np.inf, -np.inf],
+ [np.inf, np.inf],
+ [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ c, m = _handle_essential_parts(diag1, diag2, order=1)
+ assert c == pytest.approx(2, 0.0001) # Note: here c is only the cost due to essential part (thus 2, not 3)
+ # Similarly, the matching only corresponds to essential parts.
+ # Note that (-inf,-inf) and (+inf,+inf) coordinates are matched to the diagonal.
+ assert np.array_equal(m, [[4, 4], [5, 5], [2, 2], [3, 3], [8, 8], [9, 9], [6, -1], [7, -1], [-1, 6], [-1, 7]])
+
+ c, m = _handle_essential_parts(diag1, diag3, order=1)
+ assert c == np.inf
+ assert (m is None)
+
+
+def test_get_essential_parts():
+ diag1 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf],
+ [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]])
+
+ diag2 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf]])
+
+ res = _get_essential_parts(diag1)
+ res2 = _get_essential_parts(diag2)
+ assert np.array_equal(res[0], [4, 5])
+ assert np.array_equal(res[1], [2, 3])
+ assert np.array_equal(res[2], [8, 9])
+ assert np.array_equal(res[3], [6] )
+ assert np.array_equal(res[4], [7] )
+
+ assert np.array_equal(res2[0], [] )
+ assert np.array_equal(res2[1], [2, 3])
+ assert np.array_equal(res2[2], [] )
+ assert np.array_equal(res2[3], [] )
+ assert np.array_equal(res2[4], [] )
+
+
+def test_warn_infty():
+ assert _warn_infty(matching=False)==np.inf
+ c, m = _warn_infty(matching=True)
+ assert (c == np.inf)
+ assert (m is None)
+
+
def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
@@ -64,7 +136,7 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat
assert wasserstein_distance(diag4, diag5) == np.inf
assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.)
-
+ assert wasserstein_distance(diag5, emptydiag) == np.inf
if test_matching:
match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1]
@@ -78,6 +150,31 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat
match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1]
assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]])
+ if test_matching and test_infinity:
+ diag7 = np.array([[0, 3], [4, np.inf], [5, np.inf]])
+ diag8 = np.array([[0,1], [0, np.inf], [-np.inf, -np.inf], [np.inf, np.inf]])
+ diag9 = np.array([[-np.inf, -np.inf], [np.inf, np.inf]])
+ diag10 = np.array([[0,1], [-np.inf, -np.inf], [np.inf, np.inf]])
+
+ match = wasserstein_distance(diag5, diag6, matching=True, internal_p=2., order=2.)[1]
+ assert np.array_equal(match, [[0, -1], [-1,0], [-1, 1], [1, 2]])
+ match = wasserstein_distance(diag5, diag7, matching=True, internal_p=2., order=2.)[1]
+ assert (match is None)
+ cost, match = wasserstein_distance(diag7, emptydiag, matching=True, internal_p=2., order=2.3)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(emptydiag, diag7, matching=True, internal_p=2.42, order=2.)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(diag8, diag9, matching=True, internal_p=2., order=2.)
+ assert (cost == np.inf)
+ assert (match is None)
+ cost, match = wasserstein_distance(diag9, diag10, matching=True, internal_p=1., order=1.)
+ assert (cost == 1)
+        assert (match == [[0, -1],[1, -1],[-1, 0], [-1, 1], [-1, 2]]) # type 4 and 5 are matched to the diagonal anyway.
+ cost, match = wasserstein_distance(diag9, emptydiag, matching=True, internal_p=2., order=2.)
+ assert (cost == 0.)
+ assert (match == [[0, -1], [1, -1]])
def hera_wrap(**extra):
@@ -85,15 +182,19 @@ def hera_wrap(**extra):
return hera(*kargs,**kwargs,**extra)
return fun
+
def pot_wrap(**extra):
def fun(*kargs,**kwargs):
return pot(*kargs,**kwargs,**extra)
return fun
+
def test_wasserstein_distance_pot():
- _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True)
- _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False)
+ _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) # pot with its standard args
+ _basic_wasserstein(pot_wrap(enable_autodiff=True, keep_essential_parts=False), 1e-15, test_infinity=False, test_matching=False)
+
def test_wasserstein_distance_hera():
_basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
_basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
+