author     Hind-M <hind.montassif@gmail.com>    2022-05-24 16:55:02 +0200
committer  Hind-M <hind.montassif@gmail.com>    2022-05-24 16:55:02 +0200
commit     050081c554416a90f2b4aee359f15c020af66303 (patch)
tree       2b27233f55e4568ad010526e73be9b3b67246297
parent     a6a68c11455a554619d8a5b5d2f92c1ddbf45e99 (diff)
parent     e415d500fe8739414803b8cc127484562079d27e (diff)
Merge remote-tracking branch 'upstream/master' into cech_optimization
-rw-r--r--  .github/next_release.md | 7
-rw-r--r--  .github/workflows/pip-build-windows.yml | 8
-rw-r--r--  .github/workflows/pip-packaging-windows.yml | 8
-rw-r--r--  azure-pipelines.yml | 19
-rw-r--r--  biblio/bibliography.bib | 49
-rw-r--r--  biblio/how_to_cite_gudhi.bib.in | 6
-rw-r--r--  src/Alpha_complex/doc/Intro_alpha_complex.h | 6
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 5
-rw-r--r--  src/Alpha_complex/utilities/alphacomplex.md | 4
-rw-r--r--  src/Collapse/doc/intro_edge_collapse.h | 72
-rw-r--r--  src/Collapse/include/gudhi/Flag_complex_edge_collapser.h | 579
-rw-r--r--  src/Collapse/test/collapse_unit_test.cpp | 16
-rw-r--r--  src/Contraction/include/gudhi/Edge_contraction.h | 2
-rw-r--r--  src/Doxyfile.in | 80
-rw-r--r--  src/Nerve_GIC/doc/Intro_graph_induced_complex.h | 2
-rw-r--r--  src/Persistent_cohomology/doc/Intro_persistent_cohomology.h | 21
-rw-r--r--  src/Rips_complex/doc/Intro_rips_complex.h | 5
-rw-r--r--  src/Simplex_tree/doc/Intro_simplex_tree.h | 12
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h | 29
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h | 115
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h | 1
-rw-r--r--  src/Simplex_tree/test/CMakeLists.txt | 6
-rw-r--r--  src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp | 192
-rw-r--r--  src/Simplex_tree/test/simplex_tree_unit_test.cpp | 47
-rw-r--r--  src/Skeleton_blocker/concept/SkeletonBlockerDS.h | 2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h | 2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h | 2
-rwxr-xr-x [-rw-r--r--]  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h | 2
-rw-r--r--  src/Subsampling/include/gudhi/sparsify_point_set.h | 5
-rw-r--r--  src/Tangential_complex/benchmark/benchmark_tc.cpp | 1
-rw-r--r--  src/Tangential_complex/example/example_basic.cpp | 4
-rw-r--r--  src/Tangential_complex/example/example_with_perturb.cpp | 1
-rw-r--r--  src/Tangential_complex/test/test_tangential_complex.cpp | 1
-rw-r--r--  src/cmake/modules/GUDHI_third_party_libraries.cmake | 18
-rw-r--r--  src/common/doc/examples.h | 2
-rw-r--r--  src/common/doc/installation.h | 249
-rw-r--r--  src/common/doc/main_page.md | 7
-rw-r--r--  src/common/include/gudhi/reader_utils.h | 4
-rw-r--r--  src/python/CMakeLists.txt | 27
-rw-r--r--  src/python/doc/alpha_complex_user.rst | 7
-rw-r--r--  src/python/doc/installation.rst | 99
-rw-r--r--  src/python/doc/nerve_gic_complex_user.rst | 2
-rw-r--r--  src/python/doc/persistence_graphical_tools_user.rst | 2
-rw-r--r--  src/python/gudhi/__init__.py.in | 4
-rw-r--r--  src/python/gudhi/alpha_complex.pyx | 29
-rw-r--r--  src/python/gudhi/persistence_graphical_tools.py | 349
-rw-r--r--  src/python/gudhi/simplex_tree.pxd | 5
-rw-r--r--  src/python/gudhi/simplex_tree.pyx | 86
-rw-r--r--  src/python/gudhi/weighted_rips_complex.py | 6
-rw-r--r--  src/python/include/Alpha_complex_interface.h | 10
-rw-r--r--  src/python/include/Simplex_tree_interface.h | 16
-rw-r--r--  src/python/pyproject.toml | 2
-rw-r--r--  src/python/setup.py.in | 4
-rwxr-xr-x  src/python/test/test_alpha_complex.py | 27
-rw-r--r--  src/python/test/test_persistence_graphical_tools.py | 121
-rwxr-xr-x  src/python/test/test_simplex_tree.py | 131
56 files changed, 1495 insertions, 1023 deletions
diff --git a/.github/next_release.md b/.github/next_release.md
index 65a92816..f8085513 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -13,6 +13,13 @@ Below is a list of changes made since GUDHI 3.5.0:
- [Representations](https://gudhi.inria.fr/python/latest/representations.html#gudhi.representations.vector_methods.BettiCurve)
- A more flexible Betti curve class capable of computing exact curves
+- [Simplex tree](https://gudhi.inria.fr/python/latest/simplex_tree_ref.html)
+ - `__deepcopy__`, `copy` and copy constructors
+
+- Installation
+ - Boost &ge; 1.66.0 is now required (was &ge; 1.56.0).
+ - Python >= 3.5 and cython >= 0.27 are now required.
+
- [Module](link)
- ...
diff --git a/.github/workflows/pip-build-windows.yml b/.github/workflows/pip-build-windows.yml
index 954b59d5..30b0bd94 100644
--- a/.github/workflows/pip-build-windows.yml
+++ b/.github/workflows/pip-build-windows.yml
@@ -30,14 +30,14 @@ jobs:
run: |
mkdir build
cd ".\build\"
- cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ cmake -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
Get-Location
dir
cd ".\src\python\"
- cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
python setup.py bdist_wheel
- ls dist
+ ls ".\dist\"
cd ".\dist\"
Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
- name: Test python wheel
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 962ae68a..142a114c 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -33,14 +33,14 @@ jobs:
run: |
mkdir build
cd ".\build\"
- cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
+ cmake -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake -DVCPKG_TARGET_TRIPLET=x64-windows ..
Get-Location
dir
cd ".\src\python\"
- cp "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- cp "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ cp "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
python setup.py bdist_wheel
- ls dist
+ ls ".\dist\"
cd ".\dist\"
Get-ChildItem *.whl | ForEach-Object{python -m pip install --user $_.Name}
- name: Test python wheel
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 21664244..31264c37 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -5,7 +5,7 @@ jobs:
timeoutInMinutes: 0
cancelTimeoutInMinutes: 60
pool:
- vmImage: macOS-10.15
+ vmImage: macOS-latest
variables:
pythonVersion: '3.7'
cmakeBuildType: Release
@@ -30,7 +30,7 @@ jobs:
- bash: |
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON ..
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON ..
make
make doxygen
ctest --output-on-failure
@@ -64,20 +64,27 @@ jobs:
vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
displayName: 'Install build dependencies'
- script: |
- call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
mkdir build
cd build
- cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release $(cmakeVcpkgFlags) $(cmakeFlags) ..
+ cmake -G "Visual Studio 17 2022" -A x64 -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON $(cmakeVcpkgFlags) $(cmakeFlags) ..
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
ctest --output-on-failure -C Release -E diff_files
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
cmake -DWITH_GUDHI_PYTHON=ON .
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
cd src\python
- copy "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
- copy "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\mpfr*.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\gmp*.dll" ".\gudhi\"
copy "C:\vcpkg\installed\x64-windows\bin\tbb.dll" ".\gudhi\"
copy "C:\vcpkg\installed\x64-windows\bin\tbbmalloc.dll" ".\gudhi\"
python setup.py build_ext --inplace
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
SET PYTHONPATH=%CD%;%PYTHONPATH%
echo %PYTHONPATH%
ctest --output-on-failure -C Release
+ IF %errorlevel% NEQ 0 exit /b %errorlevel%
displayName: 'Build and test'
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index b5afff52..8462e731 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -14,7 +14,7 @@ publisher = {JMLR.org},
title = {{Statistical analysis and parameter selection for Mapper}},
volume = {19},
year = {2018},
-url = {http://jmlr.org/papers/v19/17-291.html},
+url = {https://jmlr.org/papers/v19/17-291.html},
}
@inproceedings{Dey13,
@@ -151,10 +151,10 @@ language={English},
%% hal-00922572, version 2
-%% http://hal.inria.fr/hal-00922572
+%% https://hal.inria.fr/hal-00922572
@techreport{boissonnat:hal-00922572,
hal_id = {hal-00922572},
- url = {http://hal.inria.fr/hal-00922572},
+ url = {https://hal.inria.fr/hal-00922572},
title = {Computing Persistent Homology with Various Coefficient Fields in a Single Pass},
author = {Boissonnat, Jean-Daniel and Maria, Cl{\'e}ment},
abstract = {{In this article, we introduce the multi-field persistence diagram for the persistence homology of a filtered complex. It encodes compactly the superimposition of the persistence diagrams of the complex with several field coefficients, and provides a substantially more precise description of the topology of the filtered complex. Specifically, the multi-field persistence diagram encodes the Betti numbers of integral homology and the prime divisors of the torsion coefficients of the underlying shape. Moreover, it enjoys similar stability properties as the ones of standard persistence diagrams, with the appropriate notion of distance. These properties make the multi-field persistence diagram a useful tool in computational topology.}},
@@ -167,7 +167,7 @@ language={English},
number = {RR-8436},
year = {2013},
month = Dec,
- pdf = {http://hal.inria.fr/hal-00922572/PDF/RR-8436.pdf},
+ pdf = {https://hal.inria.fr/hal-00922572v5/document},
}
@@ -323,7 +323,7 @@ language={English},
%------------------------------------------------------------------
@article{rips2012,
hal_id = {hal-00785072},
- url = {http://hal.archives-ouvertes.fr/hal-00785072},
+ url = {https://hal.archives-ouvertes.fr/hal-00785072},
title = {{Vietoris-Rips Complexes also Provide Topologically Correct Reconstructions of Sampled Shapes}},
author = {Attali, Dominique and Lieutier, Andr{\'e} and Salinas, David},
keywords = {Shape reconstruction \sep Rips complexes \sep clique complexes \sep \v Cech complexes ; homotopy equivalence ; collapses ; high dimensions},
@@ -1115,7 +1115,7 @@ language={English}
author = {Nicholas J. Cavanna and Mahmoodreza Jahanseir and Donald R. Sheehy},
booktitle = {Proceedings of the Canadian Conference on Computational Geometry},
title = {A Geometric Perspective on Sparse Filtrations},
- url = {http://research.cs.queensu.ca/cccg2015/CCCG15-papers/01.pdf},
+ url = {https://research.cs.queensu.ca/cccg2015/CCCG15-papers/01.pdf},
year = {2015}
}
@@ -1151,7 +1151,7 @@ language={English}
editor = {Lars Arge and J{\'a}nos Pach},
publisher = {Schloss Dagstuhl--Leibniz-Zentrum fuer Informatik},
address = {Dagstuhl, Germany},
- URL = {http://drops.dagstuhl.de/opus/volltexte/2015/5098},
+ URL = {https://drops.dagstuhl.de/opus/volltexte/2015/5098},
URN = {urn:nbn:de:0030-drops-50981},
doi = {10.4230/LIPIcs.SOCG.2015.642},
annote = {Keywords: Simplicial complex, Compact data structures, Automaton, NP-hard}
@@ -1164,7 +1164,7 @@ language={English}
journal = {CoRR},
volume = {abs/1607.08449},
year = {2016},
- url = {http://arxiv.org/abs/1607.08449},
+ url = {https://arxiv.org/abs/1607.08449},
archivePrefix = {arXiv},
eprint = {1607.08449},
timestamp = {Mon, 13 Aug 2018 16:46:26 +0200},
@@ -1280,7 +1280,7 @@ year = "2011"
publisher={Springer}
}
-@inproceedings{dtmfiltrations,
+@inproceedings{dtmfiltrationsconf,
author = {Hirokazu Anai and
Fr{\'{e}}d{\'{e}}ric Chazal and
Marc Glisse and
@@ -1305,6 +1305,29 @@ year = "2011"
bibsource = {dblp computer science bibliography, https://dblp.org}
}
+@InProceedings{dtmfiltrations,
+author="Anai, Hirokazu
+and Chazal, Fr{\'e}d{\'e}ric
+and Glisse, Marc
+and Ike, Yuichi
+and Inakoshi, Hiroya
+and Tinarrage, Rapha{\"e}l
+and Umeda, Yuhei",
+editor="Baas, Nils A.
+and Carlsson, Gunnar E.
+and Quick, Gereon
+and Szymik, Markus
+and Thaule, Marius",
+title="DTM-Based Filtrations",
+booktitle="Topological Data Analysis",
+year="2020",
+publisher="Springer International Publishing",
+address="Cham",
+pages="33--66",
+isbn="978-3-030-43408-3",
+doi="10.1007/978-3-030-43408-3_2",
+}
+
@InProceedings{edgecollapsesocg2020,
author = {Jean-Daniel Boissonnat and Siddharth Pritam},
title = {{Edge Collapse and Persistence of Flag Complexes}},
@@ -1324,6 +1347,12 @@ year = "2011"
annote = {Keywords: Computational Topology, Topological Data Analysis, Edge Collapse, Simple Collapse, Persistent homology}
}
+@misc{edgecollapsearxiv,
+ author = {Marc Glisse and Siddharth Pritam},
+ title = {{Swap, Shift and Trim to Edge Collapse a Filtration}},
+ url = {https://arxiv.org/abs/2203.07022},
+}
+
@phdthesis{KachanovichThesis,
TITLE = {{Meshing submanifolds using Coxeter triangulations}},
AUTHOR = {Kachanovich, Siargey},
@@ -1337,4 +1366,4 @@ year = "2011"
PDF = {https://hal.inria.fr/tel-02419148v2/file/2019AZUR4072.pdf},
HAL_ID = {tel-02419148},
HAL_VERSION = {v2},
-} \ No newline at end of file
+}
diff --git a/biblio/how_to_cite_gudhi.bib.in b/biblio/how_to_cite_gudhi.bib.in
index 54d10857..579dbf41 100644
--- a/biblio/how_to_cite_gudhi.bib.in
+++ b/biblio/how_to_cite_gudhi.bib.in
@@ -78,7 +78,7 @@
}
@incollection{gudhi:SubSampling
-, author = "Cl\'ement Jamin, Siargey Kachanovich"
+, author = "Cl\'ement Jamin and Siargey Kachanovich"
, title = "Subsampling"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
@@ -108,7 +108,7 @@
}
@incollection{gudhi:RipsComplex
-, author = "Cl\'ement Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse"
+, author = "Cl\'ement Maria and Pawel Dlotko and Vincent Rouvreau and Marc Glisse"
, title = "Rips complex"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
@@ -158,7 +158,7 @@
}
@incollection{gudhi:Collapse
-, author = "Siddharth Pritam"
+, author = "Siddharth Pritam and Marc Glisse"
, title = "Edge collapse"
, publisher = "{GUDHI Editorial Board}"
, edition = "{@GUDHI_VERSION@}"
diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index 5ab23720..7f14f423 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -164,11 +164,11 @@ Table of Contents
* <b>Requires:</b> \ref eigen &ge; 3.1.0 and \ref cgal &ge; 5.1.0.
*
* A weighted version for Alpha complex is available (cf. Alpha_complex). It is like a usual Alpha complex, but based
- * on a <a href="https://doc.cgal.org/latest/Triangulation/index.html#title20">CGAL regular triangulation</a> instead
+ * on a <a href="https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT">CGAL regular triangulation</a> instead
* of Delaunay.
*
* This example builds the CGAL weighted alpha shapes from a small molecule, and initializes the alpha complex with
- * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13">CGAL 3d
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
* weighted alpha shapes</a>.
*
* Then, it is asked to display information about the alpha complex.
@@ -212,7 +212,7 @@ Table of Contents
* Gudhi::alpha_complex::complexity::EXACT.
*
* This example builds the CGAL 3d weighted alpha shapes from a small molecule, and initializes the alpha complex with
- * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13">CGAL 3d
+ * it. This example is taken from <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes">CGAL 3d
* weighted alpha shapes</a>.
*
* Then, it is asked to display information about the alpha complex.
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index ccc3d852..562ef139 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -12,7 +12,6 @@
#ifndef ALPHA_COMPLEX_3D_H_
#define ALPHA_COMPLEX_3D_H_
-#include <boost/version.hpp>
#include <boost/variant.hpp>
#include <boost/range/size.hpp>
#include <boost/range/combine.hpp>
@@ -58,8 +57,6 @@ namespace Gudhi {
namespace alpha_complex {
-thread_local double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001;
-
// Value_from_iterator returns the filtration value from an iterator on alpha shapes values
//
// FAST SAFE EXACT
@@ -101,7 +98,7 @@ struct Value_from_iterator<complexity::EXACT> {
* \tparam Periodic Boolean used to set/unset the periodic version of Alpha_complex_3d. Default value is false.
*
* For the weighted version, weights values are explained on CGAL
- * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0">Alpha shapes 3d</a> and
+ * <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions">Alpha shapes 3d</a> and
* <a href="https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation">Regular
* triangulation</a> documentation.
*
diff --git a/src/Alpha_complex/utilities/alphacomplex.md b/src/Alpha_complex/utilities/alphacomplex.md
index 0d3c6027..1e3b8fab 100644
--- a/src/Alpha_complex/utilities/alphacomplex.md
+++ b/src/Alpha_complex/utilities/alphacomplex.md
@@ -64,7 +64,7 @@ N.B.:
* Weights values are explained on CGAL
[dD Triangulations](https://doc.cgal.org/latest/Triangulation/index.html)
and
-[Regular triangulation](https://doc.cgal.org/latest/Triangulation/index.html#title20) documentation.
+[Regular triangulation](https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT) documentation.
## alpha_complex_3d_persistence ##
@@ -131,6 +131,6 @@ N.B.:
* `alpha_complex_3d_persistence` only accepts OFF files in dimension 3.
* Filtration values are alpha square values.
* Weights values are explained on CGAL
-[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title0)
+[Alpha shape](https://doc.cgal.org/latest/Alpha_shapes_3/index.html#Alpha_shapes_3Definitions)
and
[Regular triangulation](https://doc.cgal.org/latest/Triangulation_3/index.html#Triangulation3secclassRegulartriangulation) documentation.
diff --git a/src/Collapse/doc/intro_edge_collapse.h b/src/Collapse/doc/intro_edge_collapse.h
index fde39707..12e909c8 100644
--- a/src/Collapse/doc/intro_edge_collapse.h
+++ b/src/Collapse/doc/intro_edge_collapse.h
@@ -17,68 +17,48 @@ namespace collapse {
/** \defgroup edge_collapse Edge collapse
*
- * \author Siddharth Pritam
+ * \author Siddharth Pritam and Marc Glisse
*
* @{
*
- * This module implements edge collapse of a filtered flag complex, in particular it reduces a filtration of
- * Vietoris-Rips complex from its graph to another smaller flag filtration with same persistence.
- * Where a filtration is a sequence of simplicial (here Rips) complexes connected with inclusions.
+ * This module implements edge collapse of a filtered flag complex as described in \cite edgecollapsearxiv, in
+ * particular it reduces a filtration of Vietoris-Rips complex represented by a graph to a smaller flag filtration with
+ * the same persistent homology.
*
* \section edge_collapse_definition Edge collapse definition
*
* An edge \f$e\f$ in a simplicial complex \f$K\f$ is called a <b>dominated edge</b> if the link of \f$e\f$ in
* \f$K\f$, \f$lk_K(e)\f$ is a simplicial cone, that is, there exists a vertex \f$v^{\prime} \notin e\f$ and a
- * subcomplex \f$L\f$ in \f$K\f$, such that \f$lk_K(e) = v^{\prime}L\f$. We say that the vertex \f$v^{\prime}\f$ is
- * {dominating} \f$e\f$ and \f$e\f$ is {dominated} by \f$v^{\prime}\f$.
- * An <b> elementary egde collapse </b> is the removal of a dominated edge \f$e\f$ from \f$K\f$,
- * which we denote with \f$K\f$ \f${\searrow\searrow}^1 \f$ \f$K\setminus e\f$.
- * The symbol \f$\mathbf{K\setminus e}\f$ (deletion of \f$e\f$ from \f$K\f$) refers to the subcomplex of \f$K\f$ which
- * has all simplices of \f$K\f$ except \f$e\f$ and the ones containing \f$e\f$.
- * There is an <b>edge collapse</b> from a simplicial complex \f$K\f$ to its subcomplex \f$L\f$,
- * if there exists a series of elementary edge collapses from \f$K\f$ to \f$L\f$, denoted as \f$K\f$
- * \f${\searrow\searrow}\f$ \f$L\f$.
- *
- * An edge collapse is a homotopy preserving operation, and it can be further expressed as sequence of the classical
- * elementary simple collapse.
- * A complex without any dominated edge is called a \f$1\f$- minimal complex and the core \f$K^1\f$ of simplicial
- * complex is a minimal complex such that \f$K\f$ \f${\searrow\searrow}\f$ \f$K^1\f$.
- * Computation of a core (not unique) involves computation of dominated edges and the dominated edges can be easily
- * characterized as follows:
+ * subcomplex \f$L\f$ in \f$K\f$, such that \f$lk_K(e) = v^{\prime}L\f$. We say that the vertex \f$v^{\prime}\f$
+ * \e dominates \f$e\f$ and \f$e\f$ is \e dominated by \f$v^{\prime}\f$.
+ * An <b> elementary edge collapse </b> is the removal of a dominated edge \f$e\f$ from \f$K\f$ (the cofaces of \f$e\f$
+ * are implicitly removed as well).
+ * Domination is used as a simple sufficient condition that ensures that this removal is a homotopy preserving
+ * operation.
+ *
+ * The dominated edges can be easily characterized as follows:
*
- * -- For general simplicial complex: An edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$,
- * <i>if and only if</i> all the maximal simplices of \f$K\f$ that contain \f$e\f$ also contain \f$v^{\prime}\f$
+ * -- For a general simplicial complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$,
+ * if and only if all the maximal simplices of \f$K\f$ that contain \f$e\f$ also contain \f$v^{\prime}\f$.
*
- * -- For a flag complex: An edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$, <i>if and only
- * if</i> all the vertices in \f$K\f$ that has an edge with both vertices of \f$e\f$ also has an edge with
- * \f$v^{\prime}\f$.
+ * -- For a flag complex: an edge \f$e \in K\f$ is dominated by another vertex \f$v^{\prime} \in K\f$, if and only
+ * if all the vertices in \f$K\f$ that have an edge with both vertices of \f$e\f$ also have an edge with
+ * \f$v^{\prime}\f$. Notice that this only depends on the graph.
*
- * The algorithm to compute the smaller induced filtration is described in Section 5 \cite edgecollapsesocg2020.
- * Edge collapse can be successfully employed to reduce any given filtration of flag complexes to a smaller induced
+ * In the context of a filtration, an edge collapse may translate into an increase of the filtration value of an edge,
+ * or its removal if it already had the largest filtration value.
+ * The algorithm to compute the smaller induced filtration is described in \cite edgecollapsearxiv.
+ * Edge collapse can be successfully employed to reduce any input filtration of flag complexes to a smaller induced
* filtration which preserves the persistent homology of the original filtration and is a flag complex as well.
*
- * The general idea is that we consider edges in the filtered graph and sort them according to their filtration value
- * giving them a total order.
- * Each edge gets a unique index denoted as \f$i\f$ in this order. To reduce the filtration, we move forward with
- * increasing filtration value
- * in the graph and check if the current edge \f$e_i\f$ is dominated in the current graph \f$G_i := \{e_1, .. e_i\} \f$
- * or not.
- * If the edge \f$e_i\f$ is dominated we remove it from the filtration and move forward to the next edge \f$e_{i+1}\f$.
- * If \f$e_i\f$ is non-dominated then we keep it in the reduced filtration and then go backward in the current graph
- * \f$G_i\f$ to look for new non-dominated edges that was dominated before but might become non-dominated at this
- * point.
- * If an edge \f$e_j, j < i \f$ during the backward search is found to be non-dominated, we include \f$e_j\f$ in to the
- * reduced filtration and we set its new filtration value to be \f$i\f$ that is the index of \f$e_i\f$.
- * The precise mechanism for this reduction has been described in Section 5 \cite edgecollapsesocg2020.
- * Here we implement this mechanism for a filtration of Rips complex.
- * After perfoming the reduction the filtration reduces to a flag-filtration with the same persistence as the original
- * filtration.
- *
+ * The algorithm implemented here does not produce a minimal filtration. Taking its output and applying the algorithm a
+ * second time may further simplify the filtration.
+ *
* \subsection edgecollapseexample Basic edge collapse
*
* This example calls `Gudhi::collapse::flag_complex_collapse_edges()` from a proximity graph represented as a list of
* `Filtered_edge`.
- * Then it collapses edges and displays a new list of `Filtered_edge` (with less edges)
+ * Then it collapses edges and displays a new list of `Filtered_edge` (with fewer edges)
* that will preserve the persistence homology computation.
*
* \include edge_collapse_basic_example.cpp
@@ -88,7 +68,7 @@ namespace collapse {
* \code $> ./Edge_collapse_example_basic
* \endcode
*
- * the program output is:
+ * the program output could be:
*
* \include edge_collapse_example_basic.txt
*/
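
The documentation above boils down to a single free function call. As a minimal sketch (this is not the bundled edge_collapse_basic_example.cpp; the graph below reuses the square-plus-diagonals data that also appears in collapse_unit_test.cpp), one can hand a list of Filtered_edge tuples to Gudhi::collapse::flag_complex_collapse_edges() and print what survives:

    #include <gudhi/Flag_complex_edge_collapser.h>

    #include <iostream>
    #include <tuple>
    #include <vector>

    int main() {
      using Filtered_edge = std::tuple<int, int, float>;
      // Square 0-1-2-3 at filtration value 1., then its two diagonals at 2. and 2.1.
      std::vector<Filtered_edge> edges{{0, 1, 1.f}, {1, 2, 1.f}, {2, 3, 1.f}, {3, 0, 1.f},
                                       {0, 2, 2.f}, {1, 3, 2.1f}};
      // The later diagonal (1, 3) is dominated (by vertex 0) when it appears, so it is
      // collapsed away; the five other edges remain.
      auto remaining_edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
      for (auto const& edge : remaining_edges)
        std::cout << std::get<0>(edge) << " " << std::get<1>(edge) << " "
                  << std::get<2>(edge) << "\n";
      return 0;
    }
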
diff --git a/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
index 713c6608..c823901f 100644
--- a/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
+++ b/src/Collapse/include/gudhi/Flag_complex_edge_collapser.h
@@ -1,11 +1,12 @@
/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
* See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Siddharth Pritam
+ * Author(s): Siddharth Pritam, Marc Glisse
*
* Copyright (C) 2020 Inria
*
* Modification(s):
* - 2020/03 Vincent Rouvreau: integration to the gudhi library
+ * - 2021 Marc Glisse: complete rewrite
* - YYYY/MM Author: Description of the modification
*/
@@ -14,367 +15,319 @@
#include <gudhi/Debug_utils.h>
-#include <boost/functional/hash.hpp>
-#include <boost/iterator/iterator_facade.hpp>
-
-#include <Eigen/Sparse>
-#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
+#include <boost/container/flat_map.hpp>
+#include <boost/container/flat_set.hpp>
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_sort.h>
#endif
-#include <iostream>
-#include <utility> // for std::pair
+#include <utility>
#include <vector>
-#include <unordered_map>
-#include <unordered_set>
-#include <set>
-#include <tuple> // for std::tie
-#include <algorithm> // for std::includes
-#include <iterator> // for std::inserter
-#include <type_traits> // for std::decay
-
-// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
-#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Edge Collapse is only available for Eigen3 >= 3.1.0
-#endif
+#include <tuple>
+#include <algorithm>
+#include <limits>
namespace Gudhi {
namespace collapse {
/** \private
- *
- * \brief Flag complex sparse matrix data structure.
*
- * \details
- * This class stores a <a target="_blank" href="https://en.wikipedia.org/wiki/Clique_complex">Flag complex</a>
- * in an <a target="_blank" href="https://eigen.tuxfamily.org/dox/group__TutorialSparse.html">Eigen sparse matrix</a>.
+ * \brief Flag complex sparse matrix data structure.
*
- * \tparam Vertex type must be a signed integer type. It admits a total order <.
- * \tparam Filtration type for the value of the filtration function. Must be comparable with <.
+ * \tparam Vertex type must be an integer type.
+ * \tparam Filtration type for the value of the filtration function.
*/
-template<typename Vertex, typename Filtration>
-class Flag_complex_edge_collapser {
- public:
- /** \brief Re-define Vertex as Vertex_handle type to ease the interface with `Gudhi::Proximity_graph`. */
- using Vertex_handle = Vertex;
- /** \brief Re-define Filtration as Filtration_value type to ease the interface with `Gudhi::Proximity_graph`. */
- using Filtration_value = Filtration;
-
- private:
- // internal numbering of vertices and edges
- using IVertex = std::size_t;
- using Edge_index = std::size_t;
- using IEdge = std::pair<IVertex, IVertex>;
-
- // The sparse matrix data type
- // (Eigen::SparseMatrix<Edge_index, Eigen::RowMajor> has slow insertions)
- using Sparse_vector = Eigen::SparseVector<Edge_index>;
- using Sparse_row_matrix = std::vector<Sparse_vector>;
-
- // Range of neighbors of a vertex
- template<bool closed>
- struct Neighbours {
- class iterator : public boost::iterator_facade<iterator,
- IVertex, /* value_type */
- std::input_iterator_tag, // or boost::single_pass_traversal_tag
- IVertex /* reference */ >
- {
- public:
- iterator():ptr(nullptr){}
- iterator(Neighbours const*p):ptr(p){find_valid();}
- private:
- friend class boost::iterator_core_access;
- Neighbours const*ptr;
- void increment(){
- ++ptr->it;
- find_valid();
- }
- void find_valid(){
- auto& it = ptr->it;
- do {
- if(!it) { ptr=nullptr; break; }
- if(IVertex(it.index()) == ptr->u) {
- if(closed) break;
- else continue;
- }
- Edge_index e = it.value();
- if(e <= ptr->ec->current_backward || ptr->ec->critical_edge_indicator_[e]) break;
- } while(++it, true);
- }
- bool equal(iterator const& other) const { return ptr == other.ptr; }
- IVertex dereference() const { return ptr->it.index(); }
- };
- typedef iterator const_iterator;
- mutable typename Sparse_vector::InnerIterator it;
- Flag_complex_edge_collapser const*ec;
- IVertex u;
- iterator begin() const { return this; }
- iterator end() const { return {}; }
- explicit Neighbours(Flag_complex_edge_collapser const*p,IVertex u):it(p->sparse_row_adjacency_matrix_[u]),ec(p),u(u){}
- };
-
- // A range of row indices
- using IVertex_vector = std::vector<IVertex>;
-
- public:
- /** \brief Filtered_edge is a type to store an edge with its filtration value. */
- using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
-
- private:
- // Map from row index to its vertex handle
- std::vector<Vertex_handle> row_to_vertex_;
-
- // Index of the current edge in the backwards walk. Edges <= current_backward are part of the temporary graph,
- // while edges > current_backward are removed unless critical_edge_indicator_.
- Edge_index current_backward = -1;
-
- // Map from IEdge to its index
- std::unordered_map<IEdge, Edge_index, boost::hash<IEdge>> iedge_to_index_map_;
-
- // Boolean vector to indicate if the edge is critical.
- std::vector<bool> critical_edge_indicator_;
-
- // Map from vertex handle to its row index
- std::unordered_map<Vertex_handle, IVertex> vertex_to_row_;
-
- // Stores the Sparse matrix of Filtration values representing the original graph.
- // The matrix rows and columns are indexed by IVertex.
- Sparse_row_matrix sparse_row_adjacency_matrix_;
-
- // The input, a vector of filtered edges.
- std::vector<Filtered_edge> f_edge_vector_;
-
- // Edge is the actual edge (u,v), with Vertex_handle u and v, not IVertex.
- bool edge_is_dominated(Vertex_handle u, Vertex_handle v) const
- {
- const IVertex rw_u = vertex_to_row_.at(u);
- const IVertex rw_v = vertex_to_row_.at(v);
-#ifdef DEBUG_TRACES
- std::cout << "The edge {" << u << ", " << v << "} is going for domination check." << std::endl;
-#endif // DEBUG_TRACES
- auto common_neighbours = open_common_neighbours_row_index(rw_u, rw_v);
-#ifdef DEBUG_TRACES
- std::cout << "And its common neighbours are." << std::endl;
- for (auto neighbour : common_neighbours) {
- std::cout << row_to_vertex_[neighbour] << ", " ;
- }
- std::cout<< std::endl;
-#endif // DEBUG_TRACES
- if (common_neighbours.size() == 1)
- return true;
- else
- for (auto rw_c : common_neighbours) {
- auto neighbours_c = neighbours_row_index<true>(rw_c);
- // If neighbours_c contains the common neighbours.
- if (std::includes(neighbours_c.begin(), neighbours_c.end(),
- common_neighbours.begin(), common_neighbours.end()))
- return true;
- }
- return false;
+template<typename Vertex, typename Filtration_value>
+struct Flag_complex_edge_collapser {
+ using Filtered_edge = std::tuple<Vertex, Vertex, Filtration_value>;
+ typedef std::pair<Vertex,Vertex> Edge;
+ struct Cmpi { template<class T, class U> bool operator()(T const&a, U const&b)const{return b<a; } };
+ typedef boost::container::flat_map<Vertex, Filtration_value> Ngb_list;
+ typedef std::vector<Ngb_list> Neighbors;
+ Neighbors neighbors; // closed neighborhood
+ std::size_t num_vertices;
+ std::vector<std::tuple<Vertex, Vertex, Filtration_value>> res;
+
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ // Minimal matrix interface
+ // Using this matrix generally helps performance, but the memory use may be excessive for a very sparse graph
+ // (and in extreme cases the constant initialization of the matrix may start to dominate the running time).
+ // Are there cases where the matrix is too big but a hash table would help?
+ std::vector<Filtration_value> neighbors_data;
+ void init_neighbors_dense(){
+ neighbors_data.clear();
+ neighbors_data.resize(num_vertices*num_vertices, std::numeric_limits<Filtration_value>::infinity());
}
+ Filtration_value& neighbors_dense(Vertex i, Vertex j){return neighbors_data[num_vertices*j+i];}
+#endif
- // Returns the edges connecting u and v (extremities of crit) to their common neighbors (not themselves)
- std::set<Edge_index> three_clique_indices(Edge_index crit) {
- std::set<Edge_index> edge_indices;
-
- Vertex_handle u = std::get<0>(f_edge_vector_[crit]);
- Vertex_handle v = std::get<1>(f_edge_vector_[crit]);
-
-#ifdef DEBUG_TRACES
- std::cout << "The current critical edge to re-check criticality with filt value is : f {" << u << "," << v
- << "} = " << std::get<2>(f_edge_vector_[crit]) << std::endl;
-#endif // DEBUG_TRACES
- auto rw_u = vertex_to_row_[u];
- auto rw_v = vertex_to_row_[v];
-
- IVertex_vector common_neighbours = open_common_neighbours_row_index(rw_u, rw_v);
+ // This does not touch the events list, only the adjacency matrix(es)
+ void delay_neighbor(Vertex u, Vertex v, Filtration_value f) {
+ neighbors[u][v]=f;
+ neighbors[v][u]=f;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ void remove_neighbor(Vertex u, Vertex v) {
+ neighbors[u].erase(v);
+ neighbors[v].erase(u);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=std::numeric_limits<Filtration_value>::infinity();
+ neighbors_dense(v,u)=std::numeric_limits<Filtration_value>::infinity();
+#endif
+ }
- for (auto rw_c : common_neighbours) {
- IEdge e_with_new_nbhr_v = std::minmax(rw_u, rw_c);
- IEdge e_with_new_nbhr_u = std::minmax(rw_v, rw_c);
- edge_indices.emplace(iedge_to_index_map_[e_with_new_nbhr_v]);
- edge_indices.emplace(iedge_to_index_map_[e_with_new_nbhr_u]);
+ template<class FilteredEdgeRange>
+ void read_edges(FilteredEdgeRange const&r){
+ neighbors.resize(num_vertices);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ init_neighbors_dense();
+#endif
+ // Use the raw sequence to avoid maintaining the order
+ std::vector<typename Ngb_list::sequence_type> neighbors_seq(num_vertices);
+ for(auto&&e : r){
+ using std::get;
+ Vertex u = get<0>(e);
+ Vertex v = get<1>(e);
+ Filtration_value f = get<2>(e);
+ neighbors_seq[u].emplace_back(v, f);
+ neighbors_seq[v].emplace_back(u, f);
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(u,v)=f;
+ neighbors_dense(v,u)=f;
+#endif
+ }
+ for(std::size_t i=0;i<neighbors_seq.size();++i){
+ neighbors_seq[i].emplace_back(i, -std::numeric_limits<Filtration_value>::infinity());
+ neighbors[i].adopt_sequence(std::move(neighbors_seq[i])); // calls sort
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ neighbors_dense(i,i)=-std::numeric_limits<Filtration_value>::infinity();
+#endif
}
- return edge_indices;
}
- // Detect and set all edges that are becoming critical
- template<typename FilteredEdgeOutput>
- void set_edge_critical(Edge_index indx, Filtration_value filt, FilteredEdgeOutput filtered_edge_output) {
-#ifdef DEBUG_TRACES
- std::cout << "The curent index with filtration value " << indx << ", " << filt << " is primary critical" <<
- std::endl;
-#endif // DEBUG_TRACES
- std::set<Edge_index> effected_indices = three_clique_indices(indx);
- // Cannot use boost::adaptors::reverse in such dynamic cases apparently
- for (auto it = effected_indices.rbegin(); it != effected_indices.rend(); ++it) {
- current_backward = *it;
- Vertex_handle u = std::get<0>(f_edge_vector_[current_backward]);
- Vertex_handle v = std::get<1>(f_edge_vector_[current_backward]);
- // If current_backward is not critical so it should be processed, otherwise it stays in the graph
- if (!critical_edge_indicator_[current_backward]) {
- if (!edge_is_dominated(u, v)) {
-#ifdef DEBUG_TRACES
- std::cout << "The curent index became critical " << current_backward << std::endl;
-#endif // DEBUG_TRACES
- critical_edge_indicator_[current_backward] = true;
- filtered_edge_output(u, v, filt);
- std::set<Edge_index> inner_effected_indcs = three_clique_indices(current_backward);
- for (auto inr_idx : inner_effected_indcs) {
- if(inr_idx < current_backward) // && !critical_edge_indicator_[inr_idx]
- effected_indices.emplace(inr_idx);
- }
-#ifdef DEBUG_TRACES
- std::cout << "The following edge is critical with filt value: {" << u << "," << v << "}; "
- << filt << std::endl;
-#endif // DEBUG_TRACES
- }
+ // Open neighborhood
+ // At some point it helped gcc to add __attribute__((noinline)) here, otherwise we had +50% on the running time
+ // on one example. It looks ok now, or I forgot which example that was.
+ void common_neighbors(boost::container::flat_set<Vertex>& e_ngb,
+ std::vector<std::pair<Filtration_value, Vertex>>& e_ngb_later,
+ Vertex u, Vertex v, Filtration_value f_event){
+ // Using neighbors_dense here seems to hurt, even if we loop on the smaller of nu and nv.
+ Ngb_list const&nu = neighbors[u];
+ Ngb_list const&nv = neighbors[v];
+ auto ui = nu.begin();
+ auto ue = nu.end();
+ auto vi = nv.begin();
+ auto ve = nv.end();
+ assert(ui != ue && vi != ve);
+ while(ui != ue && vi != ve){
+ Vertex w = ui->first;
+ if(w < vi->first) { ++ui; continue; }
+ if(w > vi->first) { ++vi; continue; }
+ // nu and nv are closed, so we need to exclude e here.
+ if(w != u && w != v) {
+ Filtration_value f = std::max(ui->second, vi->second);
+ if(f > f_event)
+ e_ngb_later.emplace_back(f, w);
+ else
+ e_ngb.insert(e_ngb.end(), w);
}
+ ++ui; ++vi;
}
- // Clear the implicit "removed from graph" data structure
- current_backward = -1;
}
- // Returns list of neighbors of a particular vertex.
- template<bool closed>
- auto neighbours_row_index(IVertex rw_u) const
- {
- return Neighbours<closed>(this, rw_u);
+ // Test if the neighborhood of e is included in the closed neighborhood of c
+ template<class Ngb>
+ bool is_dominated_by(Ngb const& e_ngb, Vertex c, Filtration_value f){
+ // The best strategy probably depends on the distribution, how sparse / dense the adjacency matrix is,
+ // how (un)balanced the sizes of e_ngb and nc are.
+ // Some efficient operations on sets work best with bitsets, although the need for a map complicates things.
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ for(auto v : e_ngb) {
+ // if(v==c)continue;
+ if(neighbors_dense(v,c) > f) return false;
+ }
+ return true;
+#else
+ auto&&nc = neighbors[c];
+ // if few neighbors, use dichotomy? Seems slower.
+ // I tried storing a copy of neighbors as a vector<absl::flat_hash_map> and using it for nc, but it was
+ // a bit slower here. It did help with neighbors[dominator].find(w) in the main function though,
+ // sometimes enough, sometimes not.
+ auto ci = nc.begin();
+ auto ce = nc.end();
+ auto eni = e_ngb.begin();
+ auto ene = e_ngb.end();
+ assert(eni != ene);
+ assert(ci != ce);
+ // if(*eni == c && ++eni == ene) return true;
+ for(;;){
+ Vertex ve = *eni;
+ Vertex vc = ci->first;
+ while(ve > vc) {
+ // try a gallop strategy (exponential search)? Seems slower
+ if(++ci == ce) return false;
+ vc = ci->first;
+ }
+ if(ve < vc) return false;
+ // ve == vc
+ if(ci->second > f) return false;
+ if(++eni == ene)return true;
+ // If we stored an open neighborhood of c (excluding c), we would need to test for c here and before the loop
+ // if(*eni == c && ++eni == ene)return true;
+ if(++ci == ce) return false;
+ }
+#endif
}
- // Returns the list of open neighbours of the edge :{u,v}.
- IVertex_vector open_common_neighbours_row_index(IVertex rw_u, IVertex rw_v) const
- {
- auto non_zero_indices_u = neighbours_row_index<false>(rw_u);
- auto non_zero_indices_v = neighbours_row_index<false>(rw_v);
- IVertex_vector common;
- std::set_intersection(non_zero_indices_u.begin(), non_zero_indices_u.end(), non_zero_indices_v.begin(),
- non_zero_indices_v.end(), std::back_inserter(common));
-
- return common;
- }
+ template<class FilteredEdgeRange, class Delay>
+ void process_edges(FilteredEdgeRange const& edges, Delay&& delay) {
+ {
+ Vertex maxi = 0, maxj = 0;
+ for(auto& fe : edges) {
+ Vertex i = std::get<0>(fe);
+ Vertex j = std::get<1>(fe);
+ if (i > maxi) maxi = i;
+ if (j > maxj) maxj = j;
+ }
+ num_vertices = std::max(maxi, maxj) + 1;
+ }
- // Insert a vertex in the data structure
- IVertex insert_vertex(Vertex_handle vertex) {
- auto n = row_to_vertex_.size();
- auto result = vertex_to_row_.emplace(vertex, n);
- // If it was not already inserted - Value won't be updated by emplace if it is already present
- if (result.second) {
- // Expand the matrix. The size of rows is irrelevant.
- sparse_row_adjacency_matrix_.emplace_back((std::numeric_limits<Eigen::Index>::max)());
- // Initializing the diagonal element of the adjency matrix corresponding to rw_b.
- sparse_row_adjacency_matrix_[n].insert(n) = -1; // not an edge
- // Must be done after reading its size()
- row_to_vertex_.push_back(vertex);
+ read_edges(edges);
+
+ boost::container::flat_set<Vertex> e_ngb;
+ e_ngb.reserve(num_vertices);
+ std::vector<std::pair<Filtration_value, Vertex>> e_ngb_later;
+ for(auto&e:edges) {
+ {
+ Vertex u = std::get<0>(e);
+ Vertex v = std::get<1>(e);
+ Filtration_value input_time = std::get<2>(e);
+ auto time = delay(input_time);
+ auto start_time = time;
+ e_ngb.clear();
+ e_ngb_later.clear();
+ common_neighbors(e_ngb, e_ngb_later, u, v, time);
+ // If we identify a good candidate (the first common neighbor) for being a dominator of e until infinity,
+ // we could check that a bit more cheaply. It does not seem to help though.
+ auto cmp1=[](auto const&a, auto const&b){return a.first > b.first;};
+ auto e_ngb_later_begin=e_ngb_later.begin();
+ auto e_ngb_later_end=e_ngb_later.end();
+ bool heapified = false;
+
+ bool dead = false;
+ while(true) {
+ Vertex dominator = -1;
+ // special case for size 1
+ // if(e_ngb.size()==1){dominator=*e_ngb.begin();}else
+ // It is tempting to test the dominators in increasing order of filtration value, which is likely to reduce
+ // the number of calls to is_dominated_by before finding a dominator, but sorting, even partially / lazily,
+ // is very expensive.
+ for(auto c : e_ngb){
+ if(is_dominated_by(e_ngb, c, time)){
+ dominator = c;
+ break;
+ }
+ }
+ if(dominator==-1) break;
+ // Push as long as dominator remains a dominator.
+ // Iterate on times where at least one neighbor appears.
+ for (bool still_dominated = true; still_dominated; ) {
+ if(e_ngb_later_begin == e_ngb_later_end) {
+ dead = true; goto end_move;
+ }
+ if(!heapified) {
+ // Eagerly sorting can be slow
+ std::make_heap(e_ngb_later_begin, e_ngb_later_end, cmp1);
+ heapified=true;
+ }
+ time = e_ngb_later_begin->first; // first place it may become critical
+ // Update the neighborhood for this new time, while checking if any of the new neighbors break domination.
+ while (e_ngb_later_begin != e_ngb_later_end && e_ngb_later_begin->first <= time) {
+ Vertex w = e_ngb_later_begin->second;
+#ifdef GUDHI_COLLAPSE_USE_DENSE_ARRAY
+ if (neighbors_dense(dominator,w) > e_ngb_later_begin->first)
+ still_dominated = false;
+#else
+ auto& ngb_dom = neighbors[dominator];
+ auto wit = ngb_dom.find(w); // neighborhood may be open or closed, it does not matter
+ if (wit == ngb_dom.end() || wit->second > e_ngb_later_begin->first)
+ still_dominated = false;
+#endif
+ e_ngb.insert(w);
+ std::pop_heap(e_ngb_later_begin, e_ngb_later_end--, cmp1);
+ }
+ } // this doesn't seem to help that much...
+ }
+end_move:
+ if(dead) {
+ remove_neighbor(u, v);
+ } else if(start_time != time) {
+ delay_neighbor(u, v, time);
+ res.emplace_back(u, v, time);
+ } else {
+ res.emplace_back(u, v, input_time);
+ }
+ }
}
- return result.first->second;
}
- // Insert an edge in the data structure
- // @exception std::invalid_argument In debug mode, if u == v
- IEdge insert_new_edge(Vertex_handle u, Vertex_handle v, Edge_index idx)
- {
- GUDHI_CHECK((u != v), std::invalid_argument("Flag_complex_edge_collapser::insert_new_edge with u == v"));
- // The edge must not be added before, it should be a new edge.
- IVertex rw_u = insert_vertex(u);
- IVertex rw_v = insert_vertex(v);
-#ifdef DEBUG_TRACES
- std::cout << "Inserting the edge " << u <<", " << v << std::endl;
-#endif // DEBUG_TRACES
- sparse_row_adjacency_matrix_[rw_u].insert(rw_v) = idx;
- sparse_row_adjacency_matrix_[rw_v].insert(rw_u) = idx;
- return std::minmax(rw_u, rw_v);
+ std::vector<Filtered_edge> output() {
+ return std::move(res);
}
- public:
- /** \brief Flag_complex_edge_collapser constructor from a range of filtered edges.
- *
- * @param[in] edges Range of Filtered edges range.There is no need the range to be sorted, as it will be performed in
- * `Flag_complex_edge_collapser::process_edges`.
- *
- * \tparam FilteredEdgeRange must be a range for which std::begin and std::end return iterators on a
- * `Flag_complex_edge_collapser::Filtered_edge`.
- */
- template<typename FilteredEdgeRange>
- Flag_complex_edge_collapser(const FilteredEdgeRange& edges)
- : f_edge_vector_(std::begin(edges), std::end(edges)) { }
+};
- /** \brief Performs edge collapse in a increasing sequence of the filtration value.
- *
- * \tparam filtered_edge_output is a functor that is called on the output edges, in non-decreasing order of
- * filtration, as filtered_edge_output(u, v, f) where u and v are Vertex_handle representing the extremities of the
- * edge, and f is its new Filtration_value.
- */
- template<typename FilteredEdgeOutput>
- void process_edges(FilteredEdgeOutput filtered_edge_output) {
- // Sort edges
- auto sort_by_filtration = [](const Filtered_edge& edge_a, const Filtered_edge& edge_b) -> bool
- {
- return (std::get<2>(edge_a) < std::get<2>(edge_b));
- };
+template<class R> R to_range(R&& r) { return std::move(r); }
+template<class R, class T> R to_range(T&& t) { R r; r.insert(r.end(), t.begin(), t.end()); return r; }
+template<class FilteredEdgeRange, class Delay>
+auto flag_complex_collapse_edges(FilteredEdgeRange&& edges, Delay&&delay) {
+ // Would it help to label the points according to some spatial sorting?
+ auto first_edge_itr = std::begin(edges);
+ using Vertex = std::decay_t<decltype(std::get<0>(*first_edge_itr))>;
+ using Filtration_value = std::decay_t<decltype(std::get<2>(*first_edge_itr))>;
+ using Edge_collapser = Flag_complex_edge_collapser<Vertex, Filtration_value>;
+ if (first_edge_itr != std::end(edges)) {
+ auto edges2 = to_range<std::vector<typename Edge_collapser::Filtered_edge>>(std::forward<FilteredEdgeRange>(edges));
#ifdef GUDHI_USE_TBB
- tbb::parallel_sort(f_edge_vector_.begin(), f_edge_vector_.end(), sort_by_filtration);
+ // I think this sorting is always negligible compared to the collapse, but parallelizing it shouldn't hurt.
+ tbb::parallel_sort(edges2.begin(), edges2.end(),
+ [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
#else
- std::sort(f_edge_vector_.begin(), f_edge_vector_.end(), sort_by_filtration);
+ std::sort(edges2.begin(), edges2.end(), [](auto const&a, auto const&b){return std::get<2>(a)>std::get<2>(b);});
#endif
-
- for (Edge_index endIdx = 0; endIdx < f_edge_vector_.size(); endIdx++) {
- Filtered_edge fec = f_edge_vector_[endIdx];
- Vertex_handle u = std::get<0>(fec);
- Vertex_handle v = std::get<1>(fec);
- Filtration_value filt = std::get<2>(fec);
-
- // Inserts the edge in the sparse matrix to update the graph (G_i)
- IEdge ie = insert_new_edge(u, v, endIdx);
-
- iedge_to_index_map_.emplace(ie, endIdx);
- critical_edge_indicator_.push_back(false);
-
- if (!edge_is_dominated(u, v)) {
- critical_edge_indicator_[endIdx] = true;
- filtered_edge_output(u, v, filt);
- if (endIdx > 1)
- set_edge_critical(endIdx, filt, filtered_edge_output);
- }
- }
+ Edge_collapser edge_collapser;
+ edge_collapser.process_edges(edges2, std::forward<Delay>(delay));
+ return edge_collapser.output();
}
-
-};
+ return std::vector<typename Edge_collapser::Filtered_edge>();
+}
/** \brief Implicitly constructs a flag complex from edges as an input, collapses edges while preserving the persistent
- * homology and returns the remaining edges as a range.
+ * homology and returns the remaining edges as a range. The filtration value of vertices is irrelevant to this function.
*
- * \param[in] edges Range of Filtered edges.There is no need the range to be sorted, as it will be performed.
+ * \param[in] edges Range of Filtered edges. There is no need for the range to be sorted, as it will be done internally.
*
- * \tparam FilteredEdgeRange furnishes `std::begin` and `std::end` methods and returns an iterator on a
- * FilteredEdge of type `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>` where `Vertex_handle` is the type
- * of a vertex index and `Filtration_value` is the type of an edge filtration value.
+ * \tparam FilteredEdgeRange Range of `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`
+ * where `Vertex_handle` is the type of a vertex index.
*
* \return Remaining edges after collapse as a range of
* `std::tuple<Vertex_handle, Vertex_handle, Filtration_value>`.
- *
+ *
* \ingroup edge_collapse
- *
+ *
+ * \note
+ * Advanced: Defining the macro GUDHI_COLLAPSE_USE_DENSE_ARRAY tells gudhi to allocate a square table of size the
+ * maximum vertex index. This usually speeds up the computation for dense graphs. However, for sparse graphs, the memory
+ * use may be problematic and initializing this large table may be slow.
*/
template<class FilteredEdgeRange> auto flag_complex_collapse_edges(const FilteredEdgeRange& edges) {
- auto first_edge_itr = std::begin(edges);
- using Vertex_handle = std::decay_t<decltype(std::get<0>(*first_edge_itr))>;
- using Filtration_value = std::decay_t<decltype(std::get<2>(*first_edge_itr))>;
- using Edge_collapser = Flag_complex_edge_collapser<Vertex_handle, Filtration_value>;
- std::vector<typename Edge_collapser::Filtered_edge> remaining_edges;
- if (first_edge_itr != std::end(edges)) {
- Edge_collapser edge_collapser(edges);
- edge_collapser.process_edges(
- [&remaining_edges](Vertex_handle u, Vertex_handle v, Filtration_value filtration) {
- // insert the edge
- remaining_edges.emplace_back(u, v, filtration);
- });
- }
- return remaining_edges;
+ return flag_complex_collapse_edges(edges, [](auto const&d){return d;});
}
} // namespace collapse
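
Besides the one-argument overload documented above, the rewritten header exposes flag_complex_collapse_edges(edges, delay). As the code suggests, the delay functor maps an edge's input filtration value to the (possibly larger) time at which the edge is first checked for domination; delaying edges can enable additional collapses at the price of perturbing the output filtration. A sketch of a possible call, with made-up edge data and an arbitrary 10% delay factor:

    #include <gudhi/Flag_complex_edge_collapser.h>

    #include <iostream>
    #include <tuple>
    #include <vector>

    int main() {
      using Filtered_edge = std::tuple<int, int, double>;
      std::vector<Filtered_edge> edges{{0, 1, 1.}, {1, 2, 1.}, {2, 0, 1.5},
                                       {0, 3, 2.}, {1, 3, 2.2}};
      // Identity delay: same result as the single-argument overload.
      auto exact = Gudhi::collapse::flag_complex_collapse_edges(
          edges, [](double f) { return f; });
      // Check each edge 10% later than its input value; more edges may collapse, and the
      // filtration values of the kept edges may be shifted upward accordingly.
      auto delayed = Gudhi::collapse::flag_complex_collapse_edges(
          edges, [](double f) { return 1.1 * f; });
      std::cout << exact.size() << " edges kept exactly, "
                << delayed.size() << " with delay\n";
      return 0;
    }

Compiling with GUDHI_COLLAPSE_USE_DENSE_ARRAY defined switches to the dense adjacency table described in the \note above, which the comments report as faster for dense graphs but memory-hungry (and slow to initialize) for very sparse ones.
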
diff --git a/src/Collapse/test/collapse_unit_test.cpp b/src/Collapse/test/collapse_unit_test.cpp
index b8876246..f41dbedd 100644
--- a/src/Collapse/test/collapse_unit_test.cpp
+++ b/src/Collapse/test/collapse_unit_test.cpp
@@ -98,8 +98,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o
// 0 3
edges.emplace_back(0, 2, 2.);
- edges.emplace_back(1, 3, 2.);
- trace_and_check_collapse(edges, {{1, 3, 2.}});
+ edges.emplace_back(1, 3, 2.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.1}});
// 1 2 4
// o---o---o
@@ -121,8 +121,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o---o
// 0 3 5
edges.emplace_back(2, 5, 4.);
- edges.emplace_back(4, 3, 4.);
- trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}});
+ edges.emplace_back(4, 3, 4.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.1}});
// 1 2 4
// o---o---o
@@ -132,8 +132,8 @@ BOOST_AUTO_TEST_CASE(collapse) {
// o---o---o
// 0 3 5
edges.emplace_back(1, 5, 5.);
- edges.emplace_back(0, 4, 5.);
- trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}, {0, 4, 5.}});
+ edges.emplace_back(0, 4, 5.1);
+ trace_and_check_collapse(edges, {{1, 3, 2.}, {4, 3, 4.}, {0, 4, 5.1}});
}
BOOST_AUTO_TEST_CASE(collapse_from_array) {
@@ -150,8 +150,8 @@ BOOST_AUTO_TEST_CASE(collapse_from_array) {
{2, 3, 1.},
{3, 0, 1.},
{0, 2, 2.},
- {1, 3, 2.}}};
- trace_and_check_collapse(f_edge_array, {{1, 3, 2.}});
+ {1, 3, 2.1}}};
+ trace_and_check_collapse(f_edge_array, {{1, 3, 2.1}});
}
BOOST_AUTO_TEST_CASE(collapse_from_proximity_graph) {
diff --git a/src/Contraction/include/gudhi/Edge_contraction.h b/src/Contraction/include/gudhi/Edge_contraction.h
index 6c0f4c78..58d627c2 100644
--- a/src/Contraction/include/gudhi/Edge_contraction.h
+++ b/src/Contraction/include/gudhi/Edge_contraction.h
@@ -26,6 +26,7 @@ namespace contraction {
/** \defgroup contr Edge contraction
+@{
\author David Salinas
@@ -195,7 +196,6 @@ int main (int argc, char *argv[])
return EXIT_SUCCESS;
}
-}
\endcode
\verbatim
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index ae8db1a3..b15d47a6 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -229,13 +229,7 @@ TAB_SIZE = 2
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
+ALIASES = gudhi_example_link{2}="@ref \2 \"\1/\2\""
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
@@ -874,7 +868,7 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \
+EXAMPLE_PATH = @CMAKE_SOURCE_DIR@ \
@CMAKE_SOURCE_DIR@/data/ \
@GUDHI_DOXYGEN_EXAMPLE_PATH@
@@ -1040,25 +1034,6 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
-# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# generated with the -Duse-libclang=ON option for CMake.
-# The default value is: NO.
-
-CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-CLANG_OPTIONS =
-
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@@ -1070,13 +1045,6 @@ CLANG_OPTIONS =
ALPHABETICAL_INDEX = YES
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
# In case all classes in a project start with a common prefix, all classes will
# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
# can be used to specify a prefix (or a list of prefixes) that should be ignored
@@ -1775,16 +1743,6 @@ LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE = NO
-
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
@@ -1857,16 +1815,6 @@ RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
-# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
-# with syntax highlighting in the RTF output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_SOURCE_CODE = NO
-
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
@@ -1956,15 +1904,6 @@ GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
-# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
-# program listings (including syntax highlighting and cross-referencing
-# information) to the DOCBOOK output. Note that enabling this will significantly
-# increase the size of the DOCBOOK output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_PROGRAMLISTING = NO
-
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
@@ -2139,12 +2078,6 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH = /usr/bin/perl
-
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
@@ -2158,15 +2091,6 @@ PERL_PATH = /usr/bin/perl
CLASS_DIAGRAMS = NO
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
diff --git a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
index a6098860..e1ab7cb3 100644
--- a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
+++ b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h
@@ -24,7 +24,7 @@ namespace cover_complex {
* Visualizations of the simplicial complexes can be done with either
* neato (from <a target="_blank" href="http://www.graphviz.org/">graphviz</a>),
* <a target="_blank" href="http://www.geomview.org/">geomview</a>,
- * <a target="_blank" href="https://github.com/MLWave/kepler-mapper">KeplerMapper</a>.
+ * <a target="_blank" href="https://github.com/scikit-tda/kepler-mapper">KeplerMapper</a>.
* Input point clouds are assumed to be \ref FileFormatsOFF "OFF files"
*
* \section covers Covers
diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
index a3613d0d..94579564 100644
--- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
+++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h
@@ -131,8 +131,7 @@ namespace persistent_cohomology {
We provide several example files: run these examples with -h for details on their use, and read the README file.
-\li <a href="rips_persistence_8cpp-example.html">
-Rips_complex/rips_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its persistence
+\li \gudhi_example_link{Rips_complex,rips_persistence.cpp} computes the Rips complex of a point cloud and outputs its persistence
diagram.
\code $> ./rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3 \endcode
\code The complex contains 177838 simplices
@@ -144,12 +143,10 @@ diagram.
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="rips_multifield_persistence_8cpp-example.html">
-Persistent_cohomology/rips_multifield_persistence.cpp</a> computes the Rips complex of a point cloud and outputs its
+\li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp} computes the Rips complex of a point cloud and outputs its
persistence diagram with a family of field coefficients.
-\li <a href="rips_distance_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_distance_matrix_persistence.cpp</a> computes the Rips complex of a distance matrix and
+\li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp} computes the Rips complex of a distance matrix and
outputs its persistence diagram.
The file should contain a square or lower triangular distance matrix with semicolons as separators.
@@ -158,8 +155,7 @@ Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="rips_correlation_matrix_persistence_8cpp-example.html">
-Rips_complex/rips_correlation_matrix_persistence.cpp</a>
+\li \gudhi_example_link{Rips_complex,rips_correlation_matrix_persistence.cpp}
computes the Rips complex of a correlation matrix and outputs its persistence diagram.
Note that no check is performed to verify that the matrix given as input is a correlation matrix.
@@ -169,8 +165,7 @@ Please refer to data/correlation_matrix/lower_triangular_correlation_matrix.csv
More details on the <a href="../../ripscomplex/">Rips complex utilities</a> dedicated page.
-\li <a href="alpha_complex_3d_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_3d_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/2\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_3d_persistence ../../data/points/tore3D_300.off -p 2 -m 0.45 \endcode
\code Simplex_tree dim: 3
@@ -235,8 +230,7 @@ Note that the lengths of the sides of the periodic cuboid have to be the same.<b
3 2 36.8838 inf
3 3 58.6783 inf \endcode
-\li <a href="alpha_complex_persistence_8cpp-example.html">
-Alpha_complex/alpha_complex_persistence.cpp</a> computes the persistent homology with
+\li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp} computes the persistent homology with
\f$\mathbb{Z}/p\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file.
\code $> ./alpha_complex_persistence -r 32 -p 2 -m 0.45 ../../data/points/tore3D_300.off \endcode
\code Alpha complex is of dimension 3 - 9273 simplices - 300 vertices.
@@ -248,8 +242,7 @@ Simplex_tree dim: 3
More details on the <a href="../../alphacomplex/">Alpha complex utilities</a> dedicated page.
-\li <a href="plain_homology_8cpp-example.html">
-Persistent_cohomology/plain_homology.cpp</a> computes the plain homology of a simple simplicial complex without
+\li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp} computes the plain homology of a simple simplicial complex without
filtration values.
*/
diff --git a/src/Rips_complex/doc/Intro_rips_complex.h b/src/Rips_complex/doc/Intro_rips_complex.h
index 3888ec8f..cd77b327 100644
--- a/src/Rips_complex/doc/Intro_rips_complex.h
+++ b/src/Rips_complex/doc/Intro_rips_complex.h
@@ -63,9 +63,8 @@ namespace rips_complex {
* value set with \f$max(filtration(4,5), filtration(4,6), filtration(5,6))\f$.
* And so on for simplex (0,1,2,3).
*
- * If the Rips_complex interfaces are not detailed enough for your need, please refer to
- * <a href="rips_persistence_step_by_step_8cpp-example.html">
- * rips_persistence_step_by_step.cpp</a> example, where the constructions of the graph and
+ * If the Rips_complex interfaces are not detailed enough for your needs, please refer to the example
+ * \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp}, where the constructions of the graph and
* the Simplex_tree are more detailed.
*
* \section sparserips Sparse Rips complex
diff --git a/src/Simplex_tree/doc/Intro_simplex_tree.h b/src/Simplex_tree/doc/Intro_simplex_tree.h
index ef8dec91..2d3ecdec 100644
--- a/src/Simplex_tree/doc/Intro_simplex_tree.h
+++ b/src/Simplex_tree/doc/Intro_simplex_tree.h
@@ -39,11 +39,9 @@ namespace Gudhi {
* \subsubsection filteredcomplexessimplextreeexamples Examples
*
* Here is a list of simplex tree examples :
- * \li <a href="simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a> - Simple simplex tree construction and basic function use.
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp} - Simple simplex tree construction and basic function use.
*
- * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a> - Simplex tree construction from cliques of graph read in
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp} - Simplex tree construction from cliques of graph read in
* a file.
*
* Simplex tree construction with \f$\mathbb{Z}/3\mathbb{Z}\f$ coefficients on weighted graph Klein bottle file:
@@ -54,12 +52,10 @@ Expand the simplex tree in 3.8e-05 s.
Information of the Simplex Tree:
Number of vertices = 10 Number of simplices = 98 \endcode
*
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a> - Simplex tree is computed and displayed
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp} - Simplex tree is computed and displayed
* from a 3D alpha complex (Requires CGAL, GMP and GMPXX to be installed).
*
- * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a> - Simple simplex tree construction from a one-skeleton graph with
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp} - Simple simplex tree construction from a one-skeleton graph with
* a simple blocker expansion method.
*
* \subsection filteredcomplexeshassecomplex Hasse complex
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 85790baf..6dce947c 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -187,6 +187,12 @@ class Simplex_tree {
typedef Simplex_tree_boundary_simplex_iterator<Simplex_tree> Boundary_simplex_iterator;
/** \brief Range over the simplices of the boundary of a simplex. */
typedef boost::iterator_range<Boundary_simplex_iterator> Boundary_simplex_range;
+ /** \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * 'value_type' is std::pair<Simplex_handle, Vertex_handle>. */
+ typedef Simplex_tree_boundary_opposite_vertex_simplex_iterator<Simplex_tree> Boundary_opposite_vertex_simplex_iterator;
+ /** \brief Range over the simplices of the boundary of a simplex and their opposite vertices. */
+ typedef boost::iterator_range<Boundary_opposite_vertex_simplex_iterator> Boundary_opposite_vertex_simplex_range;
/** \brief Iterator over the simplices of the simplicial complex.
*
* 'value_type' is Simplex_handle. */
@@ -296,6 +302,23 @@ class Simplex_tree {
Boundary_simplex_iterator(this));
}
+ /** \brief Given a simplex, returns a range over the simplices of its boundary and their opposite vertices.
+ *
+ * The boundary of a simplex is the set of codimension \f$1\f$ subsimplices of the simplex.
+ * If the simplex is \f$[v_0, \cdots ,v_d]\f$, with canonical orientation induced by \f$ v_0 < \cdots < v_d \f$, the
+ * iterator enumerates the simplices of the boundary in the order:
+ * \f$[v_0,\cdots,\widehat{v_i},\cdots,v_d]\f$ for \f$i\f$ from \f$d\f$ to \f$0\f$, where \f$\widehat{v_i}\f$ means
+   * that the vertex \f$v_i\f$, known as the opposite vertex, is omitted from the boundary simplex, but returned as the second
+ * element of a pair.
+ *
+ * @param[in] sh Simplex for which the boundary is computed.
+ */
+ template<class SimplexHandle>
+ Boundary_opposite_vertex_simplex_range boundary_opposite_vertex_simplex_range(SimplexHandle sh) {
+ return Boundary_opposite_vertex_simplex_range(Boundary_opposite_vertex_simplex_iterator(this, sh),
+ Boundary_opposite_vertex_simplex_iterator(this));
+ }
+
/** @} */ // end range and iterator methods
/** \name Constructor/Destructor
* @{ */
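To make the new range concrete, here is a minimal C++ usage sketch (not taken from the patch; it assumes GUDHI headers are available and mirrors the style of the unit test added to simplex_tree_unit_test.cpp further below):

// A minimal sketch (not part of the patch); assumes GUDHI headers are available.
#include <gudhi/Simplex_tree.h>
#include <iostream>

int main() {
  Gudhi::Simplex_tree<> st;
  st.insert_simplex_and_subfaces({0, 1, 2}, 1.);  // a triangle and all of its faces
  auto sh = st.find({0, 1, 2});
  // Each element of the range is a std::pair<Simplex_handle, Vertex_handle>:
  // a codimension-1 face of {0, 1, 2} together with the vertex omitted to obtain it.
  for (const auto& face_and_vertex : st.boundary_opposite_vertex_simplex_range(sh)) {
    std::cout << "opposite vertex " << face_and_vertex.second << " for face:";
    for (auto vertex : st.simplex_vertex_range(face_and_vertex.first))
      std::cout << " " << vertex;
    std::cout << "\n";
  }
  // Following the documented order, the opposite vertices are reported as 2, then 1, then 0.
  return 0;
}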
@@ -1060,8 +1083,8 @@ class Simplex_tree {
*
* Inserts all vertices and edges given by a OneSkeletonGraph.
* OneSkeletonGraph must be a model of
- * <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
- * and <a href="http://www.boost.org/doc/libs/1_76_0/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
+ * <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/VertexAndEdgeListGraph.html">boost::VertexAndEdgeListGraph</a>
+ * and <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/PropertyGraph.html">boost::PropertyGraph</a>.
*
* The vertex filtration value is accessible through the property tag
* vertex_filtration_t.
@@ -1283,6 +1306,7 @@ class Simplex_tree {
Siblings * new_sib = new Siblings(siblings, // oncles
simplex->first, // parent
boost::adaptors::reverse(intersection)); // boost::container::ordered_unique_range_t
+ simplex->second.assign_children(new_sib);
std::vector<Vertex_handle> blocked_new_sib_vertex_list;
// As all intersections are inserted, we can call the blocker function on all new_sib members
for (auto new_sib_member = new_sib->members().begin();
@@ -1305,7 +1329,6 @@ class Simplex_tree {
new_sib->members().erase(blocked_new_sib_member);
}
// ensure recursive call
- simplex->second.assign_children(new_sib);
siblings_expansion_with_blockers(new_sib, max_dim, k - 1, block_simplex);
}
} else {
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index ee64a277..394c6ee1 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -5,6 +5,7 @@
* Copyright (C) 2014 Inria
*
* Modification(s):
+ * - 2022/04 Vincent Rouvreau: Add Simplex_tree_boundary_opposite_vertex_simplex_iterator for alpha and cech purpose
* - YYYY/MM Author: Description of the modification
*/
@@ -14,10 +15,10 @@
#include <gudhi/Debug_utils.h>
#include <boost/iterator/iterator_facade.hpp>
-#include <boost/version.hpp>
#include <boost/container/static_vector.hpp>
#include <vector>
+#include <utility> // for std::pair
namespace Gudhi {
@@ -124,7 +125,7 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
private:
friend class boost::iterator_core_access;
-// valid when iterating along the SAME boundary.
+ // valid when iterating along the SAME boundary.
bool equal(Simplex_tree_boundary_simplex_iterator const& other) const {
return sh_ == other.sh_;
}
@@ -179,6 +180,116 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
Simplex_handle sh_; // current Simplex_handle in the boundary
SimplexTree * st_; // simplex containing the simplicial complex
};
+
+/* \brief Iterator over the simplices of the boundary of a simplex and their opposite vertices.
+ *
+ * Forward iterator, value_type is std::pair<SimplexTree::Simplex_handle, SimplexTree::Vertex_handle>.*/
+template<class SimplexTree>
+class Simplex_tree_boundary_opposite_vertex_simplex_iterator : public boost::iterator_facade<
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator<SimplexTree>,
+ std::pair<typename SimplexTree::Simplex_handle, typename SimplexTree::Vertex_handle> const, boost::forward_traversal_tag> {
+ public:
+ using Simplex_handle = typename SimplexTree::Simplex_handle;
+ using Vertex_handle = typename SimplexTree::Vertex_handle;
+ using Siblings = typename SimplexTree::Siblings;
+
+ // For cython purpose only. The object it initializes should be overwritten ASAP and never used before it is
+ // overwritten.
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator()
+ : sib_(nullptr),
+ st_(nullptr) {
+ }
+
+ // any end() iterator
+ explicit Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st)
+ : last_(st->null_vertex()),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), st->null_vertex()),
+ st_(st) {
+ }
+
+ template<class SimplexHandle>
+ Simplex_tree_boundary_opposite_vertex_simplex_iterator(SimplexTree * st, SimplexHandle sh)
+ : last_(sh->first),
+ next_(st->null_vertex()),
+ sib_(nullptr),
+ baov_(st->null_simplex(), sh->first),
+ st_(st) {
+ // Only check once at the beginning instead of for every increment, as this is expensive.
+ if (SimplexTree::Options::contiguous_vertices)
+ GUDHI_CHECK(st_->contiguous_vertices(), "The set of vertices is not { 0, ..., n } without holes");
+ Siblings * sib = st->self_siblings(sh);
+ next_ = sib->parent();
+ sib_ = sib->oncles();
+ if (sib_ != nullptr) {
+ if (SimplexTree::Options::contiguous_vertices && sib_->oncles() == nullptr)
+ // Only relevant for edges
+ baov_.first = sib_->members_.begin()+next_;
+ else
+ baov_.first = sib_->find(next_);
+ }
+ }
+
+ private:
+ friend class boost::iterator_core_access;
+
+ // valid when iterating along the SAME boundary.
+ bool equal(Simplex_tree_boundary_opposite_vertex_simplex_iterator const& other) const {
+ return (baov_.first == other.baov_.first);
+ }
+
+ std::pair<Simplex_handle, Vertex_handle> const& dereference() const {
+ return baov_;
+ }
+
+ void increment() {
+ if (sib_ == nullptr) {
+ baov_.first = st_->null_simplex();
+ return; // ------>>
+ }
+ Siblings * for_sib = sib_;
+ Siblings * new_sib = sib_->oncles();
+ auto rit = suffix_.rbegin();
+ if (SimplexTree::Options::contiguous_vertices && new_sib == nullptr) {
+ // We reached the root, use a short-cut to find a vertex.
+ if (rit == suffix_.rend()) {
+ baov_.second = baov_.first->first;
+ // Segment, this vertex is the last boundary simplex
+ baov_.first = for_sib->members_.begin()+last_;
+ sib_ = nullptr;
+ return;
+ } else {
+ // Dim >= 2, initial step of the descent
+ baov_.first = for_sib->members_.begin()+*rit;
+ for_sib = baov_.first->second.children();
+ ++rit;
+ }
+ }
+ for (; rit != suffix_.rend(); ++rit) {
+ baov_.first = for_sib->find(*rit);
+ for_sib = baov_.first->second.children();
+ }
+ baov_.first = for_sib->find(last_); // baov_.first points to the right simplex now
+ suffix_.push_back(next_);
+ next_ = sib_->parent();
+ sib_ = new_sib;
+ baov_.second = suffix_.back();
+ }
+
+ // Most of the storage should be moved to the range, iterators should be light.
+ Vertex_handle last_; // last vertex of the simplex
+ Vertex_handle next_; // next vertex to push in suffix_
+ // 40 seems a conservative bound on the dimension of a Simplex_tree for now,
+ // as it would not fit on the biggest hard-drive.
+ boost::container::static_vector<Vertex_handle, 40> suffix_;
+ // static_vector still has some overhead compared to a trivial hand-made
+ // version using std::aligned_storage, or compared to making suffix_ static.
+ Siblings * sib_; // where the next search will start from
+ std::pair<Simplex_handle, Vertex_handle> baov_; // a pair containing the current Simplex_handle in the boundary and its opposite vertex
+ SimplexTree * st_; // simplex containing the simplicial complex
+};
+
/*---------------------------------------------------------------------------*/
/* \brief Iterator over the simplices of a simplicial complex.
*
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
index b53bad29..ae25d290 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_siblings.h
@@ -36,6 +36,7 @@ class Simplex_tree_siblings {
template<class T> friend class Simplex_tree_boundary_simplex_iterator;
template<class T> friend class Simplex_tree_complex_simplex_iterator;
template<class T> friend class Simplex_tree_skeleton_simplex_iterator;
+ template<class T> friend class Simplex_tree_boundary_opposite_vertex_simplex_iterator;
typedef typename SimplexTree::Vertex_handle Vertex_handle;
typedef typename SimplexTree::Filtration_value Filtration_value;
diff --git a/src/Simplex_tree/test/CMakeLists.txt b/src/Simplex_tree/test/CMakeLists.txt
index cf2b0153..25b562e0 100644
--- a/src/Simplex_tree/test/CMakeLists.txt
+++ b/src/Simplex_tree/test/CMakeLists.txt
@@ -34,3 +34,9 @@ if (TBB_FOUND)
target_link_libraries(Simplex_tree_make_filtration_non_decreasing_test_unit ${TBB_LIBRARIES})
endif()
gudhi_add_boost_test(Simplex_tree_make_filtration_non_decreasing_test_unit)
+
+add_executable ( Simplex_tree_graph_expansion_test_unit simplex_tree_graph_expansion_unit_test.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Simplex_tree_graph_expansion_test_unit ${TBB_LIBRARIES})
+endif()
+gudhi_add_boost_test(Simplex_tree_graph_expansion_test_unit)
diff --git a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
index 881a06ae..6d63d8ae 100644
--- a/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_graph_expansion_unit_test.cpp
@@ -9,33 +9,62 @@
*/
#include <iostream>
-#include <fstream>
-#include <string>
-#include <algorithm>
-#include <utility> // std::pair, std::make_pair
-#include <cmath> // float comparison
-#include <limits>
-#include <functional> // greater
+#include <vector>
#define BOOST_TEST_DYN_LINK
-#define BOOST_TEST_MODULE "simplex_tree"
+#define BOOST_TEST_MODULE "simplex_tree_graph_expansion"
#include <boost/test/unit_test.hpp>
#include <boost/mpl/list.hpp>
-// ^
-// /!\ Nothing else from Simplex_tree shall be included to test includes are well defined.
#include "gudhi/Simplex_tree.h"
+#include <gudhi/Unitary_tests_utils.h>
using namespace Gudhi;
typedef boost::mpl::list<Simplex_tree<>, Simplex_tree<Simplex_tree_options_fast_persistence>> list_of_tested_variants;
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_all_is_blocked, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_all_is_blocked\n";
+ std::clog << "********************************************************************\n";
+ using Simplex_handle = typename typeST::Simplex_handle;
+ // Construct the Simplex Tree with a 1-skeleton graph example
+ typeST simplex_tree;
+
+ simplex_tree.insert_simplex({0, 1}, 0.);
+ simplex_tree.insert_simplex({0, 2}, 1.);
+ simplex_tree.insert_simplex({0, 3}, 2.);
+ simplex_tree.insert_simplex({1, 2}, 3.);
+ simplex_tree.insert_simplex({1, 3}, 4.);
+ simplex_tree.insert_simplex({2, 3}, 5.);
+ simplex_tree.insert_simplex({2, 4}, 6.);
+ simplex_tree.insert_simplex({3, 6}, 7.);
+ simplex_tree.insert_simplex({4, 5}, 8.);
+ simplex_tree.insert_simplex({4, 6}, 9.);
+ simplex_tree.insert_simplex({5, 6}, 10.);
+ simplex_tree.insert_simplex({6}, 10.);
-bool AreAlmostTheSame(float a, float b) {
- return std::fabs(a - b) < std::numeric_limits<float>::epsilon();
+ typeST stree_copy = simplex_tree;
+
+ simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){ return true; });
+
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
+ }
+
+ BOOST_CHECK(stree_copy == simplex_tree);
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_blockers_3\n";
+ std::clog << "********************************************************************\n";
using Simplex_handle = typename typeST::Simplex_handle;
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -72,9 +101,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, li
return result;
});
- std::clog << "********************************************************************\n";
- std::clog << "simplex_tree_expansion_with_blockers_3\n";
- std::clog << "********************************************************************\n";
std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
std::clog << " - dimension " << simplex_tree.dimension() << "\n";
std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
@@ -89,15 +115,23 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_3, typeST, li
BOOST_CHECK(simplex_tree.dimension() == 3);
// {4, 5, 6} shall be blocked
BOOST_CHECK(simplex_tree.find({4, 5, 6}) == simplex_tree.null_simplex());
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2,3})), 7.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2,3})),
+ static_cast<typename typeST::Filtration_value>(7.));
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_blockers_2\n";
+ std::clog << "********************************************************************\n";
using Simplex_handle = typename typeST::Simplex_handle;
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -134,9 +168,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, li
return result;
});
- std::clog << "********************************************************************\n";
- std::clog << "simplex_tree_expansion_with_blockers_2\n";
- std::clog << "********************************************************************\n";
std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
std::clog << " - dimension " << simplex_tree.dimension() << "\n";
std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
@@ -151,14 +182,22 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_blockers_2, typeST, li
BOOST_CHECK(simplex_tree.dimension() == 2);
// {4, 5, 6} shall be blocked
BOOST_CHECK(simplex_tree.find({4, 5, 6}) == simplex_tree.null_simplex());
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 6.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(6.));
BOOST_CHECK(simplex_tree.find({0,1,2,3}) == simplex_tree.null_simplex());
}
-BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_variants) {
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_with_find_simplex_blockers, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_with_find_simplex_blockers\n";
+ std::clog << "********************************************************************\n";
+ using Simplex_handle = typename typeST::Simplex_handle;
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -175,10 +214,66 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_var
simplex_tree.insert_simplex({5, 6}, 10.);
simplex_tree.insert_simplex({6}, 10.);
- simplex_tree.expansion(3);
+ simplex_tree.expansion_with_blockers(3, [&](Simplex_handle sh){
+ bool result = false;
+ std::clog << "Blocker on [";
+ std::vector<typename typeST::Vertex_handle> simplex;
+    // The user can loop over the vertices of the given simplex_handle, e.g.:
+ for (auto vertex : simplex_tree.simplex_vertex_range(sh)) {
+      // We block the expansion if the vertex '1' is in the given list of vertices
+ if (vertex == 1)
+ result = true;
+ std::clog << vertex << ", ";
+ simplex.push_back(vertex);
+ }
+ std::clog << "] => " << result << std::endl;
+    // Not efficient, but checks that find() on the vertex list returns the given Simplex_handle - required by the python interface
+ BOOST_CHECK(simplex_tree.find(simplex) == sh);
+ return result;
+ });
+
+ std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
+ std::clog << " - dimension " << simplex_tree.dimension() << "\n";
+ std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
+ for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
+ std::clog << " " << "[" << simplex_tree.filtration(f_simplex) << "] ";
+ for (auto vertex : simplex_tree.simplex_vertex_range(f_simplex))
+ std::clog << "(" << vertex << ")";
+ std::clog << std::endl;
+ }
+
+ BOOST_CHECK(simplex_tree.num_simplices() == 20);
+ BOOST_CHECK(simplex_tree.dimension() == 2);
+
+  // {1, 2, 3}, {0, 1, 2} and {0, 1, 3} shall be blocked as they contain vertex 1
+ BOOST_CHECK(simplex_tree.find({4, 5, 6}) != simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({1, 2, 3}) == simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 2, 3}) != simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 1, 2}) == simplex_tree.null_simplex());
+ BOOST_CHECK(simplex_tree.find({0, 1, 3}) == simplex_tree.null_simplex());
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_3, typeST, list_of_tested_variants) {
std::clog << "********************************************************************\n";
std::clog << "simplex_tree_expansion_3\n";
std::clog << "********************************************************************\n";
+ // Construct the Simplex Tree with a 1-skeleton graph example
+ typeST simplex_tree;
+
+ simplex_tree.insert_simplex({0, 1}, 0.);
+ simplex_tree.insert_simplex({0, 2}, 1.);
+ simplex_tree.insert_simplex({0, 3}, 2.);
+ simplex_tree.insert_simplex({1, 2}, 3.);
+ simplex_tree.insert_simplex({1, 3}, 4.);
+ simplex_tree.insert_simplex({2, 3}, 5.);
+ simplex_tree.insert_simplex({2, 4}, 6.);
+ simplex_tree.insert_simplex({3, 6}, 7.);
+ simplex_tree.insert_simplex({4, 5}, 8.);
+ simplex_tree.insert_simplex({4, 6}, 9.);
+ simplex_tree.insert_simplex({5, 6}, 10.);
+ simplex_tree.insert_simplex({6}, 10.);
+
+ simplex_tree.expansion(3);
std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
std::clog << " - dimension " << simplex_tree.dimension() << "\n";
std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
@@ -192,16 +287,25 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion, typeST, list_of_tested_var
BOOST_CHECK(simplex_tree.num_simplices() == 24);
BOOST_CHECK(simplex_tree.dimension() == 3);
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({4,5,6})), 10.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 3.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2,3})), 5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({4,5,6})),
+ static_cast<typename typeST::Filtration_value>(10.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(3.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
}
BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************\n";
+ std::clog << "simplex_tree_expansion_2\n";
+ std::clog << "********************************************************************\n";
// Construct the Simplex Tree with a 1-skeleton graph example
typeST simplex_tree;
@@ -220,9 +324,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_v
simplex_tree.expansion(2);
- std::clog << "********************************************************************\n";
- std::clog << "simplex_tree_expansion_2\n";
- std::clog << "********************************************************************\n";
std::clog << "* The complex contains " << simplex_tree.num_simplices() << " simplices";
std::clog << " - dimension " << simplex_tree.dimension() << "\n";
std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
@@ -236,10 +337,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_expansion_2, typeST, list_of_tested_v
BOOST_CHECK(simplex_tree.num_simplices() == 23);
BOOST_CHECK(simplex_tree.dimension() == 2);
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({4,5,6})), 10.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,2})), 3.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,1,3})), 4.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({0,2,3})), 5.));
- BOOST_CHECK(AreAlmostTheSame(simplex_tree.filtration(simplex_tree.find({1,2,3})), 5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({4,5,6})),
+ static_cast<typename typeST::Filtration_value>(10.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,2})),
+ static_cast<typename typeST::Filtration_value>(3.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,1,3})),
+ static_cast<typename typeST::Filtration_value>(4.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({0,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
+ GUDHI_TEST_FLOAT_EQUALITY_CHECK(simplex_tree.filtration(simplex_tree.find({1,2,3})),
+ static_cast<typename typeST::Filtration_value>(5.));
BOOST_CHECK(simplex_tree.find({0,1,2,3}) == simplex_tree.null_simplex());
}
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index bdd41d34..b18e2ec4 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -17,6 +17,8 @@
#include <limits>
#include <functional> // greater
#include <tuple> // std::tie
+#include <iterator> // for std::distance
+#include <cstddef> // for std::size_t
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "simplex_tree"
@@ -991,3 +993,48 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_reset_filtration, typeST, list_of_tes
}
+BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_tree_boundaries_and_opposite_vertex_iterator, typeST, list_of_tested_variants) {
+ std::clog << "********************************************************************" << std::endl;
+ std::clog << "TEST OF BOUNDARIES AND OPPOSITE VERTEX ITERATORS" << std::endl;
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 3.);
+ st.insert_simplex_and_subfaces({3, 0}, 2.);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 3.);
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 4.);
+
+ /* Inserted simplex: */
+ /* 1 6 */
+ /* o---o */
+ /* /X\7/ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+ using Simplex = std::vector<typename typeST::Vertex_handle>;
+  // the vertices of each simplex must be kept sorted so that std::vector's operator== can be used - cf. last BOOST_CHECK
+ std::vector<Simplex> simplices = {{0, 1, 2}, {0, 3}, {0, 1, 6, 7}, {3, 4, 5}, {3, 5}, {2}};
+ for (auto simplex : simplices) {
+ Simplex opposite_vertices;
+ for(auto boundary_and_opposite_vertex : st.boundary_opposite_vertex_simplex_range(st.find(simplex))) {
+ Simplex output;
+ for (auto vertex : st.simplex_vertex_range(boundary_and_opposite_vertex.first)) {
+ std::clog << vertex << " ";
+ output.emplace_back(vertex);
+ }
+ std::clog << " - opposite vertex = " << boundary_and_opposite_vertex.second << std::endl;
+ // Check that boundary simplex + opposite vertex = simplex given as input
+ output.emplace_back(boundary_and_opposite_vertex.second);
+ std::sort(output.begin(), output.end());
+ BOOST_CHECK(simplex == output);
+ opposite_vertices.emplace_back(boundary_and_opposite_vertex.second);
+ }
+ // Check that the list of all opposite vertices = simplex given as input
+    // no opposite vertices when the simplex given as input is a single vertex (dimension 0)
+ std::sort(opposite_vertices.begin(), opposite_vertices.end());
+ if (simplex.size() > 1)
+ BOOST_CHECK(simplex == opposite_vertices);
+ else
+ BOOST_CHECK(opposite_vertices.size() == 0);
+ }
+}
diff --git a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
index 0c2014bd..23eb3670 100644
--- a/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
+++ b/src/Skeleton_blocker/concept/SkeletonBlockerDS.h
@@ -29,7 +29,7 @@ struct SkeletonBlockerDS {
/**
* @brief Root_vertex_handle and Vertex_handle are similar to global and local vertex descriptor
- * used in <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>
+ * used in <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>
* and allow to localize a vertex of a subcomplex on its parent root complex.
*
* In gross, vertices are stored in a vector
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
index 0c0cc624..d091d7dd 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_simple_traits.h
@@ -28,7 +28,7 @@ namespace skeleton_blocker {
*/
struct Skeleton_blocker_simple_traits {
/**
- * @brief Global and local handle similar to <a href="http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/subgraph.html">boost subgraphs</a>.
+ * @brief Global and local handle similar to <a href="https://www.boost.org/doc/libs/release/libs/graph/doc/subgraph.html">boost subgraphs</a>.
* Vertices are stored in a vector.
* For the root simplicial complex, the local and global descriptors are the same.
* For a subcomplex L and one of its vertices 'v', the local descriptor of 'v' is its position in
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
index 125c6387..031bcb9c 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
@@ -1071,7 +1071,6 @@ class Skeleton_blocker_complex {
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
void remove_popable_blockers();
@@ -1103,7 +1102,6 @@ class Skeleton_blocker_complex {
public:
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
void remove_star(Vertex_handle a, Vertex_handle b);
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
index 404f04f9..5db9c2fd 100644..100755
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
@@ -39,7 +39,6 @@ bool Skeleton_blocker_complex<SkeletonBlockerDS>::is_popable_blocker(Blocker_han
/**
* Removes all the popable blockers of the complex and delete them.
- * @returns the number of popable blockers deleted
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_popable_blockers() {
@@ -160,7 +159,6 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::update_blockers_after_remove_s
/**
* Remove the star of the edge connecting vertices a and b.
- * @returns the number of blocker that have been removed
*/
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_star(Vertex_handle a, Vertex_handle b) {
diff --git a/src/Subsampling/include/gudhi/sparsify_point_set.h b/src/Subsampling/include/gudhi/sparsify_point_set.h
index 4571b8f3..b325fe3c 100644
--- a/src/Subsampling/include/gudhi/sparsify_point_set.h
+++ b/src/Subsampling/include/gudhi/sparsify_point_set.h
@@ -11,12 +11,7 @@
#ifndef SPARSIFY_POINT_SET_H_
#define SPARSIFY_POINT_SET_H_
-#include <boost/version.hpp>
-#if BOOST_VERSION < 106600
-# include <boost/function_output_iterator.hpp>
-#else
# include <boost/iterator/function_output_iterator.hpp>
-#endif
#include <gudhi/Kd_tree_search.h>
#ifdef GUDHI_SUBSAMPLING_PROFILING
diff --git a/src/Tangential_complex/benchmark/benchmark_tc.cpp b/src/Tangential_complex/benchmark/benchmark_tc.cpp
index 6da1425f..8e7c72ff 100644
--- a/src/Tangential_complex/benchmark/benchmark_tc.cpp
+++ b/src/Tangential_complex/benchmark/benchmark_tc.cpp
@@ -33,6 +33,7 @@ const std::size_t ONLY_LOAD_THE_FIRST_N_POINTS = 20000000;
#include <gudhi/sparsify_point_set.h>
#include <gudhi/random_point_generators.h>
#include <gudhi/Tangential_complex/utilities.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/assertions_behaviour.h>
#include <CGAL/Epick_d.h>
diff --git a/src/Tangential_complex/example/example_basic.cpp b/src/Tangential_complex/example/example_basic.cpp
index ab35edf0..c50b9b8c 100644
--- a/src/Tangential_complex/example/example_basic.cpp
+++ b/src/Tangential_complex/example/example_basic.cpp
@@ -1,7 +1,6 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
-//#include <gudhi/Fake_simplex_tree.h>
-
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
@@ -39,7 +38,6 @@ int main(void) {
// Export the TC into a Simplex_tree
Gudhi::Simplex_tree<> stree;
- //Gudhi::Fake_simplex_tree stree;
tc.create_complex(stree);
// Display stats about inconsistencies
diff --git a/src/Tangential_complex/example/example_with_perturb.cpp b/src/Tangential_complex/example/example_with_perturb.cpp
index d0d877ea..e70e2980 100644
--- a/src/Tangential_complex/example/example_with_perturb.cpp
+++ b/src/Tangential_complex/example/example_with_perturb.cpp
@@ -1,5 +1,6 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
diff --git a/src/Tangential_complex/test/test_tangential_complex.cpp b/src/Tangential_complex/test/test_tangential_complex.cpp
index 023c1e1a..a24b9ae2 100644
--- a/src/Tangential_complex/test/test_tangential_complex.cpp
+++ b/src/Tangential_complex/test/test_tangential_complex.cpp
@@ -14,6 +14,7 @@
#include <gudhi/Tangential_complex.h>
#include <gudhi/sparsify_point_set.h>
+#include <gudhi/Simplex_tree.h>
#include <CGAL/Epick_d.h>
#include <CGAL/Random.h>
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index b316740d..6a94d1f5 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -1,12 +1,14 @@
# This file manages third party libraries required by GUDHI
-find_package(Boost 1.56.0 QUIET OPTIONAL_COMPONENTS filesystem unit_test_framework program_options)
+find_package(Boost 1.66.0 QUIET OPTIONAL_COMPONENTS filesystem unit_test_framework program_options)
# Boost_FOUND is not reliable
if(NOT Boost_VERSION)
message(FATAL_ERROR "NOTICE: This program requires Boost and will not be compiled.")
endif(NOT Boost_VERSION)
include_directories(${Boost_INCLUDE_DIRS})
+message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS})
+message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS})
find_package(GMP)
if(GMP_FOUND)
@@ -17,6 +19,15 @@ if(GMP_FOUND)
endif()
endif()
+# With windows vcpkg eigen 3.4.0#2, the build fails with
+# error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+# cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+# The workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+  message("++ Forcing EIGEN_DEFAULT_DENSE_INDEX_TYPE to int, as explicitly requested by the user")
+ add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int)
+endif()
+
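As a hedged illustration of the effect (assuming a standard Eigen >= 3.3 installation; this snippet is not part of the patch): add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int) puts the macro on every compile line before any Eigen header is included, so Eigen's dense index type becomes a plain int instead of the default std::ptrdiff_t, which sidesteps the __gmp_expr conversion error quoted above.

// Hypothetical check of the workaround; the macro must be defined before the first Eigen include,
// which the project-wide add_definitions() call guarantees.
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#include <Eigen/Core>
#include <type_traits>

static_assert(std::is_same<Eigen::Index, int>::value,
              "with the workaround enabled, Eigen's dense index type is a plain int");

int main() { return 0; }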
# In CMakeLists.txt, when include(${CGAL_USE_FILE}), CMAKE_CXX_FLAGS are overwritten.
# cf. http://doc.cgal.org/latest/Manual/installation.html#title40
# A workaround is to include(${CGAL_USE_FILE}) before adding "-std=c++11".
@@ -90,9 +101,6 @@ add_definitions( -DBOOST_ALL_DYN_LINK )
# problem on Mac with boost_system and boost_thread
add_definitions( -DBOOST_SYSTEM_NO_DEPRECATED )
-message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS})
-message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS})
-
if (WITH_GUDHI_PYTHON)
# Find the correct Python interpreter.
# Can be set with -DPYTHON_EXECUTABLE=/usr/bin/python3 or -DPython_ADDITIONAL_VERSIONS=3 for instance.
@@ -181,4 +189,4 @@ if (WITH_GUDHI_PYTHON)
endif(NOT SPHINX_PATH)
endif(SPHINX_FOUND)
endif(PYTHONINTERP_FOUND AND CYTHON_FOUND)
-endif (WITH_GUDHI_PYTHON)
\ No newline at end of file
+endif (WITH_GUDHI_PYTHON)
diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h
index 0c4320f6..1634b19e 100644
--- a/src/common/doc/examples.h
+++ b/src/common/doc/examples.h
@@ -1,6 +1,6 @@
// List of GUDHI examples and utils - Doxygen needs at least a file tag to analyse comments
// Generated from scripts/cpp_examples_for_doxygen.py
-/*! @file Examples
+/*! @file
* \section Witness_complex_example_section Witness_complex
* @example strong_witness_persistence.cpp
* @example weak_witness_persistence.cpp
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index ef668dfb..24a7fc7a 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -5,8 +5,8 @@
* Examples of GUDHI headers inclusion can be found in \ref utilities.
*
* \section compiling Compiling
- * The library uses c++14 and requires <a target="_blank" href="http://www.boost.org/">Boost</a> &ge; 1.56.0
- * and <a target="_blank" href="https://www.cmake.org/">CMake</a> &ge; 3.5.
+ * The library uses c++14 and requires <a target="_blank" href="https://www.boost.org/">Boost</a> &ge; 1.66.0
+ * and <a target="_blank" href="https://cmake.org/">CMake</a> &ge; 3.5.
* It is a multi-platform library and compiles on Linux, Mac OSX and Visual Studio 2015.
*
* \subsection utilities Utilities and examples
@@ -56,10 +56,9 @@ make \endverbatim
* The multi-field persistent homology algorithm requires GMP which is a free library for arbitrary-precision
* arithmetic, operating on signed integers, rational numbers, and floating point numbers.
*
- * The following example requires the <a target="_blank" href="http://gmplib.org/">GNU Multiple Precision Arithmetic
+ * The following example requires the <a target="_blank" href="https://gmplib.org/">GNU Multiple Precision Arithmetic
* Library</a> (GMP) and will not be built if GMP is not installed:
- * \li <a href="rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
*
* Having GMP version 4.2 or higher installed is recommended.
*
@@ -76,179 +75,101 @@ make \endverbatim
*
* The following examples/utilities require the <a target="_blank" href="http://www.cgal.org/">Computational Geometry Algorithms
* Library</a> (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed:
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="bottleneck_basic_example_8cpp-example.html">
- * Bottleneck_distance/bottleneck_basic_example.cpp</a>
- * \li <a href="bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/bottleneck_distance.cpp</a>
- * \li <a href="_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_basic_example.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection eigen Eigen
* Some GUDHI modules (cf. \ref main_page "modules list"), and few examples require
- * <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> is a C++ template library for linear algebra:
+ * <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a>, a C++ template library for linear algebra:
* matrices, vectors, numerical solvers, and related algorithms.
*
- * The following examples/utilities require the <a target="_blank" href="http://eigen.tuxfamily.org/">Eigen</a> and will not be
+ * The following examples/utilities require <a target="_blank" href="https://eigen.tuxfamily.org">Eigen</a> and will not be
* built if Eigen is not installed:
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="_weighted_alpha_complex_3d_from_points_8cpp-example.html">
- * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp</a>
- * \li <a href="alpha_rips_persistence_bottleneck_distance_8cpp-example.html">
- * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="example_spatial_searching_8cpp-example.html">
- * Spatial_searching/example_spatial_searching.cpp</a>
- * \li <a href="example_choose_n_farthest_points_8cpp-example.html">
- * Subsampling/example_choose_n_farthest_points.cpp</a>
- * \li <a href="example_pick_n_random_points_8cpp-example.html">
- * Subsampling/example_pick_n_random_points.cpp</a>
- * \li <a href="example_sparsify_point_set_8cpp-example.html">
- * Subsampling/example_sparsify_point_set.cpp</a>
- * \li <a href="example_basic_8cpp-example.html">
- * Tangential_complex/example_basic.cpp</a>
- * \li <a href="example_with_perturb_8cpp-example.html">
- * Tangential_complex/example_with_perturb.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_strong_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_strong_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_off_8cpp-example.html">
- * Witness_complex/example_witness_complex_off.cpp</a>
- * \li <a href="example_witness_complex_sphere_8cpp-example.html">
- * Witness_complex/example_witness_complex_sphere.cpp</a>
- * \li <a href="_coxeter_triangulation_2cell_complex_from_basic_circle_manifold_8cpp-example.html">
- * Coxeter_triangulation/cell_complex_from_basic_circle_manifold.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_custom_function_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_custom_function.cpp</a>
- * \li <a href="_coxeter_triangulation_2manifold_tracing_flat_torus_with_boundary_8cpp-example.html">
- * Coxeter_triangulation/manifold_tracing_flat_torus_with_boundary.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Weighted_alpha_complex_3d_from_points.cpp}
+ * \li \gudhi_example_link{Bottleneck_distance,alpha_rips_persistence_bottleneck_distance.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Spatial_searching,example_spatial_searching.cpp}
+ * \li \gudhi_example_link{Subsampling,example_choose_n_farthest_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_pick_n_random_points.cpp}
+ * \li \gudhi_example_link{Subsampling,example_sparsify_point_set.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_basic.cpp}
+ * \li \gudhi_example_link{Tangential_complex,example_with_perturb.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_strong_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_off.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_witness_complex_sphere.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,cell_complex_from_basic_circle_manifold.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_custom_function.cpp}
+ * \li \gudhi_example_link{Coxeter_triangulation,manifold_tracing_flat_torus_with_boundary.cpp}
*
* \subsection tbb Threading Building Blocks
- * <a target="_blank" href="https://www.threadingbuildingblocks.org/">Intel&reg; TBB</a> lets you easily write parallel
+ * <a target="_blank" href="https://github.com/oneapi-src/oneTBB">Intel&reg; TBB</a> lets you easily write parallel
* C++ programs that take full advantage of multicore performance, that are portable and composable, and that have
* future-proof scalability.
*
* Having Intel&reg; TBB installed is recommended to parallelize and accelerate some GUDHI computations.
*
* The following examples/utilities use Intel&reg; TBB if installed:
- * \li <a href="_alpha_complex_from_off_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_off.cpp</a>
- * \li <a href="_alpha_complex_from_points_8cpp-example.html">
- * Alpha_complex/Alpha_complex_from_points.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Alpha_complex/alpha_complex_persistence.cpp</a>
- * \li <a href="cubical_complex_persistence_8cpp-example.html">
- * Bitmap_cubical_complex/cubical_complex_persistence.cpp</a>
- * \li <a href="periodic_cubical_complex_persistence_8cpp-example.html">
- * Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp</a>
- * \li <a href="_random_bitmap_cubical_complex_8cpp-example.html">
- * Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp</a>
- * \li <a href="_coord_g_i_c_8cpp-example.html">
- * Nerve_GIC/CoordGIC.cpp</a>
- * \li <a href="_func_g_i_c_8cpp-example.html">
- * Nerve_GIC/FuncGIC.cpp</a>
- * \li <a href="_nerve_8cpp-example.html">
- * Nerve_GIC/Nerve.cpp</a>
- * \li <a href="_voronoi_g_i_c_8cpp-example.html">
- * Nerve_GIC/VoronoiGIC.cpp</a>
- * \li <a href="simple_simplex_tree_8cpp-example.html">
- * Simplex_tree/simple_simplex_tree.cpp</a>
- * \li <a href="example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
- * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
- * \li <a href="simplex_tree_from_cliques_of_graph_8cpp-example.html">
- * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp</a>
- * \li <a href="graph_expansion_with_blocker_8cpp-example.html">
- * Simplex_tree/graph_expansion_with_blocker.cpp</a>
- * \li <a href="alpha_complex_3d_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_3d_persistence.cpp</a>
- * \li <a href="alpha_complex_persistence_8cpp-example.html">
- * Persistent_cohomology/alpha_complex_persistence.cpp</a>
- * \li <a href="rips_persistence_via_boundary_matrix_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp</a>
- * \li <a href="persistence_from_file_8cpp-example.html">
- * Persistent_cohomology/persistence_from_file.cpp</a>
- * \li <a href="persistence_from_simple_simplex_tree_8cpp-example.html">
- * Persistent_cohomology/persistence_from_simple_simplex_tree.cpp</a>
- * \li <a href="plain_homology_8cpp-example.html">
- * Persistent_cohomology/plain_homology.cpp</a>
- * \li <a href="rips_multifield_persistence_8cpp-example.html">
- * Persistent_cohomology/rips_multifield_persistence.cpp</a>
- * \li <a href="rips_persistence_step_by_step_8cpp-example.html">
- * Persistent_cohomology/rips_persistence_step_by_step.cpp</a>
- * \li <a href="custom_persistence_sort_8cpp-example.html">
- * Persistent_cohomology/custom_persistence_sort.cpp</a>
- * \li <a href="example_one_skeleton_rips_from_points_8cpp-example.html">
- * Rips_complex/example_one_skeleton_rips_from_points.cpp</a>
- * \li <a href="example_rips_complex_from_off_file_8cpp-example.html">
- * Rips_complex/example_rips_complex_from_off_file.cpp</a>
- * \li <a href="rips_distance_matrix_persistence_8cpp-example.html">
- * Rips_complex/rips_distance_matrix_persistence.cpp</a>
- * \li <a href="rips_persistence_8cpp-example.html">
- * Rips_complex/rips_persistence.cpp</a>
- * \li <a href="strong_witness_persistence_8cpp-example.html">
- * Witness_complex/strong_witness_persistence.cpp</a>
- * \li <a href="weak_witness_persistence_8cpp-example.html">
- * Witness_complex/weak_witness_persistence.cpp</a>
- * \li <a href="example_nearest_landmark_table_8cpp-example.html">
- * Witness_complex/example_nearest_landmark_table.cpp</a>
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_off.cpp}
+ * \li \gudhi_example_link{Alpha_complex,Alpha_complex_from_points.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Alpha_complex,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,periodic_cubical_complex_persistence.cpp}
+ * \li \gudhi_example_link{Bitmap_cubical_complex,Random_bitmap_cubical_complex.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,CoordGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,FuncGIC.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,Nerve.cpp}
+ * \li \gudhi_example_link{Nerve_GIC,VoronoiGIC.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Simplex_tree,example_alpha_shapes_3_simplex_tree_from_off_file.cpp}
+ * \li \gudhi_example_link{Simplex_tree,simplex_tree_from_cliques_of_graph.cpp}
+ * \li \gudhi_example_link{Simplex_tree,graph_expansion_with_blocker.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_3d_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,alpha_complex_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_via_boundary_matrix.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_file.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,persistence_from_simple_simplex_tree.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,plain_homology.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_multifield_persistence.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,rips_persistence_step_by_step.cpp}
+ * \li \gudhi_example_link{Persistent_cohomology,custom_persistence_sort.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_one_skeleton_rips_from_points.cpp}
+ * \li \gudhi_example_link{Rips_complex,example_rips_complex_from_off_file.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_distance_matrix_persistence.cpp}
+ * \li \gudhi_example_link{Rips_complex,rips_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,strong_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,weak_witness_persistence.cpp}
+ * \li \gudhi_example_link{Witness_complex,example_nearest_landmark_table.cpp}
*
* \section Contributions Bug reports and contributions
* Please help us improving the quality of the GUDHI library.
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index 6f995fee..2cb02e3f 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -231,13 +231,12 @@
homology of the input sequence. The resulting method is simple and extremely efficient.
Computation of edge collapse and persistent homology of a filtered flag complex via edge collapse as described in
- \cite edgecollapsesocg2020.
+ \cite edgecollapsearxiv.
</td>
<td width="15%">
- <b>Author:</b> Siddharth Pritam<br>
+ <b>Author:</b> Siddharth Pritam, Marc Glisse<br>
<b>Introduced in:</b> GUDHI 3.3.0<br>
- <b>Copyright:</b> MIT<br>
- <b>Requires:</b> \ref eigen
+ <b>Copyright:</b> MIT
</td>
</tr>
<tr>
diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h
index a1b104e2..29d5423d 100644
--- a/src/common/include/gudhi/reader_utils.h
+++ b/src/common/include/gudhi/reader_utils.h
@@ -14,11 +14,7 @@
#include <gudhi/graph_simplicial_complex.h>
#include <gudhi/Debug_utils.h>
-#if BOOST_VERSION < 106600
-# include <boost/function_output_iterator.hpp>
-#else
# include <boost/iterator/function_output_iterator.hpp>
-#endif
#include <boost/graph/adjacency_list.hpp>
#include <iostream>
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 8eb7478e..54221151 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -148,10 +148,6 @@ if(PYTHONINTERP_FOUND)
add_gudhi_debug_info("Eigen3 version ${EIGEN3_VERSION}")
# No problem, even if no CGAL found
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
- set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DGUDHI_USE_EIGEN3', ")
- set(GUDHI_USE_EIGEN3 "True")
- else (EIGEN3_FOUND)
- set(GUDHI_USE_EIGEN3 "False")
endif (EIGEN3_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ")
@@ -180,6 +176,16 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
endif ()
+ # With Windows vcpkg eigen 3.4.0#2, the build fails with
+ # error C2440: '<function-style-cast>': cannot convert from 'Eigen::EigenBase<Derived>::Index' to '__gmp_expr<mpq_t,mpq_t>'
+ # cf. https://gitlab.com/libeigen/eigen/-/issues/2476
+ # The workaround is to compile with '-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int'
+ if (FORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=int', ")
+ endif()
+
+
+ add_gudhi_debug_info("Boost version ${Boost_VERSION}")
if(CGAL_FOUND)
if(NOT CGAL_VERSION VERSION_LESS 5.3.0)
# CGAL_HEADER_ONLY has been dropped for CGAL >= 5.3. Only the header-only version is supported.
@@ -214,13 +220,14 @@ if(PYTHONINTERP_FOUND)
endif(NOT GMP_LIBRARIES_DIR)
add_GUDHI_PYTHON_lib_dir(${GMP_LIBRARIES_DIR})
message("** Add gmp ${GMP_LIBRARIES_DIR}")
+ # When FORCE_CGAL_NOT_TO_BUILD_WITH_GMPXX is set, not defining CGAL_USE_GMPXX is sufficient
if(GMPXX_FOUND)
add_gudhi_debug_info("GMPXX_LIBRARIES = ${GMPXX_LIBRARIES}")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMPXX', ")
add_GUDHI_PYTHON_lib("${GMPXX_LIBRARIES}")
add_GUDHI_PYTHON_lib_dir(${GMPXX_LIBRARIES_DIR})
message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
- endif(GMPXX_FOUND)
+ endif()
endif(GMP_FOUND)
if(MPFR_FOUND)
add_gudhi_debug_info("MPFR_LIBRARIES = ${MPFR_LIBRARIES}")
@@ -264,6 +271,10 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_INCLUDE_DIRS "${GUDHI_PYTHON_INCLUDE_DIRS}'${TBB_INCLUDE_DIRS}', ")
endif()
+ if(DEBUG_TRACES)
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DDEBUG_TRACES', ")
+ endif(DEBUG_TRACES)
+
if(UNIX AND WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS)
set( GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}")
endif(UNIX AND WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS)
@@ -580,6 +591,10 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_dtm_rips_complex)
endif()
+ # persistence graphical tools
+ if(MATPLOTLIB_FOUND)
+ add_gudhi_py_test(test_persistence_graphical_tools)
+ endif()
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python" CACHE INTERNAL "GUDHI_MODULES")
@@ -590,4 +605,4 @@ if(PYTHONINTERP_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
-endif(PYTHONINTERP_FOUND) \ No newline at end of file
+endif(PYTHONINTERP_FOUND)
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index cfd22742..9e67d38a 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -27,7 +27,8 @@ Remarks
If you pass :code:`precision = 'exact'` to :func:`~gudhi.AlphaComplex.__init__`, the filtration values are the exact
ones converted to float. This can be very slow.
If you pass :code:`precision = 'safe'` (the default), the filtration values are only
- guaranteed to have a small multiplicative error compared to the exact value.
+ guaranteed to have a small multiplicative error compared to the exact value; see
+ :func:`~gudhi.AlphaComplex.set_float_relative_precision` to modify this precision.
A drawback, when computing persistence, is that an empty exact interval [10^12,10^12] may become a
non-empty approximate interval [10^12,10^12+10^6].
Using :code:`precision = 'fast'` makes the computations slightly faster, and the combinatorics are still exact, but
@@ -177,11 +178,11 @@ Weighted version
^^^^^^^^^^^^^^^^
A weighted version for Alpha complex is available. It is like a usual Alpha complex, but based on a
-`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#title20>`_.
+`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#TriangulationSecRT>`_.
This example builds the weighted alpha-complex of a small molecule, where atoms have different sizes.
It is taken from
-`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13>`_.
+`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#AlphaShape_3DExampleforWeightedAlphaShapes>`_.
Then, it is asked to display information about the alpha complex.
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 35c344e3..cff84691 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -33,25 +33,19 @@ Compiling
These instructions are for people who want to compile gudhi from source; they are
unnecessary if you installed a binary package of Gudhi as above. They assume that
you have downloaded a `release <https://github.com/GUDHI/gudhi-devel/releases>`_,
-with a name like `gudhi.3.2.0.tar.gz`, then run `tar xf gudhi.3.2.0.tar.gz`, which
-created a directory `gudhi.3.2.0`, hereinafter referred to as `/path-to-gudhi/`.
+with a name like `gudhi.3.X.Y.tar.gz`, then run `tar xf gudhi.3.X.Y.tar.gz`, which
+creates a directory `gudhi.3.X.Y`, hereinafter referred to as `/path-to-gudhi/`.
If you are instead using a git checkout, beware that the paths are a bit
different, and in particular the `python/` subdirectory is actually `src/python/`
there.
-The library uses c++14 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.56.0,
+The library uses C++14 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.66.0,
`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.5 to generate makefiles,
-`NumPy <http://numpy.org>`_ :math:`\geq` 1.15.0, `Cython <https://www.cython.org/>`_ and
-`pybind11 <https://github.com/pybind/pybind11>`_ to compile
-the GUDHI Python module.
-It is a multi-platform library and compiles on Linux, Mac OSX and Visual
-Studio 2017 or later.
+Python :math:`\geq` 3.5, `NumPy <http://numpy.org>`_ :math:`\geq` 1.15.0, `Cython <https://www.cython.org/>`_
+:math:`\geq` 0.27 and `pybind11 <https://github.com/pybind/pybind11>`_ to compile the GUDHI Python module.
+It is a multi-platform library and compiles on Linux, Mac OSX and Windows (Visual Studio 2017 or later).
-On `Windows <https://wiki.python.org/moin/WindowsCompilers>`_ , only Python
-:math:`\geq` 3.5 are available because of the required Visual Studio version.
-
-On other systems, if you have several Python/python installed, the version 2.X
-will be used by default, but you can force it by adding
+If you have several Python versions installed, version 2.X may be used by default, but you can force the use of Python 3 by adding
:code:`-DPython_ADDITIONAL_VERSIONS=3` to the cmake command.
GUDHI Python module compilation
@@ -142,54 +136,63 @@ If :code:`import gudhi` succeeds, please have a look to debug information:
.. code-block:: python
- import gudhi
- print(gudhi.__debug_info__)
+ import gudhi as gd
+ print(gd.__debug_info__)
+ print("+ Installed modules are: " + gd.__available_modules)
+ print("+ Missing modules are: " + gd.__missing_modules)
You should get something like:
.. code-block:: none
- Python version 2.7.15
- Cython version 0.26.1
- Numpy version 1.14.1
- Eigen3 version 3.1.1
- Installed modules are: off_reader;simplex_tree;rips_complex;
- cubical_complex;periodic_cubical_complex;reader_utils;witness_complex;
- strong_witness_complex;alpha_complex;
- Missing modules are: bottleneck_distance;nerve_gic;subsampling;
- tangential_complex;persistence_graphical_tools;
- euclidean_witness_complex;euclidean_strong_witness_complex;
- CGAL version 4.7.1000
- GMP_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmp.so
- GMPXX_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmpxx.so
- TBB version 9107 found and used
+ Pybind11 version 2.8.1
+ Python version 3.7.12
+ Cython version 0.29.25
+ Numpy version 1.21.4
+ Boost version 1.77.0
+ + Installed modules are: off_reader;simplex_tree;rips_complex;cubical_complex;periodic_cubical_complex;
+ persistence_graphical_tools;reader_utils;witness_complex;strong_witness_complex;
+ + Missing modules are: bottleneck;nerve_gic;subsampling;tangential_complex;alpha_complex;euclidean_witness_complex;
+ euclidean_strong_witness_complex;
-Here, you can see that bottleneck_distance, nerve_gic, subsampling and
-tangential_complex are missing because of the CGAL version.
-persistence_graphical_tools is not available as matplotlib is not
-available.
+Here, you can see that the modules that need CGAL are missing, because CGAL is not installed.
+:code:`persistence_graphical_tools` is installed, but
+`its functions <https://gudhi.inria.fr/python/latest/persistence_graphical_tools_ref.html>`_ will produce an error as
+matplotlib is not available.
Unit tests cannot be run as pytest is missing.
A complete configuration would be:
.. code-block:: none
- Python version 3.6.5
- Cython version 0.28.2
- Pytest version 3.3.2
- Matplotlib version 2.2.2
- Numpy version 1.14.5
- Eigen3 version 3.3.4
- Installed modules are: off_reader;simplex_tree;rips_complex;
- cubical_complex;periodic_cubical_complex;persistence_graphical_tools;
- reader_utils;witness_complex;strong_witness_complex;
- persistence_graphical_tools;bottleneck_distance;nerve_gic;subsampling;
- tangential_complex;alpha_complex;euclidean_witness_complex;
- euclidean_strong_witness_complex;
- CGAL header only version 4.11.0
+ Pybind11 version 2.8.1
+ Python version 3.9.7
+ Cython version 0.29.24
+ Pytest version 6.2.5
+ Matplotlib version 3.5.0
+ Numpy version 1.21.4
+ Scipy version 1.7.3
+ Scikit-learn version 1.0.1
+ POT version 0.8.0
+ HNSWlib found
+ PyKeOps version [pyKeOps]: 1.5
+ EagerPy version 0.30.0
+ TensorFlow version 2.7.0
+ Sphinx version 4.3.0
+ Sphinx-paramlinks version 0.5.2
+ python_docs_theme found
+ Eigen3 version 3.4.0
+ Boost version 1.74.0
+ CGAL version 5.3
GMP_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmp.so
GMPXX_LIBRARIES = /usr/lib/x86_64-linux-gnu/libgmpxx.so
+ MPFR_LIBRARIES = /usr/lib/x86_64-linux-gnu/libmpfr.so
TBB version 9107 found and used
+ + Installed modules are: bottleneck;off_reader;simplex_tree;rips_complex;cubical_complex;periodic_cubical_complex;
+ persistence_graphical_tools;reader_utils;witness_complex;strong_witness_complex;nerve_gic;subsampling;
+ tangential_complex;alpha_complex;euclidean_witness_complex;euclidean_strong_witness_complex;
+ + Missing modules are:
+
Documentation
=============
@@ -345,8 +348,8 @@ You can still deactivate LaTeX rendering by saying:
.. code-block:: python
- import gudhi
- gudhi.persistence_graphical_tools._gudhi_matplotlib_use_tex=False
+ import gudhi as gd
+ gd.persistence_graphical_tools._gudhi_matplotlib_use_tex=False
PyKeOps
-------
diff --git a/src/python/doc/nerve_gic_complex_user.rst b/src/python/doc/nerve_gic_complex_user.rst
index 0b820abf..8633cadb 100644
--- a/src/python/doc/nerve_gic_complex_user.rst
+++ b/src/python/doc/nerve_gic_complex_user.rst
@@ -12,7 +12,7 @@ Definition
Visualizations of the simplicial complexes can be done with either
neato (from `graphviz <http://www.graphviz.org/>`_),
`geomview <http://www.geomview.org/>`_,
-`KeplerMapper <https://github.com/MLWave/kepler-mapper>`_.
+`KeplerMapper <https://github.com/scikit-tda/kepler-mapper>`_.
Input point clouds are assumed to be OFF files (cf. `OFF file format <fileformats.html#off-file-format>`_).
Covers
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index d95b9d2b..e1d28c71 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -60,7 +60,7 @@ of shape (N x 2) encoding a persistence diagram (in a given dimension).
import matplotlib.pyplot as plt
import gudhi
import numpy as np
- d = np.array([[0, 1], [1, 2], [1, np.inf]])
+ d = np.array([[0., 1.], [1., 2.], [1., np.inf]])
gudhi.plot_persistence_diagram(d)
plt.show()
diff --git a/src/python/gudhi/__init__.py.in b/src/python/gudhi/__init__.py.in
index 3043201a..79e12fbc 100644
--- a/src/python/gudhi/__init__.py.in
+++ b/src/python/gudhi/__init__.py.in
@@ -23,10 +23,6 @@ __all__ = [@GUDHI_PYTHON_MODULES@ @GUDHI_PYTHON_MODULES_EXTRA@]
__available_modules = ''
__missing_modules = ''
-# For unitary tests purpose
-# could use "if 'collapse_edges' in gudhi.__all__" when collapse edges will have a python module
-__GUDHI_USE_EIGEN3 = @GUDHI_USE_EIGEN3@
-
# Try to import * from gudhi.__module_name for default modules.
# Extra modules require an explicit import by the user (mostly because of
# unusual dependencies, but also to avoid cluttering namespace gudhi and
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index a4888914..375e1561 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -31,6 +31,10 @@ cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
Alpha_complex_interface(vector[vector[double]] points, vector[double] weights, bool fast_version, bool exact_version) nogil except +
vector[double] get_point(int vertex) nogil except +
void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square, bool default_filtration_value) nogil except +
+ @staticmethod
+ void set_float_relative_precision(double precision) nogil
+ @staticmethod
+ double get_float_relative_precision() nogil
# AlphaComplex python interface
cdef class AlphaComplex:
@@ -133,3 +137,28 @@ cdef class AlphaComplex:
self.this_ptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
mas, compute_filtration)
return stree
+
+ @staticmethod
+ def set_float_relative_precision(precision):
+ """
+ :param precision: When the AlphaComplex is constructed with :code:`precision = 'safe'` (the default),
+ one can set the float relative precision of filtration values computed in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree`.
+ Default is :code:`1e-5` (cf. :func:`~gudhi.AlphaComplex.get_float_relative_precision`).
+ For more details, please refer to
+ `CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double <https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html>`_
+ :type precision: float
+ """
+ if precision <= 0. or precision >= 1.:
+ raise ValueError("Relative precision value must be strictly greater than 0 and strictly lower than 1")
+ Alpha_complex_interface.set_float_relative_precision(precision)
+
+ @staticmethod
+ def get_float_relative_precision():
+ """
+ :returns: The float relative precision of filtration values computation in
+ :func:`~gudhi.AlphaComplex.create_simplex_tree` when the AlphaComplex is constructed with
+ :code:`precision = 'safe'` (the default).
+ :rtype: float
+ """
+ return Alpha_complex_interface.get_float_relative_precision()
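
The two static methods added above can be exercised as in the following minimal sketch (the point set and the 1e-3 tolerance are arbitrary illustration values, not taken from the patch); with precision = 'safe', loosening the relative precision speeds up the filtration computation at the cost of accuracy:

import gudhi

# Default relative precision used by the 'safe' mode (1e-5, as documented above).
print(gudhi.AlphaComplex.get_float_relative_precision())

# Loosen the tolerance before building the simplex tree; values must lie strictly between 0 and 1.
gudhi.AlphaComplex.set_float_relative_precision(1e-3)

# Hypothetical 2D point cloud, for illustration only.
points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
stree = gudhi.AlphaComplex(points=points, precision='safe').create_simplex_tree()
print(stree.num_simplices())
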
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 848dc03e..7ed11360 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -12,6 +12,9 @@ from os import path
from math import isfinite
import numpy as np
from functools import lru_cache
+import warnings
+import errno
+import os
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
@@ -22,6 +25,7 @@ __license__ = "MIT"
_gudhi_matplotlib_use_tex = True
+
def __min_birth_max_death(persistence, band=0.0):
"""This function returns (min_birth, max_death) from the persistence.
@@ -44,20 +48,46 @@ def __min_birth_max_death(persistence, band=0.0):
min_birth = float(interval[1][0])
if band > 0.0:
max_death += band
+ # can happen if there are only points with infinite death
+ if min_birth == max_death:
+ max_death = max_death + 1.0
return (min_birth, max_death)
def _array_handler(a):
- '''
+ """
:param a: if array, assumes it is a (n x 2) np.array and return a
persistence-compatible list (padding with 0), so that the
plot can be performed seamlessly.
- '''
- if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float):
+ """
+ if isinstance(a[0][1], (np.floating, float)):
return [[0, x] for x in a]
else:
return a
+
+def _limit_to_max_intervals(persistence, max_intervals, key):
+ """This function returns truncated persistence if length is bigger than max_intervals.
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param max_intervals: maximal number of intervals to display.
+ Selected intervals are those with the longest life time. Set it
+ to 0 to see all. Default value is 1000.
+ :type max_intervals: int.
+ :param key: key function for sort algorithm.
+ :type key: function or lambda.
+ """
+ if max_intervals > 0 and max_intervals < len(persistence):
+ warnings.warn(
+ "There are %s intervals given as input, whereas max_intervals is set to %s."
+ % (len(persistence), max_intervals)
+ )
+ # Sort by life time, then keep only the first max_intervals elements
+ return sorted(persistence, key=key, reverse=True)[:max_intervals]
+ else:
+ return persistence
+
+
@lru_cache(maxsize=1)
def _matplotlib_can_use_tex():
"""This function returns True if matplotlib can deal with LaTeX, False otherwise.
@@ -65,17 +95,17 @@ def _matplotlib_can_use_tex():
"""
try:
from matplotlib import checkdep_usetex
+
return checkdep_usetex(True)
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_barcode(
persistence=[],
persistence_file="",
alpha=0.6,
- max_intervals=1000,
- max_barcodes=1000,
+ max_intervals=20000,
inf_delta=0.1,
legend=False,
colormap=None,
@@ -97,7 +127,7 @@ def plot_persistence_barcode(
:type alpha: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 20000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -119,99 +149,68 @@ def plot_persistence_barcode(
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- persistence = _array_handler(persistence)
-
- if max_barcodes != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_barcodes
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
-
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ (min_birth, max_death) = __min_birth_max_death(persistence)
+ persistence = sorted(persistence, key=lambda birth: birth[1][0])
+ except IndexError:
+ min_birth, max_death = 0.0, 1.0
+ pass
- (min_birth, max_death) = __min_birth_max_death(persistence)
- ind = 0
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for bar code to be more
# readable
infinity = max_death + delta
axis_start = min_birth - delta
- # Draw horizontal bars in loop
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.barh(
- ind,
- (interval[1][1] - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- else:
- # Infinite death case for diagram to be nicer
- axes.barh(
- ind,
- (infinity - interval[1][0]),
- height=0.8,
- left=interval[1][0],
- alpha=alpha,
- color=colormap[interval[0]],
- linewidth=0,
- )
- ind = ind + 1
+
+ if axes == None:
+ _, axes = plt.subplots(1, 1)
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
+
+ x = [birth for (dim, (birth, death)) in persistence]
+ y = [(death - birth) if death != float("inf") else (infinity - birth) for (dim, (birth, death)) in persistence]
+ c = [colormap[dim] for (dim, (birth, death)) in persistence]
+
+ axes.barh(list(reversed(range(len(x)))), y, height=0.8, left=x, alpha=alpha, color=c, linewidth=0)
if legend:
dimensions = list(set(item[0] for item in persistence))
axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ],
- loc="lower right",
+ handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions], loc="lower right",
)
axes.set_title("Persistence barcode", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, 0, ind])
+ if len(x) != 0:
+ axes.axis([axis_start, infinity, 0, len(x)])
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_diagram(
@@ -219,14 +218,13 @@ def plot_persistence_diagram(
persistence_file="",
alpha=0.6,
band=0.0,
- max_intervals=1000,
- max_plots=1000,
+ max_intervals=1000000,
inf_delta=0.1,
legend=False,
colormap=None,
axes=None,
fontsize=16,
- greyblock=True
+ greyblock=True,
):
"""This function plots the persistence diagram from persistence values
list, a np.array of shape (N x 2) representing a diagram in a single
@@ -244,7 +242,7 @@ def plot_persistence_diagram(
:type band: float.
:param max_intervals: maximal number of intervals to display.
Selected intervals are those with the longest life time. Set it
- to 0 to see all. Default value is 1000.
+ to 0 to see all. Default value is 1000000.
:type max_intervals: int.
:param inf_delta: Infinity is placed at :code:`((max_death - min_birth) x
inf_delta)` above :code:`max_death` value. A reasonable value is
@@ -268,47 +266,35 @@ def plot_persistence_diagram(
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
- diag = read_persistence_intervals_grouped_by_dimension(
- persistence_file=persistence_file
- )
+ diag = read_persistence_intervals_grouped_by_dimension(persistence_file=persistence_file)
for key in diag.keys():
for persistence_interval in diag[key]:
persistence.append((key, persistence_interval))
else:
- print("file " + persistence_file + " not found.")
- return None
-
- persistence = _array_handler(persistence)
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
- if max_plots != 1000:
- print("Deprecated parameter. It has been replaced by max_intervals")
- max_intervals = max_plots
-
- if max_intervals > 0 and max_intervals < len(persistence):
- # Sort by life time, then takes only the max_intervals elements
- persistence = sorted(
- persistence,
- key=lambda life_time: life_time[1][1] - life_time[1][0],
- reverse=True,
- )[:max_intervals]
-
- if colormap == None:
- colormap = plt.cm.Set1.colors
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ try:
+ persistence = _array_handler(persistence)
+ persistence = _limit_to_max_intervals(
+ persistence, max_intervals, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ min_birth, max_death = __min_birth_max_death(persistence, band)
+ except IndexError:
+ min_birth, max_death = 0.0, 1.0
+ pass
- (min_birth, max_death) = __min_birth_max_death(persistence, band)
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for diagram to be more
# readable
@@ -316,61 +302,56 @@ def plot_persistence_diagram(
axis_end = max_death + delta / 2
axis_start = min_birth - delta
+ if axes == None:
+ _, axes = plt.subplots(1, 1)
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
# bootstrap band
if band > 0.0:
x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
# lower diag patch
if greyblock:
- axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey'))
- # Draw points in loop
- pts_at_infty = False # Records presence of pts at infty
- for interval in reversed(persistence):
- if float(interval[1][1]) != float("inf"):
- # Finite death case
- axes.scatter(
- interval[1][0],
- interval[1][1],
- alpha=alpha,
- color=colormap[interval[0]],
+ axes.add_patch(
+ mpatches.Polygon(
+ [[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]],
+ fill=True,
+ color="lightgrey",
)
- else:
- pts_at_infty = True
- # Infinite death case for diagram to be nicer
- axes.scatter(
- interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
- )
- if pts_at_infty:
+ )
+ # line display of equation : birth = death
+ axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
+
+ x = [birth for (dim, (birth, death)) in persistence]
+ y = [death if death != float("inf") else infinity for (dim, (birth, death)) in persistence]
+ c = [colormap[dim] for (dim, (birth, death)) in persistence]
+
+ axes.scatter(x, y, alpha=alpha, color=c)
+ if float("inf") in (death for (dim,(birth,death)) in persistence):
# infinity line and text
- axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha)
# Infinity label
yt = axes.get_yticks()
- yt = yt[np.where(yt < axis_end)] # to avoid ploting ticklabel higher than infinity
+ yt = yt[np.where(yt < axis_end)]  # to avoid plotting ticklabels higher than infinity
yt = np.append(yt, infinity)
ytl = ["%.3f" % e for e in yt] # to avoid float precision error
- ytl[-1] = r'$+\infty$'
+ ytl[-1] = r"$+\infty$"
axes.set_yticks(yt)
axes.set_yticklabels(ytl)
if legend:
dimensions = list(set(item[0] for item in persistence))
- axes.legend(
- handles=[
- mpatches.Patch(color=colormap[dim], label=str(dim))
- for dim in dimensions
- ]
- )
+ axes.legend(handles=[mpatches.Patch(color=colormap[dim], label=str(dim)) for dim in dimensions])
axes.set_xlabel("Birth", fontsize=fontsize)
axes.set_ylabel("Death", fontsize=fontsize)
axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
+ axes.axis([axis_start, axis_end, axis_start, infinity + delta / 2])
return axes
- except ImportError:
- print("This function is not available, you may be missing matplotlib.")
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
def plot_persistence_density(
@@ -384,7 +365,7 @@ def plot_persistence_density(
legend=False,
axes=None,
fontsize=16,
- greyblock=False
+ greyblock=False,
):
"""This function plots the persistence density from persistence
values list, np.array of shape (N x 2) representing a diagram
@@ -444,12 +425,13 @@ def plot_persistence_density(
import matplotlib.patches as mpatches
from scipy.stats import kde
from matplotlib import rc
+
if _gudhi_matplotlib_use_tex and _matplotlib_can_use_tex():
- plt.rc('text', usetex=True)
- plt.rc('font', family='serif')
+ plt.rc("text", usetex=True)
+ plt.rc("font", family="serif")
else:
- plt.rc('text', usetex=False)
- plt.rc('font', family='DejaVu Sans')
+ plt.rc("text", usetex=False)
+ plt.rc("font", family="DejaVu Sans")
if persistence_file != "":
if dimension is None:
@@ -460,10 +442,16 @@ def plot_persistence_density(
persistence_file=persistence_file, only_this_dim=dimension
)
else:
- print("file " + persistence_file + " not found.")
- return None
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), persistence_file)
+
+ # The default cmap value cannot be set at argument definition level, as matplotlib is not imported at that point.
+ if cmap is None:
+ cmap = plt.cm.hot_r
+ if axes == None:
+ _, axes = plt.subplots(1, 1)
- if len(persistence) > 0:
+ try:
+ # if not read from file but given by an argument
persistence = _array_handler(persistence)
persistence_dim = np.array(
[
@@ -472,47 +460,54 @@ def plot_persistence_density(
if (dim_interval[0] == dimension) or (dimension is None)
]
)
-
- persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
- if max_intervals > 0 and max_intervals < len(persistence_dim):
- # Sort by life time, then takes only the max_intervals elements
+ persistence_dim = persistence_dim[np.isfinite(persistence_dim[:, 1])]
persistence_dim = np.array(
- sorted(
- persistence_dim,
- key=lambda life_time: life_time[1] - life_time[0],
- reverse=True,
- )[:max_intervals]
+ _limit_to_max_intervals(
+ persistence_dim, max_intervals, key=lambda life_time: life_time[1] - life_time[0]
+ )
)
- # Set as numpy array birth and death (remove undefined values - inf and NaN)
- birth = persistence_dim[:, 0]
- death = persistence_dim[:, 1]
-
- # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
- if cmap is None:
- cmap = plt.cm.hot_r
- if axes == None:
- fig, axes = plt.subplots(1, 1)
+ # Set as numpy array birth and death (remove undefined values - inf and NaN)
+ birth = persistence_dim[:, 0]
+ death = persistence_dim[:, 1]
+ birth_min = birth.min()
+ birth_max = birth.max()
+ death_min = death.min()
+ death_max = death.max()
+
+ # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
+ k = kde.gaussian_kde([birth, death], bw_method=bw_method)
+ xi, yi = np.mgrid[
+ birth_min : birth_max : nbins * 1j, death_min : death_max : nbins * 1j,
+ ]
+ zi = k(np.vstack([xi.flatten(), yi.flatten()]))
+ # Make the plot
+ img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap, shading="auto")
+ plot_success = True
+
+ # IndexError on empty diagrams, ValueError when there are only infinite death values
+ except (IndexError, ValueError):
+ birth_min = 0.0
+ birth_max = 1.0
+ death_min = 0.0
+ death_max = 1.0
+ plot_success = False
+ pass
# line display of equation : birth = death
- x = np.linspace(death.min(), birth.max(), 1000)
+ x = np.linspace(death_min, birth_max, 1000)
axes.plot(x, x, color="k", linewidth=1.0)
- # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
- k = kde.gaussian_kde([birth, death], bw_method=bw_method)
- xi, yi = np.mgrid[
- birth.min() : birth.max() : nbins * 1j,
- death.min() : death.max() : nbins * 1j,
- ]
- zi = k(np.vstack([xi.flatten(), yi.flatten()]))
-
- # Make the plot
- img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
-
if greyblock:
- axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey'))
+ axes.add_patch(
+ mpatches.Polygon(
+ [[birth_min, birth_min], [death_max, birth_min], [death_max, death_max]],
+ fill=True,
+ color="lightgrey",
+ )
+ )
- if legend:
+ if plot_success and legend:
plt.colorbar(img, ax=axes)
axes.set_xlabel("Birth", fontsize=fontsize)
@@ -521,7 +516,5 @@ def plot_persistence_density(
return axes
- except ImportError:
- print(
- "This function is not available, you may be missing matplotlib and/or scipy."
- )
+ except ImportError as import_error:
+ warnings.warn(f"This function is not available.\nModuleNotFoundError: No module named '{import_error.name}'.")
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 006a24ed..4f229663 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -45,6 +45,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
Simplex_tree_interface_full_featured() nogil
+ Simplex_tree_interface_full_featured(Simplex_tree_interface_full_featured&) nogil
double simplex_filtration(vector[int] simplex) nogil
void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
void initialize_filtration() nogil
@@ -65,6 +66,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil except +
void reset_filtration(double filtration, int dimension) nogil
+ bint operator==(Simplex_tree_interface_full_featured) nogil
# Iterators over Simplex tree
pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil
@@ -74,6 +76,9 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) nogil
Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) nogil
pair[Simplex_tree_boundary_iterator, Simplex_tree_boundary_iterator] get_boundary_iterators(vector[int] simplex) nogil except +
+ # Expansion with blockers
+ ctypedef bool (*blocker_func_t)(vector[int], void *user_data)
+ void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data)
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index c3720936..2c53a872 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -16,6 +16,9 @@ __author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
+cdef bool callback(vector[int] simplex, void *blocker_func):
+ return (<object>blocker_func)(simplex)
+
# SimplexTree python interface
cdef class SimplexTree:
"""The simplex tree is an efficient and flexible data structure for
@@ -38,13 +41,29 @@ cdef class SimplexTree:
cdef Simplex_tree_persistence_interface * pcohptr
# Fake constructor that does nothing but documenting the constructor
- def __init__(self):
+ def __init__(self, other = None):
"""SimplexTree constructor.
+
+ :param other: If `other` is `None` (default value), an empty `SimplexTree` is created.
+ If `other` is a `SimplexTree`, the `SimplexTree` is constructed from a deep copy of `other`.
+ :type other: SimplexTree (Optional)
+ :returns: An empty simplex tree, or a deep copy of `other`.
+ :rtype: SimplexTree
+
+ :raises TypeError: In case `other` is neither `None` nor a `SimplexTree`.
+ :note: If the `SimplexTree` is a copy, the persistence information is not copied. If you need it in the clone,
+ you have to call :func:`compute_persistence` on it even if you had already computed it in the original.
"""
# The real cython constructor
- def __cinit__(self):
- self.thisptr = <intptr_t>(new Simplex_tree_interface_full_featured())
+ def __cinit__(self, other = None):
+ if other:
+ if isinstance(other, SimplexTree):
+ self.thisptr = _get_copy_intptr(other)
+ else:
+ raise TypeError("`other` argument requires to be of type `SimplexTree`, or `None`.")
+ else:
+ self.thisptr = <intptr_t>(new Simplex_tree_interface_full_featured())
def __dealloc__(self):
cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
@@ -63,6 +82,21 @@ cdef class SimplexTree:
"""
return self.pcohptr != NULL
+ def copy(self):
+ """
+ :returns: A simplex tree that is a deep copy of itself.
+ :rtype: SimplexTree
+
+ :note: The persistence information is not copied. If you need it in the clone, you have to call
+ :func:`compute_persistence` on it even if you had already computed it in the original.
+ """
+ stree = SimplexTree()
+ stree.thisptr = _get_copy_intptr(self)
+ return stree
+
+ def __deepcopy__(self):
+ return self.copy()
+
def filtration(self, simplex):
"""This function returns the filtration value for a given N-simplex in
this simplicial complex, or +infinity if it is not in the complex.
@@ -443,6 +477,27 @@ cdef class SimplexTree:
persistence_result = self.pcohptr.get_persistence()
return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence)
+ def expansion_with_blocker(self, max_dim, blocker_func):
+ """Expands the Simplex_tree containing only a graph. Simplices corresponding to cliques in the graph are added
+ incrementally, faces before cofaces, unless the simplex has dimension larger than `max_dim` or `blocker_func`
+ returns `True` for this simplex.
+
+ The function identifies a candidate simplex whose faces are all already in the complex, inserts it with a
+ filtration value corresponding to the maximum of the filtration values of the faces, then calls `blocker_func`
+ with this new simplex (represented as a list of int). If `blocker_func` returns `True`, the simplex is removed,
+ otherwise it is kept. The algorithm then proceeds with the next candidate.
+
+ .. warning::
+ Several candidates of the same dimension may be inserted simultaneously before calling `blocker_func`, so
+ if you examine the complex in `blocker_func`, you may hit a few simplices of the same dimension that have
+ not been vetted by `blocker_func` yet, or have already been rejected but not yet removed.
+
+ :param max_dim: Expansion maximal dimension value.
+ :type max_dim: int
+ :param blocker_func: Blocker oracle.
+ :type blocker_func: Callable[[List[int]], bool]
+ """
+ self.get_ptr().expansion_with_blockers_callback(max_dim, callback, <void*>blocker_func)
def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
"""This function computes and returns the persistence of the simplicial complex.
@@ -621,18 +676,17 @@ cdef class SimplexTree:
return (normal0, normals, infinite0, infinites)
def collapse_edges(self, nb_iterations = 1):
- """Assuming the simplex tree is a 1-skeleton graph, this method collapse edges (simplices of higher dimension
- are ignored) and resets the simplex tree from the remaining edges.
- A good candidate is to build a simplex tree on top of a :class:`~gudhi.RipsComplex` of dimension 1 before
- collapsing edges
+ """Assuming the complex is a graph (simplices of higher dimension are ignored), this method implicitly
+ interprets it as the 1-skeleton of a flag complex, and replaces it with another (smaller) graph whose
+ expansion has the same persistent homology, using a technique known as edge collapses
+ (see :cite:`edgecollapsearxiv`).
+
+ A natural application is to get a simplex tree of dimension 1 from :class:`~gudhi.RipsComplex`,
+ then collapse edges, perform :meth:`expansion()` and finally compute persistence
(cf. :download:`rips_complex_edge_collapse_example.py <../example/rips_complex_edge_collapse_example.py>`).
- For implementation details, please refer to :cite:`edgecollapsesocg2020`.
:param nb_iterations: The number of edge collapse iterations to perform. Default is 1.
:type nb_iterations: int
-
- :note: collapse_edges method requires `Eigen <installation.html#eigen>`_ >= 3.1.0 and an exception is thrown
- if this method is not available.
"""
# Backup old pointer
cdef Simplex_tree_interface_full_featured* ptr = self.get_ptr()
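
The pipeline described in the rewritten collapse_edges docstring (1-skeleton from a Rips complex, edge collapse, expansion, persistence) looks roughly as follows; the point cloud and the edge-length threshold are toy values chosen only for illustration:

import gudhi

# Toy point cloud and threshold, for illustration only.
points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]]
rips = gudhi.RipsComplex(points=points, max_edge_length=2.0)

# Build only the graph (dimension 1), collapse its edges, then expand and compute persistence.
stree = rips.create_simplex_tree(max_dimension=1)
stree.collapse_edges()
stree.expansion(2)
print(stree.persistence())
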
@@ -642,3 +696,13 @@ cdef class SimplexTree:
self.thisptr = <intptr_t>(ptr.collapse_edges(nb_iter))
# Delete old pointer
del ptr
+
+ def __eq__(self, other:SimplexTree):
+ """Test for structural equality
+ :returns: True if the 2 simplex trees are equal, False otherwise.
+ :rtype: bool
+ """
+ return dereference(self.get_ptr()) == dereference(other.get_ptr())
+
+cdef intptr_t _get_copy_intptr(SimplexTree stree) nogil:
+ return <intptr_t>(new Simplex_tree_interface_full_featured(dereference(stree.get_ptr())))
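
A minimal sketch of the copy semantics, the new __eq__, and expansion_with_blocker added in this file; the edges, filtration values and the blocker (which rejects any candidate containing vertex 0) are arbitrary illustration choices, not part of the patch:

import gudhi

st = gudhi.SimplexTree()
st.insert([0, 1], 0.0)
st.insert([0, 2], 1.0)
st.insert([1, 2], 2.0)

# Deep copy via the new constructor, structural comparison via the new __eq__.
clone = gudhi.SimplexTree(st)
assert clone == st
assert st.copy() == st

# Expand to dimension 2, but remove any candidate simplex that contains vertex 0.
st.expansion_with_blocker(2, lambda simplex: 0 in simplex)
print([s for s, _ in st.get_simplices()])

Since the persistence information is not copied, compute_persistence would have to be called again on the clone if it is needed there.
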
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
index 0541572b..16f63c3d 100644
--- a/src/python/gudhi/weighted_rips_complex.py
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -12,9 +12,11 @@ from gudhi import SimplexTree
class WeightedRipsComplex:
"""
Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
- in the way described in :cite:`dtmfiltrations`.
+ in the way described in :cite:`dtmfiltrations` with `p=1`. The filtration value of vertex `i` is `2*weights[i]`,
+ and the filtration value of edge `ij` is `distance_matrix[i][j]+weights[i]+weights[j]`,
+ or the maximum of the filtrations of its extremities, whichever is largest.
Remark that all the filtration values are doubled compared to the definition in the paper
- for the consistency with RipsComplex.
+ for consistency with RipsComplex.
"""
def __init__(self,
distance_matrix,
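
A small worked example of the filtration values spelled out in the docstring above, with a toy two-point input chosen only for illustration: weights [1, 2] and distance 3 give vertex filtrations 2 and 4, and the edge gets max(3 + 1 + 2, 2, 4) = 6.

from gudhi.weighted_rips_complex import WeightedRipsComplex

# Toy input: two points at distance 3, with weights 1 and 2.
dist = [[0, 3], [3, 0]]
weights = [1, 2]

st = WeightedRipsComplex(distance_matrix=dist, weights=weights).create_simplex_tree(max_dimension=1)
# Expected filtrations: [0] -> 2, [1] -> 4, [0, 1] -> 6.
for simplex, filtration in st.get_filtration():
    print(simplex, filtration)
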
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 671af4a4..469b91ce 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -57,6 +57,16 @@ class Alpha_complex_interface {
alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value);
}
+ static void set_float_relative_precision(double precision) {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::set_relative_precision_of_to_double(precision);
+ }
+
+ static double get_float_relative_precision() {
+ // cf. Exact_alpha_complex_dD kernel type in Alpha_complex_factory.h
+ return CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>::FT::get_relative_precision_of_to_double();
+ }
+
private:
std::unique_ptr<Abstract_alpha_complex> alpha_ptr_;
};
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 629f6083..7f9b0067 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -15,9 +15,7 @@
#include <gudhi/distance_functions.h>
#include <gudhi/Simplex_tree.h>
#include <gudhi/Points_off_io.h>
-#ifdef GUDHI_USE_EIGEN3
#include <gudhi/Flag_complex_edge_collapser.h>
-#endif
#include <iostream>
#include <vector>
@@ -42,6 +40,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
using Complex_simplex_iterator = typename Base::Complex_simplex_iterator;
using Extended_filtration_data = typename Base::Extended_filtration_data;
using Boundary_simplex_iterator = typename Base::Boundary_simplex_iterator;
+ typedef bool (*blocker_func_t)(Simplex simplex, void *user_data);
public:
@@ -164,7 +163,6 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
}
Simplex_tree_interface* collapse_edges(int nb_collapse_iteration) {
-#ifdef GUDHI_USE_EIGEN3
using Filtered_edge = std::tuple<Vertex_handle, Vertex_handle, Filtration_value>;
std::vector<Filtered_edge> edges;
for (Simplex_handle sh : Base::skeleton_simplex_range(1)) {
@@ -178,7 +176,7 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
}
for (int iteration = 0; iteration < nb_collapse_iteration; iteration++) {
- edges = Gudhi::collapse::flag_complex_collapse_edges(edges);
+ edges = Gudhi::collapse::flag_complex_collapse_edges(std::move(edges));
}
Simplex_tree_interface* collapsed_stree_ptr = new Simplex_tree_interface();
// Copy the original 0-skeleton
@@ -190,9 +188,13 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
collapsed_stree_ptr->insert({std::get<0>(remaining_edge), std::get<1>(remaining_edge)}, std::get<2>(remaining_edge));
}
return collapsed_stree_ptr;
-#else
- throw std::runtime_error("Unable to collapse edges as it requires Eigen3 >= 3.1.0.");
-#endif
+ }
+
+ void expansion_with_blockers_callback(int dimension, blocker_func_t user_func, void *user_data) {
+ Base::expansion_with_blockers(dimension, [&](Simplex_handle sh){
+ Simplex simplex(Base::simplex_vertex_range(sh).begin(), Base::simplex_vertex_range(sh).end());
+ return user_func(simplex, user_data);
+ });
}
// Iterator over the simplex tree
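
`expansion_with_blockers_callback` bridges a plain C function pointer to `Simplex_tree::expansion_with_blockers`; on the Python side it surfaces as `SimplexTree.expansion_with_blocker`, exercised in `test_expansion_with_blocker` at the end of this diff. A minimal sketch, with arbitrary edge filtrations and a blocking rule chosen only for illustration:

from gudhi import SimplexTree

st = SimplexTree()
for edge, filt in ([0, 1], 1.), ([0, 2], 2.), ([1, 2], 3.):
    st.insert(edge, filt)

def blocker(simplex):
    # Reject any candidate simplex containing vertex 2, accept the rest.
    return 2 in simplex

st.expansion_with_blocker(2, blocker)
assert st.find([0, 1, 2]) == False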
diff --git a/src/python/pyproject.toml b/src/python/pyproject.toml
index a9fb4985..55b64466 100644
--- a/src/python/pyproject.toml
+++ b/src/python/pyproject.toml
@@ -1,3 +1,3 @@
[build-system]
-requires = ["setuptools", "wheel", "numpy>=1.15.0", "cython", "pybind11"]
+requires = ["setuptools>=24.2.0", "wheel", "numpy>=1.15.0", "cython>=0.27", "pybind11"]
build-backend = "setuptools.build_meta"
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index 23746998..2c67c2c5 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -5,6 +5,7 @@
Copyright (C) 2019 Inria
Modification(s):
+ - 2021/12 Vincent Rouvreau: Python 3.5 as minimal version
- YYYY/MM Author: Description of the modification
"""
@@ -43,7 +44,7 @@ for module in cython_modules:
include_dirs=include_dirs,
runtime_library_dirs=runtime_library_dirs,))
-ext_modules = cythonize(ext_modules, compiler_directives={'language_level': str(sys.version_info[0])})
+ext_modules = cythonize(ext_modules, compiler_directives={'language_level': '3'})
for module in pybind11_modules:
my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
@@ -86,6 +87,7 @@ setup(
long_description_content_type='text/x-rst',
long_description=long_description,
ext_modules = ext_modules,
+ python_requires='>=3.5.0',
install_requires = ['numpy >= 1.15.0',],
package_data={"": ["*.dll"], },
)
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index f15284f3..f81e6137 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -286,3 +286,30 @@ def _weighted_doc_example(precision):
def test_weighted_doc_example():
for precision in ['fast', 'safe', 'exact']:
_weighted_doc_example(precision)
+
+def test_float_relative_precision():
+ assert AlphaComplex.get_float_relative_precision() == 1e-5
+ # Must be > 0.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(0.)
+ # Must be < 1.
+ with pytest.raises(ValueError):
+ AlphaComplex.set_float_relative_precision(1.)
+
+ points = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations = list(st.get_filtration())
+
+ # Get a better precision
+ AlphaComplex.set_float_relative_precision(1e-15)
+ assert AlphaComplex.get_float_relative_precision() == 1e-15
+
+ st = AlphaComplex(points=points).create_simplex_tree()
+ filtrations_better_resolution = list(st.get_filtration())
+
+ assert len(filtrations) == len(filtrations_better_resolution)
+ for idx in range(len(filtrations)):
+ # check simplex is the same
+ assert filtrations[idx][0] == filtrations_better_resolution[idx][0]
+ # check the filtration value is about the same, within the worst-case relative precision (1e-5)
+ assert filtrations[idx][1] == pytest.approx(filtrations_better_resolution[idx][1], rel=1e-5)
diff --git a/src/python/test/test_persistence_graphical_tools.py b/src/python/test/test_persistence_graphical_tools.py
new file mode 100644
index 00000000..c19836b7
--- /dev/null
+++ b/src/python/test/test_persistence_graphical_tools.py
@@ -0,0 +1,121 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi as gd
+import numpy as np
+import matplotlib as plt
+import pytest
+
+
+def test_array_handler():
+ diags = np.array([[1, 2], [3, 4], [5, 6]], float)
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ np.testing.assert_array_equal(arr_diags[idx][1], diags[idx])
+
+ diags = [(1.0, 2.0), (3.0, 4.0), (5.0, 6.0)]
+ arr_diags = gd.persistence_graphical_tools._array_handler(diags)
+ for idx in range(len(diags)):
+ assert arr_diags[idx][0] == 0
+ assert arr_diags[idx][1] == diags[idx]
+
+ diags = [(0, (1.0, 2.0)), (0, (3.0, 4.0)), (0, (5.0, 6.0))]
+ assert gd.persistence_graphical_tools._array_handler(diags) == diags
+
+
+def test_min_birth_max_death():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (0.0, 1.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (0.0, 5.0)
+
+
+def test_limit_min_birth_max_death():
+ diags = [
+ (0, (2.0, float("inf"))),
+ (0, (2.0, float("inf"))),
+ ]
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags) == (2.0, 3.0)
+ assert gd.persistence_graphical_tools.__min_birth_max_death(diags, band=4.0) == (2.0, 6.0)
+
+
+def test_limit_to_max_intervals():
+ diags = [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ (0, (0.0, 0.118398)),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.117908)),
+ (0, (0.0, 0.112307)),
+ (0, (0.0, 0.107535)),
+ (0, (0.0, 0.106382)),
+ ]
+ # check no warning is emitted when max_intervals equals the number of diagrams
+ with pytest.warns(None) as record:
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 10, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check the diagrams are returned unchanged (neither truncated nor sorted)
+ assert truncated_diags == diags
+ assert len(record) == 0
+
+ # check a warning is emitted when max_intervals is lower than the number of diagrams
+ with pytest.warns(UserWarning) as record:
+ truncated_diags = gd.persistence_graphical_tools._limit_to_max_intervals(
+ diags, 5, key=lambda life_time: life_time[1][1] - life_time[1][0]
+ )
+ # check the diagrams are truncated to 5 and sorted by decreasing lifetime
+ assert truncated_diags == [
+ (0, (0.0, float("inf"))),
+ (0, (0.0983494, float("inf"))),
+ (0, (0.118398, 1.0)),
+ (0, (0.0, 0.122545)),
+ (0, (0.0, 0.12047)),
+ ]
+ assert len(record) == 1
+
+
+def _limit_plot_persistence(function):
+ pplot = function(persistence=[])
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[], legend=True)
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))])
+ assert isinstance(pplot, plt.axes.SubplotBase)
+ pplot = function(persistence=[(0, float("inf"))], legend=True)
+ assert isinstance(pplot, plt.axes.SubplotBase)
+
+
+def test_limit_plot_persistence():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _limit_plot_persistence(function)
+
+
+def _non_existing_persistence_file(function):
+ with pytest.raises(FileNotFoundError):
+ function(persistence_file="pouetpouettralala.toubiloubabdou")
+
+
+def test_non_existing_persistence_file():
+ for function in [gd.plot_persistence_barcode, gd.plot_persistence_diagram, gd.plot_persistence_density]:
+ _non_existing_persistence_file(function)
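
The tests above target the internal helpers and a few edge cases; for reference, a typical public-facing call looks like the following sketch (the diagram values are arbitrary).

import gudhi as gd
import matplotlib.pyplot as plt

diag = [(0, (0.0, float("inf"))), (0, (0.1, 0.4)), (1, (0.2, 0.3))]
ax = gd.plot_persistence_diagram(persistence=diag, legend=True)
ax.set_title("Toy persistence diagram")
plt.show()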
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 31c46213..688f4fd6 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -8,7 +8,7 @@
- YYYY/MM Author: Description of the modification
"""
-from gudhi import SimplexTree, __GUDHI_USE_EIGEN3
+from gudhi import SimplexTree
import numpy as np
import pytest
@@ -354,16 +354,11 @@ def test_collapse_edges():
assert st.num_simplices() == 10
- if __GUDHI_USE_EIGEN3:
- st.collapse_edges()
- assert st.num_simplices() == 9
- assert st.find([1, 3]) == False
- for simplex in st.get_skeleton(0):
- assert simplex[1] == 1.
- else:
- # If no Eigen3, collapse_edges throws an exception
- with pytest.raises(RuntimeError):
- st.collapse_edges()
+ st.collapse_edges()
+ assert st.num_simplices() == 9
+ assert st.find([0, 2]) == False # [1, 3] would be fine as well
+ for simplex in st.get_skeleton(0):
+ assert simplex[1] == 1.
def test_reset_filtration():
st = SimplexTree()
@@ -447,4 +442,116 @@ def test_persistence_intervals_in_dimension():
assert np.array_equal(H2, np.array([[ 0., float("inf")]]))
# Test empty case
assert st.persistence_intervals_in_dimension(3).shape == (0, 2)
- \ No newline at end of file
+
+def test_equality_operator():
+ st1 = SimplexTree()
+ st2 = SimplexTree()
+
+ assert st1 == st2
+
+ st1.insert([1,2,3], 4.)
+ assert st1 != st2
+
+ st2.insert([1,2,3], 4.)
+ assert st1 == st2
+
+def test_simplex_tree_deep_copy():
+ st = SimplexTree()
+ st.insert([1, 2, 3], 0.)
+ # compute persistence only on the original
+ st.compute_persistence()
+
+ st_copy = st.copy()
+ assert st_copy == st
+ st_filt_list = list(st.get_filtration())
+
+ # check persistence is not copied
+ assert st.__is_persistence_defined() == True
+ assert st_copy.__is_persistence_defined() == False
+
+ # remove something in the copy and check the copy is included in the original
+ st_copy.remove_maximal_simplex([1, 2, 3])
+ a_filt_list = list(st_copy.get_filtration())
+ assert len(a_filt_list) < len(st_filt_list)
+
+ for a_splx in a_filt_list:
+ assert a_splx in st_filt_list
+
+ # deleting both objects must not trigger a double free
+ del st
+ del st_copy
+
+def test_simplex_tree_deep_copy_constructor():
+ st = SimplexTree()
+ st.insert([1, 2, 3], 0.)
+ # compute persistence only on the original
+ st.compute_persistence()
+
+ st_copy = SimplexTree(st)
+ assert st_copy == st
+ st_filt_list = list(st.get_filtration())
+
+ # check persistence is not copied
+ assert st.__is_persistence_defined() == True
+ assert st_copy.__is_persistence_defined() == False
+
+ # remove something in the copy and check the copy is included in the original
+ st_copy.remove_maximal_simplex([1, 2, 3])
+ a_filt_list = list(st_copy.get_filtration())
+ assert len(a_filt_list) < len(st_filt_list)
+
+ for a_splx in a_filt_list:
+ assert a_splx in st_filt_list
+
+ # deleting both objects must not trigger a double free
+ del st
+ del st_copy
+
+def test_simplex_tree_constructor_exception():
+ with pytest.raises(TypeError):
+ st = SimplexTree(other = "Construction from a string shall raise an exception")
+
+def test_expansion_with_blocker():
+ st=SimplexTree()
+ st.insert([0,1],0)
+ st.insert([0,2],1)
+ st.insert([0,3],2)
+ st.insert([1,2],3)
+ st.insert([1,3],4)
+ st.insert([2,3],5)
+ st.insert([2,4],6)
+ st.insert([3,6],7)
+ st.insert([4,5],8)
+ st.insert([4,6],9)
+ st.insert([5,6],10)
+ st.insert([6],10)
+
+ def blocker(simplex):
+ try:
+ # Block all simplices that contain vertex 6
+ simplex.index(6)
+ print(simplex, ' is blocked')
+ return True
+ except ValueError:
+ print(simplex, ' is accepted')
+ st.assign_filtration(simplex, st.filtration(simplex) + 1.)
+ return False
+
+ st.expansion_with_blocker(2, blocker)
+ assert st.num_simplices() == 22
+ assert st.dimension() == 2
+ assert st.find([4,5,6]) == False
+ assert st.filtration([0,1,2]) == 4.
+ assert st.filtration([0,1,3]) == 5.
+ assert st.filtration([0,2,3]) == 6.
+ assert st.filtration([1,2,3]) == 6.
+
+ st.expansion_with_blocker(3, blocker)
+ assert st.num_simplices() == 23
+ assert st.dimension() == 3
+ assert st.find([4,5,6]) == False
+ assert st.filtration([0,1,2]) == 4.
+ assert st.filtration([0,1,3]) == 5.
+ assert st.filtration([0,2,3]) == 6.
+ assert st.filtration([1,2,3]) == 6.
+ assert st.filtration([0,1,2,3]) == 7.