author     Gard Spreemann <gspr@nonempty.org>  2020-01-17 15:37:17 +0100
committer  Gard Spreemann <gspr@nonempty.org>  2020-01-17 15:37:17 +0100
commit     9acc59fcc1d5001a212e7b9cd6f00a569a625882 (patch)
tree       0f8c3a3c29a9fef6394394a9650198b348bff130
parent     5ccee32ec2ba38743c6b96867db3e1b5151e45e4 (diff)
parent     cabc43b34723efa7640313348b844eabe9971e38 (diff)
Merge tag 'tags/gudhi-release-3.1.0.rc1' into dfsg/latest
-rw-r--r--  .appveyor.yml | 5
-rw-r--r--  .circleci/config.yml | 13
-rw-r--r--  .travis.yml | 31
-rw-r--r--  CMakeGUDHIVersion.txt | 4
-rw-r--r--  Dockerfile_for_circleci_image (renamed from Dockerfile_ubuntu) | 5
-rw-r--r--  Dockerfile_gudhi_installation | 65
-rw-r--r--  README.md | 2
-rw-r--r--  biblio/bibliography.bib | 2
-rw-r--r--  biblio/how_to_cite_cgal.bib | 920
-rw-r--r--  biblio/how_to_cite_gudhi.bib.in (renamed from biblio/how_to_cite_gudhi.bib) | 80
-rw-r--r--  src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp | 4
-rw-r--r--  src/Alpha_complex/doc/Intro_alpha_complex.h | 31
-rw-r--r--  src/Alpha_complex/doc/alpha_complex_representation.ipe | 6
-rw-r--r--  src/Alpha_complex/doc/alpha_complex_representation.png | bin 14606 -> 19568 bytes
-rw-r--r--  src/Alpha_complex/example/Alpha_complex_from_off.cpp | 13
-rw-r--r--  src/Alpha_complex/example/Alpha_complex_from_points.cpp | 5
-rw-r--r--  src/Alpha_complex/example/CMakeLists.txt | 16
-rw-r--r--  src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp | 65
-rw-r--r--  src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp | 12
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex.h | 75
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 81
-rw-r--r--  src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp | 53
-rw-r--r--  src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 73
-rw-r--r--  src/Alpha_complex/test/CMakeLists.txt | 22
-rw-r--r--  src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp | 37
-rw-r--r--  src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp | 50
-rw-r--r--  src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp | 24
-rw-r--r--  src/Alpha_complex/utilities/CMakeLists.txt | 30
-rw-r--r--  src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp | 4
-rw-r--r--  src/Alpha_complex/utilities/alpha_complex_persistence.cpp | 107
-rw-r--r--  src/Alpha_complex/utilities/alphacomplex.md | 2
-rw-r--r--  src/Bitmap_cubical_complex/test/CMakeLists.txt | 5
-rw-r--r--  src/Bottleneck_distance/include/gudhi/Bottleneck.h | 2
-rw-r--r--  src/Bottleneck_distance/test/CMakeLists.txt | 5
-rw-r--r--  src/Bottleneck_distance/test/bottleneck_unit_test.cpp | 37
-rw-r--r--  src/Cech_complex/doc/Intro_cech_complex.h | 2
-rw-r--r--  src/Cech_complex/test/CMakeLists.txt | 5
-rw-r--r--  src/Contraction/include/gudhi/Skeleton_blocker_contractor.h | 2
-rw-r--r--  src/Doxyfile.in | 2
-rw-r--r--  src/Nerve_GIC/include/gudhi/GIC.h | 7
-rw-r--r--  src/Nerve_GIC/test/CMakeLists.txt | 5
-rw-r--r--  src/Persistence_representations/include/gudhi/Persistence_intervals.h | 5
-rw-r--r--  src/Persistence_representations/test/CMakeLists.txt | 33
-rw-r--r--  src/Persistence_representations/test/persistence_intervals_test.cpp | 211
-rw-r--r--  src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 8
-rw-r--r--  src/Persistent_cohomology/test/CMakeLists.txt | 13
-rw-r--r--  src/Persistent_cohomology/test/betti_numbers_unit_test.cpp | 9
-rw-r--r--  src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp | 8
-rw-r--r--  src/Rips_complex/example/example_rips_complex_from_off_file.cpp | 8
-rw-r--r--  src/Rips_complex/test/CMakeLists.txt | 5
-rw-r--r--  src/Rips_complex/utilities/ripscomplex.md | 1
-rw-r--r--  src/Rips_complex/utilities/sparse_rips_persistence.cpp | 15
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h | 2
-rw-r--r--  src/Simplex_tree/test/CMakeLists.txt | 18
-rw-r--r--  src/Skeleton_blocker/test/CMakeLists.txt | 11
-rw-r--r--  src/Spatial_searching/include/gudhi/Kd_tree_search.h | 4
-rw-r--r--  src/Spatial_searching/test/CMakeLists.txt | 9
-rw-r--r--  src/Subsampling/test/CMakeLists.txt | 14
-rw-r--r--  src/Tangential_complex/include/gudhi/Tangential_complex.h | 4
-rw-r--r--  src/Tangential_complex/test/CMakeLists.txt | 6
-rw-r--r--  src/Toplex_map/test/CMakeLists.txt | 8
-rw-r--r--  src/Witness_complex/include/gudhi/Euclidean_strong_witness_complex.h | 4
-rw-r--r--  src/Witness_complex/include/gudhi/Euclidean_witness_complex.h | 4
-rw-r--r--  src/Witness_complex/test/CMakeLists.txt | 8
-rw-r--r--  src/cmake/modules/GUDHI_boost_test.cmake | 26
-rw-r--r--  src/cmake/modules/GUDHI_compilation_flags.cmake | 2
-rw-r--r--  src/cmake/modules/GUDHI_test_coverage.cmake | 26
-rw-r--r--  src/cmake/modules/GUDHI_third_party_libraries.cmake | 2
-rw-r--r--  src/cmake/modules/GUDHI_user_version_target.cmake | 14
-rw-r--r--  src/common/doc/installation.h | 7
-rw-r--r--  src/common/doc/main_page.md | 5
-rw-r--r--  src/common/include/gudhi/Unitary_tests_utils.h | 12
-rw-r--r--  src/common/include/gudhi/random_point_generators.h | 2
-rw-r--r--  src/common/include/gudhi/reader_utils.h | 10
-rw-r--r--  src/common/test/CMakeLists.txt | 13
-rw-r--r--  src/python/CMakeLists.txt | 102
-rw-r--r--  src/python/doc/_templates/layout.html | 16
-rw-r--r--  src/python/doc/alpha_complex_sum.inc | 6
-rw-r--r--  src/python/doc/alpha_complex_user.rst | 34
-rwxr-xr-x  src/python/doc/conf.py | 2
-rw-r--r--  src/python/doc/cubical_complex_user.rst | 3
-rw-r--r--  src/python/doc/examples.rst | 3
-rw-r--r--  src/python/doc/img/sklearn-tda.png | bin 0 -> 388075 bytes
-rw-r--r--  src/python/doc/index.rst | 16
-rw-r--r--  src/python/doc/installation.rst | 64
-rw-r--r--  src/python/doc/persistence_graphical_tools_user.rst | 26
-rw-r--r--  src/python/doc/reader_utils_ref.rst | 2
-rw-r--r--  src/python/doc/representations.rst | 72
-rw-r--r--  src/python/doc/representations_sum.inc | 14
-rw-r--r--  src/python/doc/rips_complex_user.rst | 26
-rw-r--r--  src/python/doc/simplex_tree_user.rst | 10
-rw-r--r--  src/python/doc/tangential_complex_user.rst | 6
-rw-r--r--  src/python/doc/wasserstein_distance_sum.inc | 14
-rw-r--r--  src/python/doc/wasserstein_distance_user.rst | 40
-rw-r--r--  src/python/doc/witness_complex_user.rst | 6
-rwxr-xr-x  src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py | 7
-rwxr-xr-x  src/python/example/alpha_rips_persistence_bottleneck_distance.py | 2
-rwxr-xr-x  src/python/example/diagram_vectorizations_distances_kernels.py | 133
-rwxr-xr-x  src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py | 9
-rwxr-xr-x  src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py | 9
-rwxr-xr-x  src/python/example/gudhi_graphical_tools_example.py | 18
-rwxr-xr-x  src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py | 4
-rwxr-xr-x  src/python/example/plot_alpha_complex.py | 37
-rwxr-xr-x  src/python/example/plot_rips_complex.py | 38
-rwxr-xr-x  src/python/example/plot_simplex_tree_dim012.py | 66
-rwxr-xr-x  src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py | 7
-rwxr-xr-x  src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py | 7
-rwxr-xr-x  src/python/example/rips_complex_diagram_persistence_from_off_file_example.py | 9
-rwxr-xr-x  src/python/example/rips_persistence_diagram.py | 5
-rwxr-xr-x  src/python/example/sparse_rips_persistence_diagram.py | 5
-rwxr-xr-x  src/python/example/tangential_complex_plain_homology_from_off_file_example.py | 7
-rw-r--r--  src/python/gudhi/__init__.py.in | 32
-rw-r--r--  src/python/gudhi/alpha_complex.pyx | 27
-rw-r--r--  src/python/gudhi/bottleneck.pyx | 19
-rw-r--r--  src/python/gudhi/cubical_complex.pyx | 47
-rw-r--r--  src/python/gudhi/euclidean_strong_witness_complex.pyx | 21
-rw-r--r--  src/python/gudhi/euclidean_witness_complex.pyx | 21
-rw-r--r--  src/python/gudhi/nerve_gic.pyx | 31
-rw-r--r--  src/python/gudhi/off_reader.pyx | 25
-rw-r--r--  src/python/gudhi/periodic_cubical_complex.pyx | 61
-rw-r--r--  src/python/gudhi/persistence_graphical_tools.py | 163
-rw-r--r--  src/python/gudhi/reader_utils.pyx | 33
-rw-r--r--  src/python/gudhi/representations/__init__.py | 6
-rw-r--r--  src/python/gudhi/representations/kernel_methods.py | 206
-rw-r--r--  src/python/gudhi/representations/metrics.py | 244
-rw-r--r--  src/python/gudhi/representations/preprocessing.py | 305
-rw-r--r--  src/python/gudhi/representations/vector_methods.py | 492
-rw-r--r--  src/python/gudhi/rips_complex.pyx | 19
-rw-r--r--  src/python/gudhi/simplex_tree.pxd | 19
-rw-r--r--  src/python/gudhi/simplex_tree.pyx | 36
-rw-r--r--  src/python/gudhi/strong_witness_complex.pyx | 21
-rw-r--r--  src/python/gudhi/subsampling.pyx | 41
-rw-r--r--  src/python/gudhi/tangential_complex.pyx | 27
-rw-r--r--  src/python/gudhi/wasserstein.py | 97
-rw-r--r--  src/python/gudhi/witness_complex.pyx | 21
-rw-r--r--  src/python/include/Alpha_complex_interface.h | 16
-rw-r--r--  src/python/setup.py.in | 12
-rwxr-xr-x  src/python/test/test_alpha_complex.py | 38
-rwxr-xr-x  src/python/test/test_bottleneck_distance.py | 4
-rwxr-xr-x  src/python/test/test_cover_complex.py | 4
-rwxr-xr-x  src/python/test/test_cubical_complex.py | 63
-rwxr-xr-x  src/python/test/test_euclidean_witness_complex.py | 4
-rwxr-xr-x  src/python/test/test_reader_utils.py | 6
-rwxr-xr-x  src/python/test/test_representations.py | 12
-rwxr-xr-x  src/python/test/test_rips_complex.py | 6
-rwxr-xr-x  src/python/test/test_simplex_tree.py | 4
-rwxr-xr-x  src/python/test/test_subsampling.py | 4
-rwxr-xr-x  src/python/test/test_tangential_complex.py | 4
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py | 48
-rwxr-xr-x  src/python/test/test_witness_complex.py | 4
150 files changed, 3988 insertions, 1516 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 6ed75cf7..4a76ea0a 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -48,6 +48,7 @@ install:
- pip --version
- python -m pip install --upgrade pip
- pip install -U setuptools numpy matplotlib scipy Cython pytest
+ - pip install -U POT
build_script:
- mkdir build
@@ -56,8 +57,8 @@ build_script:
- if [%target%]==[Python] (
cd src/python &
MSBuild Cython.sln /m /p:Configuration=Release /p:Platform=x64 &
- ctest -j 1 -C Release
+ ctest -j 1 --output-on-failure -C Release
) else (
MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 &
- ctest -j 1 -C Release -E diff_files
+ ctest -j 1 --output-on-failure -C Release -E diff_files
)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 52cb3d45..5e45bc14 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -11,7 +11,8 @@ jobs:
mkdir build;
cd build;
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..;
- make all test;
+ make all;
+ ctest --output-on-failure;
tests:
docker:
@@ -24,7 +25,8 @@ jobs:
mkdir build;
cd build;
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF ..;
- make all test;
+ make all;
+ ctest --output-on-failure;
utils:
docker:
@@ -37,7 +39,8 @@ jobs:
mkdir build;
cd build;
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=OFF ..;
- make all test;
+ make all;
+ ctest --output-on-failure;
python:
docker:
@@ -55,7 +58,9 @@ jobs:
mkdir build;
cd build;
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..;
- make all test sphinx;
+ make all;
+ ctest --output-on-failure;
+ make sphinx;
cp -R python/sphinx /tmp/sphinx;
- store_artifacts:
diff --git a/.travis.yml b/.travis.yml
index bf268057..60d32ef8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,30 +13,36 @@ matrix:
include:
- env:
# 1. Only examples and associated tests
- - CMAKE_EXAMPLE='ON' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='test'
+ - CMAKE_EXAMPLE='ON' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- env:
# 2. Only unitary tests
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='ON' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='test'
+ - CMAKE_EXAMPLE='OFF' CMAKE_TEST='ON' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- env:
# 3. Only utilities and associated tests
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='ON' CMAKE_PYTHON='OFF' MAKE_TARGET='test'
+ - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='ON' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- env:
# 4. Only doxygen documentation
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='doxygen'
- # Issue with sphinx-build with sphinx 2.0.1
- # - env:
- # # 5. Only Python, associated tests and sphinx documentation
- # - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='ON' MAKE_TARGET='test sphinx'
+ - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='doxygen' CTEST_COMMAND='echo No tests for doxygen target'
+ - env:
+ # 5. Only Python, associated tests and sphinx documentation
+ # $ which python3 => /usr/local/bin/python3
+ # cmake => -- Found PythonInterp: /usr/local/bin/python3 (found version "3.7.5")
+ # In python3-sphinx-build.py, print(sys.executable) => /usr/local/opt/python/bin/python3.7 ???
+ # should be : MAKE_TARGET='all sphinx' CTEST_COMMAND='ctest --output-on-failure'
+ - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='ON' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure -E sphinx'
cache:
directories:
- $HOME/.cache/pip
- $HOME/Library/Caches/Homebrew
+before_install:
+ - brew update && brew unlink python@2 && brew upgrade python
+
addons:
homebrew:
- update: true
packages:
+ - python3
- cmake
- graphviz
- doxygen
@@ -46,7 +52,6 @@ addons:
- mpfr
- tbb
- cgal
- - python3
before_cache:
- rm -f $HOME/.cache/pip/log/debug.log
@@ -55,14 +60,16 @@ before_cache:
# When installing through libcgal-dev apt, CMake Error at CGAL Exports.cmake The imported target "CGAL::CGAL Qt5" references the file
install:
- python3 -m pip install --upgrade pip setuptools wheel
- - python3 -m pip install --user pytest Cython sphinx sphinxcontrib-bibtex matplotlib numpy scipy
+ - python3 -m pip install --user pytest Cython sphinx sphinxcontrib-bibtex sphinx-paramlinks matplotlib numpy scipy scikit-learn
+ - python3 -m pip install --user POT
script:
- rm -rf build
- mkdir -p build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=${CMAKE_EXAMPLE} -DWITH_GUDHI_TEST=${CMAKE_TEST} -DWITH_GUDHI_UTILITIES=${CMAKE_UTILITIES} -DWITH_GUDHI_PYTHON=${CMAKE_PYTHON} -DUSER_VERSION_DIR=version -DPython_ADDITIONAL_VERSIONS=3 ..
- - make all ${MAKE_TARGET}
+ - make ${MAKE_TARGET}
+ - ${CTEST_COMMAND}
- cd ..
notifications:
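The AppVeyor, CircleCI and Travis changes above all follow one pattern: the chained "make all test" invocation is split into a plain build followed by an explicit ctest run, so that a failing test prints its log in the CI output. A minimal sketch of the resulting sequence, with the WITH_GUDHI_* switches abbreviated (see the scripts above for the full option lists):

    mkdir -p build && cd build
    # Configure; the real CI jobs pass the WITH_GUDHI_* options shown above.
    cmake -DCMAKE_BUILD_TYPE=Release ..
    # Build only; tests are no longer chained onto the same make invocation.
    make all
    # Run the tests separately so that any failure echoes its output.
    ctest --output-on-failure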
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index eb2a0666..8300b75e 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 0)
-set (GUDHI_PATCH_VERSION 0)
+set (GUDHI_MINOR_VERSION 1)
+set (GUDHI_PATCH_VERSION 0.rc1)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
message(STATUS "GUDHI version : ${GUDHI_VERSION}")
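As a worked instance of the composition above: with MAJOR=3, MINOR=1 and PATCH=0.rc1, the set() calls expand GUDHI_VERSION to "3.1.0.rc1", so a fresh configure should report the release candidate via the message() call (a sketch, assuming an out-of-source build directory):

    cd build && cmake ..
    # Expected among the status lines, per message(STATUS ...) above:
    #   -- GUDHI version : 3.1.0.rc1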
diff --git a/Dockerfile_ubuntu b/Dockerfile_for_circleci_image
index e149a33a..ff4e6018 100644
--- a/Dockerfile_ubuntu
+++ b/Dockerfile_for_circleci_image
@@ -25,6 +25,7 @@ ENV LC_ALL en_US.UTF-8
# Required for Gudhi compilation
RUN apt-get install -y make \
+ git \
g++ \
cmake \
graphviz \
@@ -43,6 +44,7 @@ RUN apt-get install -y make \
python3-pip \
python3-pytest \
python3-tk \
+ python3-pybind11 \
libfreetype6-dev \
pkg-config
@@ -51,7 +53,10 @@ RUN pip3 install \
matplotlib \
scipy \
Cython \
+ POT \
+ scikit-learn \
sphinx \
+ sphinx-paramlinks \
sphinxcontrib-bibtex
# apt clean up
diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation
new file mode 100644
index 00000000..9fe20730
--- /dev/null
+++ b/Dockerfile_gudhi_installation
@@ -0,0 +1,65 @@
+FROM ubuntu:19.04
+
+# Update and upgrade distribution
+RUN apt-get update && \
+ apt-get upgrade -y
+
+# Tools necessary for installing and configuring Ubuntu
+RUN apt-get install -y \
+ apt-utils \
+ locales \
+ tzdata
+
+# Timezone
+RUN echo "Europe/Paris" | tee /etc/timezone && \
+ ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
+ dpkg-reconfigure -f noninteractive tzdata
+
+# Locale with UTF-8 support
+RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
+ locale-gen && \
+ update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+# Required for Gudhi compilation
+RUN apt-get install -y make \
+ g++ \
+ cmake \
+ graphviz \
+ perl \
+ texlive-bibtex-extra \
+ biber \
+ libboost-all-dev \
+ libeigen3-dev \
+ libgmp3-dev \
+ libmpfr-dev \
+ libtbb-dev \
+ libcgal-dev \
+ locales \
+ python3 \
+ python3-pip \
+ python3-pytest \
+ python3-tk \
+ libfreetype6-dev \
+ pkg-config \
+ curl
+
+RUN pip3 install \
+ numpy \
+ matplotlib \
+ scipy \
+ Cython
+
+# apt clean up
+RUN apt autoremove && rm -rf /var/lib/apt/lists/*
+
+RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.0.0/gudhi.3.0.0.tar.gz" \
+&& tar xf gudhi.3.0.0.tar.gz \
+&& cd gudhi.3.0.0 \
+&& mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_PYTHON=OFF -DPython_ADDITIONAL_VERSIONS=3 .. \
+&& make all test install \
+&& cmake -DWITH_GUDHI_PYTHON=ON . \
+&& cd python \
+&& python3 setup.py install
\ No newline at end of file
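The new Dockerfile above compiles and installs the GUDHI 3.0.0 release tarball inside an Ubuntu 19.04 image, the C++ library first and the Python module on top. A hedged usage sketch; the image tag gudhi-install:3.0.0 is illustrative, not something the repository defines:

    # Build the image from the repository root; -f selects the new Dockerfile.
    docker build -f Dockerfile_gudhi_installation -t gudhi-install:3.0.0 .
    # Smoke-test the installed Python module (assumes gudhi exposes __version__,
    # as set up in src/python/gudhi/__init__.py.in).
    docker run --rm gudhi-install:3.0.0 python3 -c "import gudhi; print(gudhi.__version__)"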
diff --git a/README.md b/README.md
index 8636ac77..167a38b3 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,8 @@
[![Build Status](https://travis-ci.org/GUDHI/gudhi-devel.svg?branch=master)](https://travis-ci.org/GUDHI/gudhi-devel)
[![CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
[![Build status](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
+[![Anaconda Cloud](https://anaconda.org/conda-forge/gudhi/badges/version.svg)](https://anaconda.org/conda-forge/gudhi)
+[![Anaconda downloads](https://anaconda.org/conda-forge/gudhi/badges/downloads.svg)](https://anaconda.org/conda-forge/gudhi)
![GUDHI](src/common/doc/Gudhi_banner.png "Topological Data Analysis (TDA) and Higher Dimensional Geometry Understanding")
diff --git a/biblio/bibliography.bib b/biblio/bibliography.bib
index d1b2f558..a1b951e0 100644
--- a/biblio/bibliography.bib
+++ b/biblio/bibliography.bib
@@ -1076,7 +1076,7 @@ language={English}
journal = {Computational Geometry: Theory and Applications},
volume = {58},
pages = {70--96},
- doi = "https://doi.org/10.1016/j.comgeo.2016.07.001",
+ doi = "10.1016/j.comgeo.2016.07.001",
year = {2016}
}
diff --git a/biblio/how_to_cite_cgal.bib b/biblio/how_to_cite_cgal.bib
index 7336ee81..9e3b69e5 100644
--- a/biblio/how_to_cite_cgal.bib
+++ b/biblio/how_to_cite_cgal.bib
@@ -1,947 +1,1057 @@
-@book{ cgal:eb-15b
+@book{ cgal:eb-19b
, title = "{CGAL} User and Reference Manual"
, author = "{The CGAL Project}"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
-, year = 2015
-, url = "http://doc.cgal.org/4.7/Manual/packages.html"
+, edition = "{5.0}"
+, year = 2019
+, url = "https://doc.cgal.org/5.0/Manual/packages.html"
}
-@incollection{cgal:h-af-15b
+@incollection{cgal:h-af-19b
, author = "Michael Hemmer"
, title = "Algebraic Foundations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAlgebraicFoundationsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAlgebraicFoundations"
+, year = 2019
}
-@incollection{cgal:hhkps-nt-15b
+@incollection{cgal:hhkps-nt-19b
, author = "Michael Hemmer and Susan Hert and Sylvain Pion and Stefan Schirra"
, title = "Number Types"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgNumberTypesSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgNumberTypes"
+, year = 2019
}
-@incollection{cgal:h-ma-15b
+@incollection{cgal:h-ma-19b
, author = "Michael Hemmer and Sylvain Pion"
, title = "Modular Arithmetic"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgModularArithmeticSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgModularArithmetic"
+, year = 2019
}
-@incollection{cgal:h-p-15b
+@incollection{cgal:h-p-19b
, author = "Michael Hemmer"
, title = "Polynomial"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolynomialSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolynomial"
+, year = 2019
}
-@incollection{cgal:bht-ak-15b
+@incollection{cgal:bht-ak-19b
, author = "Eric Berberich and Michael Hemmer and Michael Kerber and Sylvain Lazard and Luis Pe{\~n}aranda and Monique Teillaud"
, title = "Algebraic Kernel"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAlgebraicKerneldSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAlgebraicKernelD"
+, year = 2019
}
-@incollection{cgal:h-msms-15b
+@incollection{cgal:h-msms-19b
, author = "Michael Hoffmann"
, title = "Monotone and Sorted Matrix Search"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMatrixSearchSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgMatrixSearch"
+, year = 2019
}
-@incollection{cgal:fgsw-lqps-15b
+@incollection{cgal:fgsw-lqps-19b
, author = "Kaspar Fischer and Bernd G{\"a}rtner and Sven Sch{\"o}nherr and Frans Wessendorp"
, title = "Linear and Quadratic Programming Solver"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgQPSolverSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgQPSolver"
+, year = 2019
}
-@incollection{cgal:bfghhkps-lgk23-15b
+@incollection{cgal:bfghhkps-lgk23-19b
, author = "Herv{\'e} Br{\"o}nnimann and Andreas Fabri and Geert-Jan Giezeman and Susan Hert and Michael Hoffmann and Lutz Kettner and Sylvain Pion and Stefan Schirra"
, title = "{2D} and {3D} Linear Geometry Kernel"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgKernel23Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgKernel23"
+, year = 2019
}
-@incollection{cgal:s-gkd-15b
+@incollection{cgal:s-gkd-19b
, author = "Michael Seel"
, title = "{dD} Geometry Kernel"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgKernelDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgKernelD"
+, year = 2019
}
-@incollection{cgal:cpt-cgk2-15b
+@incollection{cgal:cpt-cgk2-19b
, author = "Pedro Machado Manh{\~a}es de Castro and Sylvain Pion and Monique Teillaud"
, title = "{2D} Circular Geometry Kernel"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgCircularKernel2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgCircularKernel2"
+, year = 2019
}
-@incollection{cgal:cclt-sgk3-15b
+@incollection{cgal:cclt-sgk3-19b
, author = "Pedro Machado Manh{\~a}es de Castro and Fr{\'e}d{\'e}ric Cazals and S{\'e}bastien Loriot and Monique Teillaud"
, title = "{3D} Spherical Geometry Kernel"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSphericalKernel3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgCircularKernel3"
+, year = 2019
}
-@incollection{cgal:hs-chep2-15b
+@incollection{cgal:hs-chep2-19b
, author = "Susan Hert and Stefan Schirra"
, title = "{2D} Convex Hulls and Extreme Points"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgConvexHull2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgConvexHull2"
+, year = 2019
}
-@incollection{cgal:hs-ch3-15b
+@incollection{cgal:hs-ch3-19b
, author = "Susan Hert and Stefan Schirra"
, title = "{3D} Convex Hulls"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgConvexHull3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgConvexHull3"
+, year = 2019
}
-@incollection{cgal:hs-chdt3-15b
+@incollection{cgal:hs-chdt3-19b
, author = "Susan Hert and Michael Seel"
, title = "{dD} Convex Hulls and Delaunay Triangulations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgConvexHullDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgConvexHullD"
+, year = 2019
}
-@incollection{cgal:gw-p2-15b
+@incollection{cgal:gw-p2-19b
, author = "Geert-Jan Giezeman and Wieger Wesselink"
, title = "{2D} Polygons"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolygon2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolygon2"
+, year = 2019
}
-@incollection{cgal:fwzh-rbso2-15b
+@incollection{cgal:fwzh-rbso2-19b
, author = "Efi Fogel and Ophir Setter and Ron Wein and Guy Zucker and Baruch Zukerman and Dan Halperin"
, title = "{2D} Regularized Boolean Set-Operations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgBooleanSetOperations2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgBooleanSetOperations2"
+, year = 2019
}
-@incollection{cgal:s-bonp2-15b
+@incollection{cgal:s-bonp2-19b
, author = "Michael Seel"
, title = "{2D} Boolean Operations on Nef Polygons"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgNef2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgNef2"
+, year = 2019
}
-@incollection{cgal:hk-bonpes2-15b
+@incollection{cgal:hk-bonpes2-19b
, author = "Peter Hachenberger and Lutz Kettner"
, title = "{2D} Boolean Operations on Nef Polygons Embedded on the Sphere"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgNefS2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgNefS2"
+, year = 2019
}
-@incollection{cgal:h-pp2-15b
+@incollection{cgal:h-pp2-19b
, author = "Susan Hert"
, title = "{2D} Polygon Partitioning"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolygonPartitioning2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPartition2"
+, year = 2019
}
-@incollection{cgal:c-sspo2-15b
+@incollection{cgal:c-sspo2-19b
, author = "Fernando Cacciola"
, title = "{2D} Straight Skeleton and Polygon Offsetting"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgStraightSkeleton2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgStraightSkeleton2"
+, year = 2019
}
-@incollection{cgal:w-rms2-15b
+@incollection{cgal:w-rms2-19b
, author = "Ron Wein and Alon Baram and Eyal Flato and Efi Fogel and Michael Hemmer and Sebastian Morr"
, title = "{2D} Minkowski Sums"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMinkowskiSum2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgMinkowskiSum2"
+, year = 2019
}
-@incollection{cgal:f-ps2-15b
+@incollection{cgal:f-ps2-19b
, author = "Andreas Fabri"
, title = "{2D} Polyline Simplification"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolylineSimplification2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolylineSimplification2"
+, year = 2019
}
-@incollection{hhb-visibility-2-15b
+@incollection{hhb-visibility-2-19b
, author = "Michael Hemmer and Kan Huang and Francisc Bungiu and Ning Xu"
, title = "{2D} Visibility Computation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgVisibility_2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgVisibility2"
+, year = 2019
}
-@incollection{cgal:k-ps-15b
+@incollection{cgal:sf-sms2-19b
+, author = "Shahar Shamai and Efi Fogel"
+, title = "{2D} Movable Separability of Sets"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSetMovableSeparability2"
+, year = 2019
+}
+
+@incollection{cgal:k-ps-19b
, author = "Lutz Kettner"
, title = "{3D} Polyhedral Surface"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolyhedronSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolyhedron"
+, year = 2019
}
-@incollection{cgal:k-hds-15b
+@incollection{cgal:k-hds-19b
, author = "Lutz Kettner"
, title = "Halfedge Data Structures"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgHDSSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgHalfedgeDS"
+, year = 2019
}
-@incollection{cgal:bsmf-sm-15b
+@incollection{cgal:bsmf-sm-19b
, author = "Mario Botsch and Daniel Sieger and Philipp Moeller and Andreas Fabri"
, title = "Surface Mesh"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceMeshSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMesh"
+, year = 2019
}
-@incollection{cgal:d-cm-15b
+@incollection{cgal:d-cm-19b
, author = "Guillaume Damiand"
, title = "Combinatorial Maps"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgCombinatorialMaps"
+, year = 2019
+}
+
+@incollection{cgal:d-gm-19b
+, author = "Guillaume Damiand"
+, title = "Generalized Maps"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgCombinatorialMapsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgGeneralizedMaps"
+, year = 2019
}
-@incollection{cgal:d-lcc-12-15b
+@incollection{cgal:d-lcc-12-19b
, author = "Guillaume Damiand"
, title = "Linear Cell Complex"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgLinearCellComplexSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgLinearCellComplex"
+, year = 2019
}
-@incollection{cgal:hk-bonp3-15b
+@incollection{cgal:hk-bonp3-19b
, author = "Peter Hachenberger and Lutz Kettner"
, title = "{3D} Boolean Operations on Nef Polyhedra"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgNef3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgNef3"
+, year = 2019
}
-@incollection{cgal:h-emspe-15b
+@incollection{cgal:h-emspe-19b
, author = "Peter Hachenberger"
, title = "Convex Decomposition of Polyhedra"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgConvexDecomposition3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgConvexDecomposition3"
+, year = 2019
}
-@incollection{cgal:h-msp3-15b
+@incollection{cgal:h-msp3-19b
, author = "Peter Hachenberger"
, title = "{3D} Minkowski Sum of Polyhedra"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMinkowskiSum3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgMinkowskiSum3"
+, year = 2019
}
-@incollection{cgal:wfzh-a2-15b
+@incollection{cgal:wfzh-a2-19b
, author = "Ron Wein and Eric Berberich and Efi Fogel and Dan Halperin and Michael Hemmer and Oren Salzman and Baruch Zukerman"
, title = "{2D} Arrangements"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgArrangement2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgArrangementOnSurface2"
+, year = 2019
}
-@incollection{cgal:wfz-ic2-15b
+@incollection{cgal:wfz-ic2-19b
, author = "Baruch Zukerman and Ron Wein and Efi Fogel"
, title = "{2D} Intersection of Curves"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgIntersectionOfCurves2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceSweep2"
+, year = 2019
}
-@incollection{cgal:p-sr2-15b
+@incollection{cgal:p-sr2-19b
, author = "Eli Packer"
, title = "{2D} Snap Rounding"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSnapRounding2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSnapRounding2"
+, year = 2019
}
-@incollection{cgal:w-e2-15b
+@incollection{cgal:w-e2-19b
, author = "Ron Wein"
, title = "{2D} Envelopes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgEnvelope2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgEnvelope2"
+, year = 2019
}
-@incollection{cgal:mwz-e3-15b
+@incollection{cgal:mwz-e3-19b
, author = "Dan Halperin and Michal Meyerovitch and Ron Wein and Baruch Zukerman"
, title = "{3D} Envelopes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgEnvelope3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgEnvelope3"
+, year = 2019
}
-@incollection{cgal:y-t2-15b
+@incollection{cgal:y-t2-19b
, author = "Mariette Yvinec"
, title = "{2D} Triangulation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgTriangulation2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTriangulation2"
+, year = 2019
}
-@incollection{cgal:py-tds2-15b
+@incollection{cgal:py-tds2-19b
, author = "Sylvain Pion and Mariette Yvinec"
, title = "{2D} Triangulation Data Structure"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgTDS2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTDS2"
+, year = 2019
}
-@incollection{cgal:k-pt2-13-15b
+@incollection{cgal:k-pt2-13-19b
, author = "Nico Kruithof"
, title = "{2D} Periodic Triangulations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPeriodic2Triangulation2"
+, year = 2019
+}
+
+@incollection{cgal:bt-ht2-17-19b
+, author = "Mikhail Bogdanov and Iordan Iordanov and Monique Teillaud"
+, title = "{2D} Hyperbolic Delaunay Triangulations"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgHyperbolicTriangulation2"
+, year = 2019
+}
+
+@incollection{cgal:i-p4ht2-17-19b
+, author = "Iordan Iordanov and Monique Teillaud"
+, title = "{2D} Periodic Hyperbolic Triangulations"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPeriodic2Triangulation2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPeriodic4HyperbolicTriangulation2"
+, year = 2019
}
-@incollection{cgal:pt-t3-15b
+@incollection{cgal:pt-t3-19b
, author = "Cl{\'e}ment Jamin and Sylvain Pion and Monique Teillaud"
, title = "{3D} Triangulations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgTriangulation3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTriangulation3"
+, year = 2019
}
-@incollection{cgal:pt-tds3-15b
+@incollection{cgal:pt-tds3-19b
, author = "Cl{\'e}ment Jamin and Sylvain Pion and Monique Teillaud"
, title = "{3D} Triangulation Data Structure"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgTDS3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTDS3"
+, year = 2019
}
-@incollection{cgal:ct-pt3-15b
-, author = "Manuel Caroli and Monique Teillaud"
+@incollection{cgal:ct-pt3-19b
+, author = "Manuel Caroli and Aymeric Pell{\'e} and Mael Rouxel-Labb{\'e} and Monique Teillaud"
, title = "{3D} Periodic Triangulations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPeriodic3Triangulation3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPeriodic3Triangulation3"
+, year = 2019
}
-@incollection{cgal:hdj-t-15b
-, author = "Samuel Hornus and Olivier Devillers and Cl{\'e}ment Jamin"
+@incollection{cgal:hdj-t-19b
+, author = "Olivier Devillers and Samuel Hornus and Cl{\'e}ment Jamin"
, title = "{dD} Triangulations"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgTriangulationsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTriangulations"
+, year = 2019
}
-@incollection{cgal:d-as2-15b
+@incollection{cgal:d-as2-19b
, author = "Tran Kai Frank Da"
, title = "{2D} Alpha Shapes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAlphaShape2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAlphaShapes2"
+, year = 2019
}
-@incollection{cgal:dy-as3-15b
+@incollection{cgal:dy-as3-19b
, author = "Tran Kai Frank Da and S{\'e}bastien Loriot and Mariette Yvinec"
, title = "{3D} Alpha Shapes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAlphaShapes3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAlphaShapes3"
+, year = 2019
}
-@incollection{cgal:k-sdg2-15b
+@incollection{cgal:k-sdg2-19b
, author = "Menelaos Karavelas"
, title = "{2D} Segment Delaunay Graphs"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSegmentDelaunayGraph2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSegmentDelaunayGraph2"
+, year = 2019
}
-@incollection{cgal:cdp-sdglinf2-15b
+@incollection{cgal:cdp-sdglinf2-19b
, author = "Panagiotis Cheilaris and Sandeep Kumar Dey and Evanthia Papadopoulou"
, title = "L Infinity Segment Delaunay Graphs"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSDGLinfSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSegmentDelaunayGraphLinf2"
+, year = 2019
}
-@incollection{cgal:ky-ag2-15b
+@incollection{cgal:ky-ag2-19b
, author = "Menelaos Karavelas and Mariette Yvinec"
, title = "{2D} Apollonius Graphs (Delaunay Graphs of Disks)"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgApolloniusGraph2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgApolloniusGraph2"
+, year = 2019
}
-@incollection{cgal:k-vda2-15b
+@incollection{cgal:k-vda2-19b
, author = "Menelaos Karavelas"
, title = "{2D} Voronoi Diagram Adaptor"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgVoronoiDiagramAdaptor2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgVoronoiDiagram2"
+, year = 2019
}
-@incollection{cgal:r-ctm2-15b
+@incollection{cgal:r-ctm2-19b
, author = "Laurent Rineau"
, title = "{2D} Conforming Triangulations and Meshes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMesh2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgMesh2"
+, year = 2019
}
-@incollection{cgal:ry-smg-15b
+@incollection{cgal:ry-smg-19b
, author = "Laurent Rineau and Mariette Yvinec"
, title = "{3D} Surface Mesh Generation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMesher3"
+, year = 2019
+}
+
+@incollection{cgal:k-ssm3-19b
+, author = "Nico Kruithof"
+, title = "{3D} Skin Surface Meshing"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSkinSurface3"
+, year = 2019
+}
+
+@incollection{cgal:rty-m3-19b
+, author = "Pierre Alliez and Cl{\'e}ment Jamin and Laurent Rineau and St{\'e}phane Tayeb and Jane Tournois and Mariette Yvinec"
+, title = "{3D} Mesh Generation"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgMesh3"
+, year = 2019
+}
+
+@incollection{cgal:btprl-p3m3-19b
+, author = "Mikhail Bogdanov and Aymeric Pell{\'e} and Mael Rouxel-Labb{\'e} and Monique Teillaud"
+, title = "{3D} Periodic Mesh Generation"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceMesher3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPeriodic3Mesh3"
+, year = 2019
}
-@incollection{cgal:asg-srps-15b
+@incollection{cgal:asg-srps-19b
, author = "Pierre Alliez and Laurent Saboret and Ga{\"e}l Guennebaud"
-, title = "Surface Reconstruction from Point Sets"
+, title = "Poisson Surface Reconstruction"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceReconstructionFromPointSetsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPoissonSurfaceReconstruction3"
+, year = 2019
}
-@incollection{cgal:ssr3-15b
+@incollection{cgal:ssr3-19b
, author = "Thijs van Lankveld"
, title = "Scale-Space Surface Reconstruction"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgScaleSpaceReconstruction3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgScaleSpaceReconstruction3"
+, year = 2019
}
-@incollection{cgal:dc-afsr-15b
+@incollection{cgal:dc-afsr-19b
, author = "Tran Kai Frank Da and David Cohen-Steiner"
, title = "Advancing Front Surface Reconstruction"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAdvancingFrontSurfaceReconstructionSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAdvancingFrontSurfaceReconstruction"
+, year = 2019
}
-@incollection{cgal:k-ssm3-15b
-, author = "Nico Kruithof"
-, title = "{3D} Skin Surface Meshing"
+@incollection{cgal:x-x-19b
+, author = "Liangliang Nan"
+, title = "Polygonal Surface Reconstruction"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSkinSurface3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolygonalSurfaceReconstruction"
+, year = 2019
}
-@incollection{cgal:rty-m3-15b
-, author = "Pierre Alliez and Cl{\'e}ment Jamin and Laurent Rineau and St{\'e}phane Tayeb and Jane Tournois and Mariette Yvinec"
-, title = "{3D} Mesh Generation"
+@incollection{cgal:gavj-rs-19b
+, author = "Pierre Alliez and David Cohen-Steiner and Fernando de Goes and Cl{\'e}ment Jamin and Ivo Vigan"
+, title = "Optimal Transportation Curve Reconstruction"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMesh_3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgOptimalTransportationReconstruction2"
+, year = 2019
}
-@incollection{cgal:lty-pmp-15b
-, author = "S{\'e}bastien Loriot and Jane Tournois and Ilker O. Yaz"
+@incollection{cgal:lty-pmp-19b
+, author = "S{\'e}bastien Loriot and Mael Rouxel-Labb{\'e} and Jane Tournois and Ilker O. Yaz"
, title = "Polygon Mesh Processing"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPolygonMeshProcessingSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolygonMeshProcessing"
+, year = 2019
}
-@incollection{cgal:s-ssm2-15b
+@incollection{cgal:s-ssm2-19b
, author = "Le-Jeng Andy Shiue"
, title = "{3D} Surface Subdivision Methods"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceSubdivisionMethods3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceSubdivisionMethod3"
+, year = 2019
}
-@incollection{cgal:y-smsimpl-15b
+@incollection{cgal:y-smsimpl-19b
, author = "Ilker O. Yaz and S{\'e}bastien Loriot"
, title = "Triangulated Surface Mesh Segmentation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceSegmentationSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshSegmentation"
+, year = 2019
}
-@incollection{cgal:c-tsms-12-15b
+@incollection{cgal:c-tsms-12-19b
, author = "Fernando Cacciola"
, title = "Triangulated Surface Mesh Simplification"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceMeshSimplificationSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshSimplification"
+, year = 2019
}
-@incollection{cgal:lsxy-tsmd-15b
+@incollection{cgal:lsxy-tsmd-19b
, author = "S{\'e}bastien Loriot and Olga Sorkine-Hornung and Yin Xu and Ilker O. Yaz"
, title = "Triangulated Surface Mesh Deformation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceModelingSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshDeformation"
+, year = 2019
}
-@incollection{cgal:sal-pptsm2-15b
-, author = "Laurent Saboret and Pierre Alliez and Bruno L{\'e}vy"
-, title = "Planar Parameterization of Triangulated Surface Meshes"
+@incollection{cgal:salf-pptsm2-19b
+, author = "Laurent Saboret and Pierre Alliez and Bruno L{\'e}vy and Mael Rouxel-Labb{\'e} and Andreas Fabri"
+, title = "Triangulated Surface Mesh Parameterization"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceParameterizationSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshParameterization"
+, year = 2019
}
-@incollection{cgal:klcdv-tsmsp-15b
+@incollection{cgal:klcdv-tsmsp-19b
, author = "Stephen Kiazyk and S{\'e}bastien Loriot and {\'E}ric Colin de Verdi{\`e}re"
, title = "Triangulated Surface Mesh Shortest Paths"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSurfaceMeshShortestPathSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshShortestPath"
+, year = 2019
}
-@incollection{cgal:glt-tsms-15b
+@incollection{cgal:glt-tsms-19b
, author = "Xiang Gao and S{\'e}bastien Loriot and Andrea Tagliasacchi"
, title = "Triangulated Surface Mesh Skeletonization"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgMeanCurvatureSkeleton3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSurfaceMeshSkeletonization"
+, year = 2019
}
-@incollection{cgal:cp-arutsm-15b
+@incollection{cgal:az-tsma-19b
+, author = "Pierre Alliez and David Cohen-Steiner and Lingjie Zhu"
+, title = "Triangulated Surface Mesh Approximation"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgTSMA"
+, year = 2019
+}
+
+@incollection{cgal:cp-arutsm-19b
, author = "Marc Pouget and Fr{\'e}d{\'e}ric Cazals"
, title = "Approximation of Ridges and Umbilics on Triangulated Surface Meshes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgRidges_3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgRidges3"
+, year = 2019
}
-@incollection{cgal:pc-eldp-15b
+@incollection{cgal:pc-eldp-19b
, author = "Marc Pouget and Fr{\'e}d{\'e}ric Cazals"
, title = "Estimation of Local Differential Properties of Point-Sampled Surfaces"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgJet_fitting_3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgJetFitting3"
+, year = 2019
}
-@incollection{cgal:ass-psp-15b
-, author = "Pierre Alliez and Cl{\'e}ment Jamin and Quentin M{\'e}rigot and Jocelyn Meyron and Laurent Saboret and Nader Salman and Shihao Wu"
+@incollection{cgal:g-ps-19b
+, author = "Simon Giraudot"
+, title = "{3D} Point Set"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPointSet3"
+, year = 2019
+}
+
+@incollection{cgal:ass-psp-19b
+, author = "Pierre Alliez and Simon Giraudot and Cl{\'e}ment Jamin and Florent Lafarge and Quentin M{\'e}rigot and Jocelyn Meyron and Laurent Saboret and Nader Salman and Shihao Wu"
, title = "Point Set Processing"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPointSetProcessingSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPointSetProcessing3"
+, year = 2019
}
-@incollection{cgal:ovja-pssd-15b
-, author = "Sven Oesau and Yannick Verdie and Cl{\'e}ment Jamin and Pierre Alliez"
-, title = "Point Set Shape Detection"
+@incollection{cgal:ovja-pssd-19b
+, author = "Sven Oesau and Yannick Verdie and Cl{\'e}ment Jamin and Pierre Alliez and Florent Lafarge and Simon Giraudot and Thien Hoang and Dmitry Anisimov"
+, title = "Shape Detection"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPointSetShapeDetection3Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgShapeDetection"
+, year = 2019
}
-@incollection{cgal:m-ps-15b
+@incollection{cgal:m-ps-19b
, author = "Abdelkrim Mebarki"
, title = "{2D} Placement of Streamlines"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgStreamLines2"
+, year = 2019
+}
+
+@incollection{cgal:lm-clscm-12-19b
+, author = "Simon Giraudot and Florent Lafarge"
+, title = "Classification"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPlacementOfStreamlines2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgClassification"
+, year = 2019
}
-@incollection{cgal:b-ss2-15b
+@incollection{cgal:cvf-hm3-19b
+, author = "Keenan Crane and Christina Vaz and Andreas Fabri"
+, title = "The Heat Method"
+, publisher = "{CGAL Editorial Board}"
+, edition = "{5.0}"
+, booktitle = "{CGAL} User and Reference Manual"
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgHeatMethodSummary"
+, year = 2019
+}
+
+@incollection{cgal:b-ss2-19b
, author = "Matthias B{\"a}sken"
, title = "{2D} Range and Neighbor Search"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPointSet2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPointSet2"
+, year = 2019
}
-@incollection{cgal:f-isl-15b
+@incollection{cgal:f-isl-19b
, author = "Andreas Fabri"
, title = "Interval Skip List"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgIntervalSkipListSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgIntervalSkipList"
+, year = 2019
}
-@incollection{cgal:tf-ssd-15b
+@incollection{cgal:tf-ssd-19b
, author = "Hans Tangelder and Andreas Fabri"
, title = "{dD} Spatial Searching"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSpatialSearchingDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSpatialSearchingD"
+, year = 2019
}
-@incollection{cgal:n-rstd-15b
+@incollection{cgal:n-rstd-19b
, author = "Gabriele Neyer"
, title = "{dD} Range and Segment Trees"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgRangeSegmentTreesDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSearchStructures"
+, year = 2019
}
-@incollection{cgal:kmz-isiobd-15b
+@incollection{cgal:kmz-isiobd-19b
, author = "Lutz Kettner and Andreas Meyer and Afra Zomorodian"
, title = "Intersecting Sequences of {dD} Iso-oriented Boxes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgBoxIntersectionDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgBoxIntersectionD"
+, year = 2019
}
-@incollection{cgal:atw-aabb-15b
+@incollection{cgal:atw-aabb-19b
, author = "Pierre Alliez and St{\'e}phane Tayeb and Camille Wormser"
, title = "{3D} Fast Intersection and Distance Computation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgAABB_treeSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgAABBTree"
+, year = 2019
}
-@incollection{cgal:dd-ss-15b
+@incollection{cgal:dd-ss-19b
, author = "Christophe Delage and Olivier Devillers"
, title = "Spatial Sorting"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgSpatialSortingSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSpatialSorting"
+, year = 2019
}
-@incollection{cgal:fghhs-bv-15b
+@incollection{cgal:fghhs-bv-19b
, author = "Kaspar Fischer and Bernd G{\"a}rtner and Thomas Herrmann and Michael Hoffmann and Sven Sch{\"o}nherr"
, title = "Bounding Volumes"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgBoundingVolumesSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgBoundingVolumes"
+, year = 2019
}
-@incollection{cgal:hp-ia-15b
+@incollection{cgal:hp-ia-19b
, author = "Michael Hoffmann and Eli Packer"
, title = "Inscribed Areas"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgInscribedAreasSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgInscribedAreas"
+, year = 2019
}
-@incollection{cgal:fghhs-od-15b
+@incollection{cgal:fghhs-od-19b
, author = "Kaspar Fischer and Bernd G{\"a}rtner and Thomas Herrmann and Michael Hoffmann and Sven Sch{\"o}nherr"
, title = "Optimal Distances"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgOptimalDistancesSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPolytopeDistanceD"
+, year = 2019
}
-@incollection{cgal:ap-pcad-15b
+@incollection{cgal:ap-pcad-19b
, author = "Pierre Alliez and Sylvain Pion and Ankit Gupta"
, title = "Principal Component Analysis"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgPrincipalComponentAnalysisDSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPrincipalComponentAnalysisD"
+, year = 2019
}
-@incollection{cgal:f-i-15b
+@incollection{cgal:f-i-19b
, author = "Julia Fl{\"o}totto"
, title = "{2D} and Surface Function Interpolation"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgInterpolation2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgInterpolation2"
+, year = 2019
}
-@incollection{cgal:abha-gbc-15b
+@incollection{cgal:abha-gbc-19b
, author = "Dmitry Anisimov and David Bommes and Kai Hormann and Pierre Alliez"
, title = "{2D} Generalized Barycentric Coordinates"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgBarycentric_coordinates_2Summary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgBarycentricCoordinates2"
+, year = 2019
}
-@incollection{cgal:r-kds-15b
-, author = "Daniel Russel"
-, title = "Kinetic Data Structures"
+@incollection{cgal:hkpw-se-19b
+, author = "Michael Hoffmann and Lutz Kettner and Sylvain Pion and Ron Wein"
+, title = "STL Extensions for {CGAL}"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgKdsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSTLExtension"
+, year = 2019
}
-@incollection{cgal:r-kdsf-15b
-, author = "Daniel Russel"
-, title = "Kinetic Framework"
+@incollection{cgal:cfw-cbgl-19b
+, author = "Andreas Fabri and Fernando Cacciola and Philipp Moeller and Ron Wein"
+, title = "{CGAL} and the {Boost} Graph Library"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgKdsFrameworkSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgBGL"
+, year = 2019
}
-@incollection{cgal:hkpw-se-15b
-, author = "Michael Hoffmann and Lutz Kettner and Sylvain Pion and Ron Wein"
-, title = "STL Extensions for {CGAL}"
+@incollection{cgal:eb-solver-19b
+, author = "Simon Giraudot and Pierre Alliez and Fr{\'e}d{\'e}ric Cazals and Ga{\"e}l Guennebaud and Bruno L{\'e}vy and Marc Pouget and Laurent Saboret and Liangliang Nan"
+, title = "{CGAL} and Solvers"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgStlExtensionSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgSolverInterface"
+, year = 2019
}
-@incollection{cgal:cfw-cbgl-15b
-, author = "Andreas Fabri and Fernando Cacciola and Philipp Moeller and Ron Wein"
-, title = "{CGAL} and the {Boost} Graph Library"
+@incollection{cgal:fs-cbpm-19b
+, author = "Andreas Fabri and Laurent Saboret"
+, title = "{CGAL} and {Boost} Property Maps"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgBGLSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgPropertyMap"
+, year = 2019
}
-@incollection{cgal:fs-cbpm-15b
-, author = "Andreas Fabri and Laurent Saboret"
-, title = "{CGAL} and {Boost} Property Maps"
+@incollection{cgal:st-cbs-19b
+, author = "Weisheng Si and Quincy Tse and Fr{\'e}d{\'e}rik Paradis"
+, title = "Cone-Based Spanners"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgProperty_mapSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgConeSpanners2"
+, year = 2019
}
-@incollection{cgal:dksy-hc-15b
+@incollection{cgal:dksy-hc-19b
, author = "Olivier Devillers and Lutz Kettner and Sylvain Pion and Michael Seel and Mariette Yvinec"
, title = "Handles and Circulators"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgHandlesAndCirculatorsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgHandlesAndCirculators"
+, year = 2019
}
-@incollection{cgal:dhhk-gog-15b
-, author = "Pedro M. M. de Castro and Olivier Devillers and Susan Hert and Michael Hoffmann and Lutz Kettner and Sven Sch{\"o}nherr and Alexandru Tifrea"
+@incollection{cgal:dhhk-gog-19b
+, author = "Pedro M. M. de Castro and Olivier Devillers and Susan Hert and Michael Hoffmann and Lutz Kettner and Sven Sch{\"o}nherr and Alexandru Tifrea and Maxime Gimeno"
, title = "Geometric Object Generators"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgGeneratorsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgGenerators"
+, year = 2019
}
-@incollection{cgal:kps-pthum-15b
+@incollection{cgal:kps-pthum-19b
, author = "Lutz Kettner and Sylvain Pion and Michael Seel"
, title = "Profiling tools, Hash Map, Union-find, Modifiers"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgProfilingToolsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#Miscellany"
+, year = 2019
}
-@incollection{cgal:fgk-ios-12-15b
+@incollection{cgal:fgk-ios-12-19b
, author = "Andreas Fabri and Geert-Jan Giezeman and Lutz Kettner"
, title = "IO Streams"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgIOstreamsSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgStreamSupport"
+, year = 2019
}
-@incollection{cgal:fp-gv-15b
+@incollection{cgal:fp-gv-19b
, author = "Andreas Fabri and Sylvain Pion"
, title = "Geomview"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgGeomviewSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgGeomview"
+, year = 2019
}
-@incollection{cgal:fr-cqgvf-15b
+@incollection{cgal:fr-cqgvf-19b
, author = "Andreas Fabri and Laurent Rineau"
, title = "{CGAL} and the {Qt} Graphics View Framework"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgGraphicsViewSummary"
-, year = 2015
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgGraphicsView"
+, year = 2019
}
-@incollection{cgal:lp-gi-15b
+@incollection{cgal:lp-gi-19b
, author = "Olivier Devillers and S{\'e}bastien Loriot and Sylvain Pion"
, title = "{CGAL} Ipelets"
, publisher = "{CGAL Editorial Board}"
-, edition = "{4.7}"
+, edition = "{5.0}"
, booktitle = "{CGAL} User and Reference Manual"
-, url = "http://doc.cgal.org/4.7/Manual/packages.html#PkgCGALIpeletsSummary"
-, year = 2015
-}
+, url = "https://doc.cgal.org/5.0/Manual/packages.html#PkgCGALIpelets"
+, year = 2019
+}
\ No newline at end of file
diff --git a/biblio/how_to_cite_gudhi.bib b/biblio/how_to_cite_gudhi.bib.in
index 942f8d7e..05d3cc98 100644
--- a/biblio/how_to_cite_gudhi.bib
+++ b/biblio/how_to_cite_gudhi.bib.in
@@ -2,141 +2,157 @@
, title = "{GUDHI} User and Reference Manual"
, author = "{The GUDHI Project}"
, publisher = "{GUDHI Editorial Board}"
-, year = 2015
-, url = "http://gudhi.gforge.inria.fr/doc/latest/"
+, edition = "{@GUDHI_VERSION@}"
+, year = @GUDHI_VERSION_YEAR@
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/"
}
@incollection{gudhi:FilteredComplexes
, author = "Cl\'ement Maria"
, title = "Filtered Complexes"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__simplex__tree.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__simplex__tree.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:PersistentCohomology
, author = "Cl\'ement Maria"
, title = "Persistent Cohomology"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__persistent__cohomology.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__persistent__cohomology.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:Contraction
, author = "David Salinas"
, title = "Contraction"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__contr.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__contr.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:SkeletonBlocker
, author = "David Salinas"
, title = "Skeleton-Blocker"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__skbl.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__skbl.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:AlphaComplex
, author = "Vincent Rouvreau"
, title = "Alpha complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__alpha__complex.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__alpha__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:CubicalComplex
, author = "Pawel Dlotko"
, title = "Cubical complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__cubical__complex.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cubical__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:WitnessComplex
, author = "Siargey Kachanovich"
, title = "Witness complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__witness__complex.html"
-, year = 2015
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__witness__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:SubSampling
, author = "Cl\'ement Jamin, Siargey Kachanovich"
, title = "Subsampling"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__subsampling.html"
-, year = 2016
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__subsampling.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:SpatialSearching
, author = "Cl\'ement Jamin"
, title = "Spatial searching"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__spatial__searching.html"
-, year = 2016
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__spatial__searching.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:TangentialComplex
, author = "Cl\'ement Jamin"
, title = "Tangential complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__tangential__complex.html"
-, year = 2016
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__tangential__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:RipsComplex
, author = "Cl\'ement Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse"
, title = "Rips complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__rips__complex.html"
-, year = 2016
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__rips__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:BottleneckDistance
, author = "Fran{{\c{c}}ois Godi"
, title = "Bottleneck distance"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__bottleneck__distance.html"
-, year = 2016
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__bottleneck__distance.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:cython
, author = "Vincent Rouvreau"
, title = "Cython interface"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/python/latest/"
-, year = 2016
+, url = "https://gudhi.inria.fr/python/@GUDHI_VERSION@/"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:CoverComplex
, author = "Mathieu Carri\`ere"
, title = "Cover complex"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group__cover__complex.html"
-, year = 2017
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group__cover__complex.html"
+, year = @GUDHI_VERSION_YEAR@
}
@incollection{gudhi:PersistenceRepresentations
, author = "Pawel Dlotko"
, title = "Persistence representations"
, publisher = "{GUDHI Editorial Board}"
+, edition = "{@GUDHI_VERSION@}"
, booktitle = "{GUDHI} User and Reference Manual"
-, url = "http://gudhi.gforge.inria.fr/doc/latest/group___persistence__representations.html"
-, year = 2017
+, url = "https://gudhi.inria.fr/doc/@GUDHI_VERSION@/group___persistence__representations.html"
+, year = @GUDHI_VERSION_YEAR@
}
diff --git a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
index 005a712a..99ad94b9 100644
--- a/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
+++ b/src/Alpha_complex/benchmark/Alpha_complex_3d_benchmark.cpp
@@ -115,7 +115,7 @@ void benchmark_weighted_points_on_torus_3D(const std::string& msg) {
std::cout << " Alpha complex 3d on torus with " << nb_points << " points." << std::endl;
std::vector<K::Point_d> points_on_torus = Gudhi::generate_points_on_torus_3D<K>(nb_points, 1.0, 0.5);
- using Point = typename Weighted_alpha_complex_3d::Point_3;
+ using Point = typename Weighted_alpha_complex_3d::Bare_point_3;
using Weighted_point = typename Weighted_alpha_complex_3d::Weighted_point_3;
std::vector<Weighted_point> points;
@@ -206,7 +206,7 @@ void benchmark_weighted_periodic_points(const std::string& msg) {
std::cout << " Weighted periodic alpha complex 3d with " << nb_points * nb_points * nb_points << " points."
<< std::endl;
- using Point = typename Weighted_periodic_alpha_complex_3d::Point_3;
+ using Point = typename Weighted_periodic_alpha_complex_3d::Bare_point_3;
using Weighted_point = typename Weighted_periodic_alpha_complex_3d::Weighted_point_3;
std::vector<Weighted_point> points;
diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h
index b075d1fc..a8b1a106 100644
--- a/src/Alpha_complex/doc/Intro_alpha_complex.h
+++ b/src/Alpha_complex/doc/Intro_alpha_complex.h
@@ -31,26 +31,37 @@ namespace alpha_complex {
* circumsphere is empty (the simplex is then said to be Gabriel), and as the minimum of the filtration
* values of the codimension 1 cofaces that make it not Gabriel otherwise.
*
- * All simplices that have a filtration value strictly greater than a given alpha squared value are not inserted into
- * the complex.
+ * All simplices whose filtration value is \f$ > \alpha^2 \f$ are removed from the Delaunay complex
+ * when creating the simplicial complex, if a maximum \f$ \alpha^2 \f$ value is specified.
*
* \image html "alpha_complex_representation.png" "Alpha-complex representation"
*
* Alpha_complex is constructing a <a target="_blank"
* href="http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations">Delaunay Triangulation</a>
- * \cite cgal:hdj-t-15b from <a target="_blank" href="http://www.cgal.org/">CGAL</a> (the Computational Geometry
- * Algorithms Library \cite cgal:eb-15b) and is able to create a `SimplicialComplexForAlpha`.
+ * \cite cgal:hdj-t-19b from <a target="_blank" href="http://www.cgal.org/">CGAL</a> (the Computational Geometry
+ * Algorithms Library \cite cgal:eb-19b) and is able to create a `SimplicialComplexForAlpha`.
*
* The complex is a template class requiring an Epick_d <a target="_blank"
* href="http://doc.cgal.org/latest/Kernel_d/index.html#Chapter_dD_Geometry_Kernel">dD Geometry Kernel</a>
- * \cite cgal:s-gkd-15b from CGAL as template parameter.
+ * \cite cgal:s-gkd-19b from CGAL as template parameter.
*
* \remark
- * - When the simplicial complex is constructed with an infinite value of alpha, the complex is a Delaunay
- * complex.
+ * - When an \f$\alpha\f$-complex is constructed with an infinite value of \f$ \alpha^2 \f$, the complex is a Delaunay
+ * complex (with special filtration values).
* - For people only interested in the topology of the \ref alpha_complex (for instance persistence),
* \ref alpha_complex is equivalent to the \ref cech_complex and much smaller if you do not bound the radii.
* \ref cech_complex can still make sense in higher dimension precisely because you can bound the radii.
+ * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass exact=true to create_complex, the
+ * filtration values are the exact ones converted to the filtration value type of the simplicial complex. This can be
+ * very slow. If you pass exact=false (the default), the filtration values are only guaranteed to have a small
+ * multiplicative error compared to the exact value, see <code><a class="el" target="_blank"
+ * href="https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html">
+ * CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double</a></code> for details. A drawback, when computing
+ * persistence, is that an empty exact interval [10^12,10^12] may become a non-empty approximate interval
+ * [10^12,10^12+10^6]. Using `CGAL::Epick_d` makes the computations slightly faster, and the combinatorics are still
+ * exact, but the computation of filtration values can exceptionally be arbitrarily bad. In all cases, we still
+ * guarantee that the output is a valid filtration (faces have a filtration value no larger than their cofaces).
+ * - For performance reasons, it is advised to use `Alpha_complex` with \ref cgal &ge; 5.0.0.
*
* \section pointsexample Example from points
*
@@ -124,13 +135,13 @@ namespace alpha_complex {
*
* \subsubsection nondecreasing Non decreasing filtration values
*
- * As the squared radii computed by CGAL are an approximation, it might happen that these alpha squared values do not
- * quite define a proper filtration (i.e. non-decreasing with respect to inclusion).
+ * As the squared radii computed by CGAL are an approximation, it might happen that these \f$ \alpha^2 \f$ values do
+ * not quite define a proper filtration (i.e. non-decreasing with respect to inclusion).
* We fix that up by calling `SimplicialComplexForAlpha::make_filtration_non_decreasing()`.
*
* \subsubsection pruneabove Prune above given filtration value
*
- * The simplex tree is pruned from the given maximum alpha squared value (cf.
+ * The simplex tree is pruned from the given maximum \f$ \alpha^2 \f$ value (cf.
* `SimplicialComplexForAlpha::prune_above_filtration()`).
* In the following example, the value is given by the user as argument of the program.
*
diff --git a/src/Alpha_complex/doc/alpha_complex_representation.ipe b/src/Alpha_complex/doc/alpha_complex_representation.ipe
index e8096b93..40ff1d0f 100644
--- a/src/Alpha_complex/doc/alpha_complex_representation.ipe
+++ b/src/Alpha_complex/doc/alpha_complex_representation.ipe
@@ -1,7 +1,7 @@
<?xml version="1.0"?>
<!DOCTYPE ipe SYSTEM "ipe.dtd">
-<ipe version="70107" creator="Ipe 7.1.10">
-<info created="D:20150603143945" modified="D:20160404172133"/>
+<ipe version="70206" creator="Ipe 7.2.7">
+<info created="D:20150603143945" modified="D:20200110100102"/>
<ipestyle name="basic">
<symbol name="arrow/arc(spx)">
<path stroke="sym-stroke" fill="sym-stroke" pen="sym-pen">
@@ -305,7 +305,7 @@ h
108.275 743.531 m
166.45 743.531 l
</path>
-<text matrix="1 0 0 1 142.618 -109.867" transformations="translations" pos="127.397 746.763" stroke="darkgray" type="label" width="6.41" height="4.289" depth="0" valign="baseline">$\alpha$</text>
+<text matrix="1 0 0 1 126.618 -109.867" transformations="translations" pos="127.397 746.763" stroke="darkgray" type="label" width="45.707" height="9.041" depth="1.32" valign="baseline" style="math">\alpha = \sqrt{32.0}</text>
<use matrix="1 0 0 1 -209.478 12.0238" name="mark/fdisk(sfx)" pos="300 720" size="normal" stroke="black" fill="white"/>
<use matrix="1 0 0 1 -210.178 22.1775" name="mark/fdisk(sfx)" pos="280 660" size="normal" stroke="black" fill="white"/>
<use matrix="1 0 0 1 -210.178 22.1775" name="mark/fdisk(sfx)" pos="370 690" size="normal" stroke="black" fill="white"/>
diff --git a/src/Alpha_complex/doc/alpha_complex_representation.png b/src/Alpha_complex/doc/alpha_complex_representation.png
index 7b81cd69..5ebb1e75 100644
--- a/src/Alpha_complex/doc/alpha_complex_representation.png
+++ b/src/Alpha_complex/doc/alpha_complex_representation.png
Binary files differ
diff --git a/src/Alpha_complex/example/Alpha_complex_from_off.cpp b/src/Alpha_complex/example/Alpha_complex_from_off.cpp
index d411e90a..220a66de 100644
--- a/src/Alpha_complex/example/Alpha_complex_from_off.cpp
+++ b/src/Alpha_complex/example/Alpha_complex_from_off.cpp
@@ -2,8 +2,6 @@
// to construct a simplex_tree from alpha complex
#include <gudhi/Simplex_tree.h>
-#include <CGAL/Epick_d.h>
-
#include <iostream>
#include <string>
@@ -23,22 +21,21 @@ int main(int argc, char **argv) {
// ----------------------------------------------------------------------------
// Init of an alpha complex from an OFF file
// ----------------------------------------------------------------------------
- using Kernel = CGAL::Epick_d< CGAL::Dynamic_dimension_tag >;
- Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_file(off_file_name);
+ Gudhi::alpha_complex::Alpha_complex<> alpha_complex_from_file(off_file_name);
- std::streambuf* streambufffer;
+ std::streambuf* streambuffer;
std::ofstream ouput_file_stream;
if (argc == 4) {
ouput_file_stream.open(std::string(argv[3]));
- streambufffer = ouput_file_stream.rdbuf();
+ streambuffer = ouput_file_stream.rdbuf();
} else {
- streambufffer = std::cout.rdbuf();
+ streambuffer = std::cout.rdbuf();
}
Gudhi::Simplex_tree<> simplex;
if (alpha_complex_from_file.create_complex(simplex, alpha_square_max_value)) {
- std::ostream output_stream(streambufffer);
+ std::ostream output_stream(streambuffer);
// ----------------------------------------------------------------------------
// Display information about the alpha complex
diff --git a/src/Alpha_complex/example/Alpha_complex_from_points.cpp b/src/Alpha_complex/example/Alpha_complex_from_points.cpp
index 981aa470..6526ca3a 100644
--- a/src/Alpha_complex/example/Alpha_complex_from_points.cpp
+++ b/src/Alpha_complex/example/Alpha_complex_from_points.cpp
@@ -2,12 +2,13 @@
// to construct a simplex_tree from alpha complex
#include <gudhi/Simplex_tree.h>
-#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
#include <iostream>
#include <vector>
-using Kernel = CGAL::Epick_d< CGAL::Dimension_tag<2> >;
+// Explicit dimension 2 Epeck_d kernel
+using Kernel = CGAL::Epeck_d< CGAL::Dimension_tag<2> >;
using Point = Kernel::Point_d;
using Vector_of_points = std::vector<Point>;
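A short sketch of the new default-exact kernel with a fixed dimension tag, as introduced by the hunk above (the point coordinates are illustrative only):

#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>
#include <CGAL/Epeck_d.h>
#include <vector>

using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;
using Point = Kernel::Point_d;

int main() {
  // Illustrative 2D point cloud.
  std::vector<Point> points = {Point(1., 1.), Point(7., 0.), Point(4., 6.), Point(9., 6.)};
  Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_from_points(points);
  Gudhi::Simplex_tree<> stree;
  alpha_from_points.create_complex(stree);  // no bound: the full Delaunay filtration
  return 0;
}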
diff --git a/src/Alpha_complex/example/CMakeLists.txt b/src/Alpha_complex/example/CMakeLists.txt
index b069b443..b0337934 100644
--- a/src/Alpha_complex/example/CMakeLists.txt
+++ b/src/Alpha_complex/example/CMakeLists.txt
@@ -5,9 +5,12 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
target_link_libraries(Alpha_complex_example_from_points ${CGAL_LIBRARY})
add_executable ( Alpha_complex_example_from_off Alpha_complex_from_off.cpp )
target_link_libraries(Alpha_complex_example_from_off ${CGAL_LIBRARY})
+ add_executable ( Alpha_complex_example_fast_from_off Fast_alpha_complex_from_off.cpp )
+ target_link_libraries(Alpha_complex_example_fast_from_off ${CGAL_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Alpha_complex_example_from_points ${TBB_LIBRARIES})
target_link_libraries(Alpha_complex_example_from_off ${TBB_LIBRARIES})
+ target_link_libraries(Alpha_complex_example_fast_from_off ${TBB_LIBRARIES})
endif()
add_test(NAME Alpha_complex_example_from_points COMMAND $<TARGET_FILE:Alpha_complex_example_from_points>)
@@ -16,7 +19,13 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
"${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" "60.0" "${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_60.txt")
add_test(NAME Alpha_complex_example_from_off_32 COMMAND $<TARGET_FILE:Alpha_complex_example_from_off>
"${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" "32.0" "${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_32.txt")
- if (DIFF_PATH)
+
+ add_test(NAME Alpha_complex_example_fast_from_off_60 COMMAND $<TARGET_FILE:Alpha_complex_example_fast_from_off>
+ "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" "60.0" "${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_60.txt")
+ add_test(NAME Alpha_complex_example_fast_from_off_32 COMMAND $<TARGET_FILE:Alpha_complex_example_fast_from_off>
+ "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" "32.0" "${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt")
+
+  if (DIFF_PATH)
# Do not forget to copy test results files in current binary dir
file(COPY "alphaoffreader_for_doc_32.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY "alphaoffreader_for_doc_60.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
@@ -25,6 +34,11 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
add_test(Alpha_complex_example_from_off_32_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
+
+ add_test(Alpha_complex_example_fast_from_off_60_diff_files ${DIFF_PATH}
+ ${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
+ add_test(Alpha_complex_example_fast_from_off_32_diff_files ${DIFF_PATH}
+ ${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
endif()
add_executable ( Alpha_complex_example_weighted_3d_from_points Weighted_alpha_complex_3d_from_points.cpp )
diff --git a/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp
new file mode 100644
index 00000000..f181005a
--- /dev/null
+++ b/src/Alpha_complex/example/Fast_alpha_complex_from_off.cpp
@@ -0,0 +1,65 @@
+#include <gudhi/Alpha_complex.h>
+// to construct a simplex_tree from alpha complex
+#include <gudhi/Simplex_tree.h>
+
+#include <CGAL/Epick_d.h>
+
+#include <iostream>
+#include <string>
+
+void usage(int nbArgs, char * const progName) {
+ std::cerr << "Error: Number of arguments (" << nbArgs << ") is not correct\n";
+ std::cerr << "Usage: " << progName << " filename.off alpha_square_max_value [ouput_file.txt]\n";
+ std::cerr << " i.e.: " << progName << " ../../data/points/alphacomplexdoc.off 60.0\n";
+ exit(-1); // ----- >>
+}
+
+int main(int argc, char **argv) {
+  if ((argc != 3) && (argc != 4)) usage(argc, argv[0]);
+
+ std::string off_file_name {argv[1]};
+ double alpha_square_max_value {atof(argv[2])};
+
+  // WARNING : CGAL::Epick_d is fast but not safe (unlike CGAL::Epeck_d); it can fail
+  // in degenerate configurations (e.g. when the points are on a grid)
+ using Fast_kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
+ // ----------------------------------------------------------------------------
+ // Init of an alpha complex from an OFF file
+ // ----------------------------------------------------------------------------
+ Gudhi::alpha_complex::Alpha_complex<Fast_kernel> alpha_complex_from_file(off_file_name);
+
+ std::streambuf* streambuffer;
+ std::ofstream ouput_file_stream;
+
+ if (argc == 4) {
+ ouput_file_stream.open(std::string(argv[3]));
+ streambuffer = ouput_file_stream.rdbuf();
+ } else {
+ streambuffer = std::cout.rdbuf();
+ }
+
+ Gudhi::Simplex_tree<> simplex;
+ if (alpha_complex_from_file.create_complex(simplex, alpha_square_max_value)) {
+ std::ostream output_stream(streambuffer);
+
+ // ----------------------------------------------------------------------------
+ // Display information about the alpha complex
+ // ----------------------------------------------------------------------------
+ output_stream << "Alpha complex is of dimension " << simplex.dimension() <<
+ " - " << simplex.num_simplices() << " simplices - " <<
+ simplex.num_vertices() << " vertices." << std::endl;
+
+ output_stream << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" <<
+ std::endl;
+ for (auto f_simplex : simplex.filtration_simplex_range()) {
+ output_stream << " ( ";
+ for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
+ output_stream << vertex << " ";
+ }
+ output_stream << ") -> " << "[" << simplex.filtration(f_simplex) << "] ";
+ output_stream << std::endl;
+ }
+ }
+ ouput_file_stream.close();
+ return 0;
+}
diff --git a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
index ac11b68c..fcf80802 100644
--- a/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
+++ b/src/Alpha_complex/example/Weighted_alpha_complex_3d_from_points.cpp
@@ -10,7 +10,7 @@
// Complexity = SAFE, weighted = true, periodic = false
using Weighted_alpha_complex_3d =
Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::SAFE, true, false>;
-using Point = Weighted_alpha_complex_3d::Point_3;
+using Bare_point = Weighted_alpha_complex_3d::Bare_point_3;
using Weighted_point = Weighted_alpha_complex_3d::Weighted_point_3;
int main(int argc, char **argv) {
@@ -18,11 +18,11 @@ int main(int argc, char **argv) {
// Init of a list of points and weights from a small molecule
// ----------------------------------------------------------------------------
std::vector<Weighted_point> weighted_points;
- weighted_points.push_back(Weighted_point(Point(1, -1, -1), 4.));
- weighted_points.push_back(Weighted_point(Point(-1, 1, -1), 4.));
- weighted_points.push_back(Weighted_point(Point(-1, -1, 1), 4.));
- weighted_points.push_back(Weighted_point(Point(1, 1, 1), 4.));
- weighted_points.push_back(Weighted_point(Point(2, 2, 2), 1.));
+ weighted_points.push_back(Weighted_point(Bare_point(1, -1, -1), 4.));
+ weighted_points.push_back(Weighted_point(Bare_point(-1, 1, -1), 4.));
+ weighted_points.push_back(Weighted_point(Bare_point(-1, -1, 1), 4.));
+ weighted_points.push_back(Weighted_point(Bare_point(1, 1, 1), 4.));
+ weighted_points.push_back(Weighted_point(Bare_point(2, 2, 2), 1.));
// ----------------------------------------------------------------------------
// Init of an alpha complex from the list of points
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index 8919cdb9..f2a05e95 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -20,11 +20,12 @@
#include <math.h> // isnan, fmax
#include <CGAL/Delaunay_triangulation.h>
-#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h> // For EXACT or SAFE version
+#include <CGAL/Epick_d.h> // For FAST version
#include <CGAL/Spatial_sort_traits_adapter_d.h>
#include <CGAL/property_map.h> // for CGAL::Identity_property_map
-#include <CGAL/NT_converter.h>
#include <CGAL/version.h> // for CGAL_VERSION_NR
+#include <CGAL/NT_converter.h>
#include <Eigen/src/Core/util/Macros.h> // for EIGEN_VERSION_AT_LEAST
@@ -39,17 +40,20 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Alpha_complex is only available for CGAL >= 4.11
#endif
#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
+# error Alpha_complex is only available for Eigen3 >= 3.1.0 installed with CGAL
#endif
namespace Gudhi {
namespace alpha_complex {
+template<typename D> struct Is_Epeck_D { static const bool value = false; };
+template<typename D> struct Is_Epeck_D<CGAL::Epeck_d<D>> { static const bool value = true; };
+
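+// Editorial sketch, not part of the patch: the trait above resolves by partial
+// specialization at compile time, so assertions like these would hold.
+// static_assert(Is_Epeck_D<CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>::value,
+//               "Epeck_d matches the partial specialization");
+// static_assert(!Is_Epeck_D<CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>::value,
+//               "other kernels fall back to the primary template");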
/**
* \class Alpha_complex Alpha_complex.h gudhi/Alpha_complex.h
* \brief Alpha complex data structure.
@@ -63,17 +67,31 @@ namespace alpha_complex {
*
* Please refer to \ref alpha_complex for examples.
*
- * The complex is a template class requiring an Epick_d <a target="_blank"
+ * The complex is a template class requiring an <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>,
+ * or an <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epick__d.html">CGAL::Epick_d</a> <a target="_blank"
* href="http://doc.cgal.org/latest/Kernel_d/index.html#Chapter_dD_Geometry_Kernel">dD Geometry Kernel</a>
- * \cite cgal:s-gkd-15b from CGAL as template, default value is <a target="_blank"
- * href="http://doc.cgal.org/latest/Kernel_d/classCGAL_1_1Epick__d.html">CGAL::Epick_d</a>
+ * \cite cgal:s-gkd-19b from CGAL as template, default value is <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>
* < <a target="_blank" href="http://doc.cgal.org/latest/Kernel_23/classCGAL_1_1Dynamic__dimension__tag.html">
* CGAL::Dynamic_dimension_tag </a> >
*
- * \remark When Alpha_complex is constructed with an infinite value of alpha, the complex is a Delaunay complex.
- *
+ * \remark
+ * - When Alpha_complex is constructed with an infinite value of alpha, the complex is a Delaunay complex.
+ * - Using the default `CGAL::Epeck_d` makes the construction safe. If you pass exact=true to create_complex, the
+ * filtration values are the exact ones converted to the filtration value type of the simplicial complex. This can be
+ * very slow. If you pass exact=false (the default), the filtration values are only guaranteed to have a small
+ * multiplicative error compared to the exact value, see <code><a class="el" target="_blank"
+ * href="https://doc.cgal.org/latest/Number_types/classCGAL_1_1Lazy__exact__nt.html">
+ * CGAL::Lazy_exact_nt<NT>::set_relative_precision_of_to_double</a></code> for details. A drawback, when computing
+ * persistence, is that an empty exact interval [10^12,10^12] may become a non-empty approximate interval
+ * [10^12,10^12+10^6]. Using `CGAL::Epick_d` makes the computations slightly faster, and the combinatorics are still
+ * exact, but the computation of filtration values can exceptionally be arbitrarily bad. In all cases, we still
+ * guarantee that the output is a valid filtration (faces have a filtration value no larger than their cofaces).
+ * - For performance reasons, it is advised to use `Alpha_complex` with \ref cgal &ge; 5.0.0.
*/
-template<class Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>>
+template<class Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>>
class Alpha_complex {
public:
// Add an int in TDS to save point index in the structure
@@ -103,8 +121,8 @@ class Alpha_complex {
// size_type type from CGAL.
typedef typename Delaunay_triangulation::size_type size_type;
- // Map type to switch from simplex tree vertex handle to CGAL vertex iterator.
- typedef typename std::map< std::size_t, CGAL_vertex_iterator > Vector_vertex_iterator;
+ // Structure to switch from simplex tree vertex handle to CGAL vertex iterator.
+ typedef typename std::vector< CGAL_vertex_iterator > Vector_vertex_iterator;
private:
/** \brief Vertex iterator vector to switch from simplex tree vertex handle to CGAL vertex iterator.
@@ -173,17 +191,15 @@ class Alpha_complex {
return vertex_handle_to_iterator_.at(vertex)->point();
}
- /** \brief number_of_vertices returns the number of vertices (same as the number of points).
- *
- * @return The number of vertices.
- */
- std::size_t number_of_vertices() const {
- return vertex_handle_to_iterator_.size();
- }
-
private:
template<typename InputPointRange >
void init_from_range(const InputPointRange& points) {
+ #if CGAL_VERSION_NR < 1050000000
+ if (Is_Epeck_D<Kernel>::value)
+ std::cerr << "It is strongly advised to use a CGAL version >= 5.0 with Epeck_d Kernel for performance reasons."
+ << std::endl;
+ #endif
+
auto first = std::begin(points);
auto last = std::end(points);
@@ -214,14 +230,16 @@ class Alpha_complex {
hint = pos->full_cell();
}
// --------------------------------------------------------------------------------------------
- // double map to retrieve simplex tree vertex handles from CGAL vertex iterator and vice versa
+ // structure to retrieve CGAL points from vertex handle - one vertex handle per point.
+    // Needs to be built beforehand, as vertex handles arrive in no particular order.
+ vertex_handle_to_iterator_.resize(point_cloud.size());
// Loop on triangulation vertices list
for (CGAL_vertex_iterator vit = triangulation_->vertices_begin(); vit != triangulation_->vertices_end(); ++vit) {
if (!triangulation_->is_infinite(*vit)) {
#ifdef DEBUG_TRACES
std::cout << "Vertex insertion - " << vit->data() << " -> " << vit->point() << std::endl;
#endif // DEBUG_TRACES
- vertex_handle_to_iterator_.emplace(vit->data(), vit);
+ vertex_handle_to_iterator_[vit->data()] = vit;
}
}
// --------------------------------------------------------------------------------------------
@@ -237,6 +255,8 @@ class Alpha_complex {
* @param[in] complex SimplicialComplexForAlpha to be created.
* @param[in] max_alpha_square maximum for alpha square value. Default value is +\f$\infty\f$, and there is very
* little point using anything else since it does not save time.
+ * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
*
* @return true if creation succeeds, false otherwise.
*
@@ -248,7 +268,8 @@ class Alpha_complex {
template <typename SimplicialComplexForAlpha,
typename Filtration_value = typename SimplicialComplexForAlpha::Filtration_value>
bool create_complex(SimplicialComplexForAlpha& complex,
- Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity()) {
+ Filtration_value max_alpha_square = std::numeric_limits<Filtration_value>::infinity(),
+ bool exact = false) {
// From SimplicialComplexForAlpha type required to insert into a simplicial complex (with or without subfaces).
typedef typename SimplicialComplexForAlpha::Vertex_handle Vertex_handle;
typedef typename SimplicialComplexForAlpha::Simplex_handle Simplex_handle;
@@ -324,9 +345,13 @@ class Alpha_complex {
if (f_simplex_dim > 0) {
// squared_radius function initialization
Squared_Radius squared_radius = kernel_.compute_squared_radius_d_object();
- CGAL::NT_converter<typename Geom_traits::FT, Filtration_value> cv;
- alpha_complex_filtration = cv(squared_radius(pointVector.begin(), pointVector.end()));
+ CGAL::NT_converter<typename Geom_traits::FT, Filtration_value> cv;
+ auto sqrad = squared_radius(pointVector.begin(), pointVector.end());
+#if CGAL_VERSION_NR >= 1050000000
+ if(exact) CGAL::exact(sqrad);
+#endif
+ alpha_complex_filtration = cv(sqrad);
}
complex.assign_filtration(f_simplex, alpha_complex_filtration);
#ifdef DEBUG_TRACES
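A minimal sketch of the new `exact` flag added to `create_complex` above (assuming the default `CGAL::Epeck_d` kernel; the points are illustrative):

#include <gudhi/Alpha_complex.h>
#include <gudhi/Simplex_tree.h>
#include <CGAL/Epeck_d.h>
#include <limits>
#include <vector>

using Kernel = CGAL::Epeck_d<CGAL::Dimension_tag<2>>;

int main() {
  std::vector<Kernel::Point_d> points;
  points.push_back(Kernel::Point_d(0., 0.));
  points.push_back(Kernel::Point_d(1., 0.));
  points.push_back(Kernel::Point_d(0., 1.));
  Gudhi::alpha_complex::Alpha_complex<Kernel> ac(points);
  Gudhi::Simplex_tree<> stree;
  using Filtration_value = Gudhi::Simplex_tree<>::Filtration_value;
  // exact = true: slower, but filtration values are the exact ones converted
  // to the simplicial complex's filtration value type.
  ac.create_complex(stree, std::numeric_limits<Filtration_value>::infinity(), true);
  return 0;
}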
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index 13ebb9c1..7f96c94c 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -43,7 +43,7 @@
#include <vector>
#include <unordered_map>
#include <stdexcept>
-#include <cstddef>
+#include <cstddef> // for std::size_t
#include <memory> // for std::unique_ptr
#include <type_traits> // for std::conditional and std::enable_if
#include <limits> // for numeric_limits<>
@@ -97,7 +97,7 @@ struct Value_from_iterator<complexity::EXACT> {
* \details
* The data structure is constructing a <a href="https://doc.cgal.org/latest/Alpha_shapes_3/index.html">CGAL 3D Alpha
* Shapes</a> from a range of points (can be read from an OFF file, cf. Points_off_reader).
- * Duplicate points are inserted once in the Alpha_complex. This is the reason why the vertices may be not contiguous.
+ * Duplicate points are inserted once in the Alpha_complex.
*
* \tparam Complexity shall be `Gudhi::alpha_complex::complexity` type. Default value is
* `Gudhi::alpha_complex::complexity::SAFE`.
@@ -225,23 +225,23 @@ class Alpha_complex_3d {
* Must be compatible with double. */
using FT = typename Alpha_shape_3::FT;
- /** \brief Gives public access to the Point_3 type. Here is a Point_3 constructor example:
+  /** \brief Gives public access to the Bare_point_3 (bare, i.e. unweighted) type.
+ * Here is a Bare_point_3 constructor example:
\code{.cpp}
using Alpha_complex_3d = Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::SAFE, false, false>;
// x0 = 1., y0 = -1.1, z0 = -1.
-Alpha_complex_3d::Point_3 p0(1., -1.1, -1.);
+Alpha_complex_3d::Bare_point_3 p0(1., -1.1, -1.);
\endcode
* */
- using Point_3 = typename Kernel::Point_3;
+ using Bare_point_3 = typename Kernel::Point_3;
/** \brief Gives public access to the Weighted_point_3 type. A Weighted point can be constructed as follows:
\code{.cpp}
-using Weighted_alpha_complex_3d =
- Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::SAFE, true, false>;
+using Weighted_alpha_complex_3d = Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::SAFE, true, false>;
// x0 = 1., y0 = -1.1, z0 = -1., weight = 4.
-Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point_3(1., -1.1, -1.), 4.);
+Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Bare_point_3(1., -1.1, -1.), 4.);
\endcode
*
* Note: This type is defined to void if Alpha complex is not weighted.
@@ -249,6 +249,11 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
* */
using Weighted_point_3 = typename Triangulation_3<Kernel, Tds, Weighted, Periodic>::Weighted_point_3;
+ /** \brief `Alpha_complex_3d::Point_3` type is either a `Alpha_complex_3d::Bare_point_3` (Weighted = false) or a
+ * `Alpha_complex_3d::Weighted_point_3` (Weighted = true).
+ */
+ using Point_3 = typename Alpha_shape_3::Point;
+
private:
using Dispatch =
CGAL::Dispatch_output_iterator<CGAL::cpp11::tuple<CGAL::Object, FT>,
@@ -264,13 +269,12 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
public:
/** \brief Alpha_complex constructor from a list of points.
*
- * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3` or
- * `Alpha_complex_3d::Weighted_point_3`.
+ * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3`.
*
* @pre Available if Alpha_complex_3d is not Periodic.
*
* The type InputPointRange must be a range for which std::begin and std::end return input iterators on a
- * `Alpha_complex_3d::Point_3` or a `Alpha_complex_3d::Weighted_point_3`.
+ * `Alpha_complex_3d::Point_3`.
*/
template <typename InputPointRange>
Alpha_complex_3d(const InputPointRange& points) {
@@ -284,13 +288,13 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
*
* @exception std::invalid_argument In debug mode, if points and weights do not have the same size.
*
- * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3`.
+ * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Bare_point_3`.
* @param[in] weights Range of weights on points. Weights shall be in double.
*
* @pre Available if Alpha_complex_3d is Weighted and not Periodic.
*
* The type InputPointRange must be a range for which std::begin and
- * std::end return input iterators on a `Alpha_complex_3d::Point_3`.
+ * std::end return input iterators on a `Alpha_complex_3d::Bare_point_3`.
* The type WeightRange must be a range for which std::begin and
* std::end return an input iterator on a double.
*/
@@ -318,8 +322,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
*
* @exception std::invalid_argument In debug mode, if the size of the cuboid in every directions is not the same.
*
- * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3` or
- * `Alpha_complex_3d::Weighted_point_3`.
+ * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3`.
* @param[in] x_min Iso-oriented cuboid x_min.
* @param[in] y_min Iso-oriented cuboid y_min.
* @param[in] z_min Iso-oriented cuboid z_min.
@@ -330,7 +333,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
* @pre Available if Alpha_complex_3d is Periodic.
*
* The type InputPointRange must be a range for which std::begin and std::end return input iterators on a
- * `Alpha_complex_3d::Point_3` or a `Alpha_complex_3d::Weighted_point_3`.
+ * `Alpha_complex_3d::Point_3`.
*
* @note In weighted version, please check weights are greater than zero, and lower than 1/64*cuboid length
* squared.
@@ -366,7 +369,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
* @exception std::invalid_argument In debug mode, if a weight is negative, zero, or greater than 1/64*cuboid length
* squared.
*
- * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Point_3`.
+ * @param[in] points Range of points to triangulate. Points must be in `Alpha_complex_3d::Bare_point_3`.
* @param[in] weights Range of weights on points. Weights shall be in double.
* @param[in] x_min Iso-oriented cuboid x_min.
* @param[in] y_min Iso-oriented cuboid y_min.
@@ -378,7 +381,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
* @pre Available if Alpha_complex_3d is Weighted and Periodic.
*
* The type InputPointRange must be a range for which std::begin and
- * std::end return input iterators on a `Alpha_complex_3d::Point_3`.
+ * std::end return input iterators on a `Alpha_complex_3d::Bare_point_3`.
* The type WeightRange must be a range for which std::begin and
* std::end return an input iterator on a double.
* The type of x_min, y_min, z_min, x_max, y_max and z_max must be a double.
@@ -452,9 +455,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
return false; // ----- >>
}
- // using Filtration_value = typename SimplicialComplexForAlpha3d::Filtration_value;
using Complex_vertex_handle = typename SimplicialComplexForAlpha3d::Vertex_handle;
- using Alpha_shape_simplex_tree_map = std::unordered_map<Alpha_vertex_handle, Complex_vertex_handle>;
using Simplex_tree_vector_vertex = std::vector<Complex_vertex_handle>;
#ifdef DEBUG_TRACES
@@ -474,7 +475,6 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
std::cout << "filtration_with_alpha_values returns : " << objects.size() << " objects" << std::endl;
#endif // DEBUG_TRACES
- Alpha_shape_simplex_tree_map map_cgal_simplex_tree;
using Alpha_value_iterator = typename std::vector<FT>::const_iterator;
Alpha_value_iterator alpha_value_iterator = alpha_values.begin();
for (auto object_iterator : objects) {
@@ -484,7 +484,8 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
if (const Cell_handle* cell = CGAL::object_cast<Cell_handle>(&object_iterator)) {
for (auto i = 0; i < 4; i++) {
#ifdef DEBUG_TRACES
- std::cout << "from cell[" << i << "]=" << (*cell)->vertex(i)->point() << std::endl;
+ std::cout << "from cell[" << i << "] - Point coordinates (" << (*cell)->vertex(i)->point() << ")"
+ << std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*cell)->vertex(i));
}
@@ -495,7 +496,8 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
for (auto i = 0; i < 4; i++) {
if ((*facet).second != i) {
#ifdef DEBUG_TRACES
- std::cout << "from facet=[" << i << "]" << (*facet).first->vertex(i)->point() << std::endl;
+ std::cout << "from facet=[" << i << "] - Point coordinates (" << (*facet).first->vertex(i)->point() << ")"
+ << std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*facet).first->vertex(i));
}
@@ -506,7 +508,8 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
} else if (const Edge* edge = CGAL::object_cast<Edge>(&object_iterator)) {
for (auto i : {(*edge).second, (*edge).third}) {
#ifdef DEBUG_TRACES
- std::cout << "from edge[" << i << "]=" << (*edge).first->vertex(i)->point() << std::endl;
+ std::cout << "from edge[" << i << "] - Point coordinates (" << (*edge).first->vertex(i)->point() << ")"
+ << std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*edge).first->vertex(i));
}
@@ -516,7 +519,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
} else if (const Alpha_vertex_handle* vertex = CGAL::object_cast<Alpha_vertex_handle>(&object_iterator)) {
#ifdef DEBUG_TRACES
count_vertices++;
- std::cout << "from vertex=" << (*vertex)->point() << std::endl;
+ std::cout << "from vertex - Point coordinates (" << (*vertex)->point() << ")" << std::endl;
#endif // DEBUG_TRACES
vertex_list.push_back((*vertex));
}
@@ -528,7 +531,8 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
// alpha shape not found
Complex_vertex_handle vertex = map_cgal_simplex_tree.size();
#ifdef DEBUG_TRACES
- std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] not found - insert " << vertex << std::endl;
+ std::cout << "Point (" << the_alpha_shape_vertex->point() << ") not found - insert new vertex id " << vertex
+ << std::endl;
#endif // DEBUG_TRACES
the_simplex.push_back(vertex);
map_cgal_simplex_tree.emplace(the_alpha_shape_vertex, vertex);
@@ -536,7 +540,7 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
// alpha shape found
Complex_vertex_handle vertex = the_map_iterator->second;
#ifdef DEBUG_TRACES
- std::cout << "vertex [" << the_alpha_shape_vertex->point() << "] found in " << vertex << std::endl;
+ std::cout << "Point (" << the_alpha_shape_vertex->point() << ") found as vertex id " << vertex << std::endl;
#endif // DEBUG_TRACES
the_simplex.push_back(vertex);
}
@@ -567,9 +571,32 @@ Weighted_alpha_complex_3d::Weighted_point_3 wp0(Weighted_alpha_complex_3d::Point
return true;
}
+  /** \brief get_point returns the point corresponding to the given vertex.
+ *
+ * @param[in] vertex Vertex handle of the point to retrieve.
+ * @return The point found.
+   * @exception std::out_of_range Thrown if vertex is out of range (cf. std::vector::at).
+ */
+ const Point_3& get_point(std::size_t vertex) {
+ if (map_cgal_simplex_tree.size() != cgal_vertex_iterator_vector.size()) {
+ cgal_vertex_iterator_vector.resize(map_cgal_simplex_tree.size());
+ for (auto map_iterator : map_cgal_simplex_tree) {
+ cgal_vertex_iterator_vector[map_iterator.second] = map_iterator.first;
+ }
+    }
+ auto cgal_vertex_iterator = cgal_vertex_iterator_vector.at(vertex);
+ return cgal_vertex_iterator->point();
+ }
+
private:
  // use of a unique_ptr on the CGAL Alpha_shape_3, as its copy and default constructors are not available - no manual free is needed
std::unique_ptr<Alpha_shape_3> alpha_shape_3_ptr_;
+
+  // Map to convert a CGAL vertex iterator into a simplex tree vertex handle.
+  std::unordered_map<Alpha_vertex_handle, std::size_t> map_cgal_simplex_tree;
+  // Vector to convert a simplex tree vertex handle back into a CGAL vertex iterator.
+  std::vector<Alpha_vertex_handle> cgal_vertex_iterator_vector;
};
} // namespace alpha_complex
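For reference, the new get_point accessor added above can be exercised as follows. This is a minimal sketch, not part of the patch; it assumes the Fast_alpha_complex_3d alias that the unit tests below use, and it calls create_complex first, since the vertex map that get_point reads is only filled during complex creation:

    #include <gudhi/Alpha_complex_3d.h>
    #include <gudhi/Simplex_tree.h>

    #include <iostream>
    #include <stdexcept>
    #include <vector>

    using Fast_alpha_complex_3d =
        Gudhi::alpha_complex::Alpha_complex_3d<Gudhi::alpha_complex::complexity::FAST, false, false>;
    using Bare_point_3 = Fast_alpha_complex_3d::Bare_point_3;

    int main() {
      std::vector<Bare_point_3> points;
      points.push_back(Bare_point_3(0., 0., 0.));
      points.push_back(Bare_point_3(1., 0., 0.));
      points.push_back(Bare_point_3(0., 1., 0.));
      points.push_back(Bare_point_3(0., 0., 1.));

      Fast_alpha_complex_3d alpha_complex(points);
      Gudhi::Simplex_tree<> stree;
      alpha_complex.create_complex(stree);  // fills the vertex map used by get_point

      std::cout << "vertex 0 is " << alpha_complex.get_point(0) << std::endl;
      try {
        alpha_complex.get_point(points.size());  // one past the end
      } catch (const std::out_of_range&) {
        std::cout << "out of range, as documented" << std::endl;
      }
      return 0;
    }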
diff --git a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
index 1102838a..cd698a27 100644
--- a/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_3d_unit_test.cpp
@@ -56,21 +56,52 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// -----------------
std::cout << "Fast alpha complex 3d" << std::endl;
- Fast_alpha_complex_3d alpha_complex(get_points<Fast_alpha_complex_3d::Point_3>());
+ std::vector<Fast_alpha_complex_3d::Bare_point_3> points = get_points<Fast_alpha_complex_3d::Bare_point_3>();
+ Fast_alpha_complex_3d alpha_complex(points);
Gudhi::Simplex_tree<> stree;
alpha_complex.create_complex(stree);
+ for (std::size_t index = 0; index < points.size(); index++) {
+ bool found = false;
+ for (auto point : points) {
+ if (point == alpha_complex.get_point(index)) {
+ found = true;
+ break;
+ }
+ }
+ // Check all points from alpha complex are found in the input point cloud
+ BOOST_CHECK(found);
+ }
+ // Exception if we go out of range
+ BOOST_CHECK_THROW(alpha_complex.get_point(points.size()), std::out_of_range);
+
// -----------------
// Exact version
// -----------------
std::cout << "Exact alpha complex 3d" << std::endl;
- Exact_alpha_complex_3d exact_alpha_complex(get_points<Exact_alpha_complex_3d::Point_3>());
+ std::vector<Exact_alpha_complex_3d::Bare_point_3> exact_points = get_points<Exact_alpha_complex_3d::Bare_point_3>();
+ Exact_alpha_complex_3d exact_alpha_complex(exact_points);
Gudhi::Simplex_tree<> exact_stree;
exact_alpha_complex.create_complex(exact_stree);
+ for (std::size_t index = 0; index < exact_points.size(); index++) {
+ bool found = false;
+ Exact_alpha_complex_3d::Bare_point_3 ap = exact_alpha_complex.get_point(index);
+ for (auto point : points) {
+ if ((point.x() == ap.x()) && (point.y() == ap.y()) && (point.z() == ap.z())) {
+ found = true;
+ break;
+ }
+ }
+ // Check all points from alpha complex are found in the input point cloud
+ BOOST_CHECK(found);
+ }
+ // Exception if we go out of range
+ BOOST_CHECK_THROW(exact_alpha_complex.get_point(exact_points.size()), std::out_of_range);
+
// ---------------------
// Compare both versions
// ---------------------
@@ -110,11 +141,27 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_3d_from_points) {
// -----------------
std::cout << "Safe alpha complex 3d" << std::endl;
- Safe_alpha_complex_3d safe_alpha_complex(get_points<Safe_alpha_complex_3d::Point_3>());
+ std::vector<Safe_alpha_complex_3d::Bare_point_3> safe_points = get_points<Safe_alpha_complex_3d::Bare_point_3>();
+ Safe_alpha_complex_3d safe_alpha_complex(safe_points);
Gudhi::Simplex_tree<> safe_stree;
safe_alpha_complex.create_complex(safe_stree);
+ for (std::size_t index = 0; index < safe_points.size(); index++) {
+ bool found = false;
+ Safe_alpha_complex_3d::Bare_point_3 ap = safe_alpha_complex.get_point(index);
+ for (auto point : points) {
+ if ((point.x() == ap.x()) && (point.y() == ap.y()) && (point.z() == ap.z())) {
+ found = true;
+ break;
+ }
+ }
+ // Check all points from alpha complex are found in the input point cloud
+ BOOST_CHECK(found);
+ }
+ // Exception if we go out of range
+ BOOST_CHECK_THROW(safe_alpha_complex.get_point(safe_points.size()), std::out_of_range);
+
// ---------------------
// Compare both versions
// ---------------------
diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
index 01e4cee3..27b671dd 100644
--- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
@@ -15,6 +15,7 @@
#include <CGAL/Delaunay_triangulation.h>
#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
#include <cmath> // float comparison
#include <limits>
@@ -28,12 +29,16 @@
#include <gudhi/Unitary_tests_utils.h>
// Use dynamic_dimension_tag for the user to be able to set dimension
-typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kernel_d;
+typedef CGAL::Epeck_d< CGAL::Dynamic_dimension_tag > Exact_kernel_d;
// Use static dimension_tag for the user not to be able to set dimension
-typedef CGAL::Epick_d< CGAL::Dimension_tag<3> > Kernel_s;
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<3> > Exact_kernel_s;
+// Use dynamic_dimension_tag for the user to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Inexact_kernel_d;
+// Use static dimension_tag for the user not to be able to set dimension
+typedef CGAL::Epick_d< CGAL::Dimension_tag<3> > Inexact_kernel_s;
// The triangulation uses the default instantiation of the TriangulationDataStructure template parameter
-typedef boost::mpl::list<Kernel_d, Kernel_s> list_of_kernel_variants;
+typedef boost::mpl::list<Exact_kernel_d, Exact_kernel_s, Inexact_kernel_d, Inexact_kernel_s> list_of_kernel_variants;
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of_kernel_variants) {
// ----------------------------------------------------------------------------
@@ -48,20 +53,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of
Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_file(off_file_name);
- std::cout << "alpha_complex_from_points.number_of_vertices()=" << alpha_complex_from_file.number_of_vertices()
- << std::endl;
- BOOST_CHECK(alpha_complex_from_file.number_of_vertices() == 7);
-
Gudhi::Simplex_tree<> simplex_tree_60;
BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_60, max_alpha_square_value));
std::cout << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl;
BOOST_CHECK(simplex_tree_60.dimension() == 2);
- std::cout << "alpha_complex_from_points.number_of_vertices()=" << alpha_complex_from_file.number_of_vertices()
- << std::endl;
- BOOST_CHECK(alpha_complex_from_file.number_of_vertices() == 7);
-
std::cout << "simplex_tree_60.num_vertices()=" << simplex_tree_60.num_vertices() << std::endl;
BOOST_CHECK(simplex_tree_60.num_vertices() == 7);
@@ -86,7 +83,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of
}
// Use static dimension_tag for the user not to be able to set dimension
-typedef CGAL::Epick_d< CGAL::Dimension_tag<4> > Kernel_4;
+typedef CGAL::Epeck_d< CGAL::Dimension_tag<4> > Kernel_4;
typedef Kernel_4::Point_d Point_4;
typedef std::vector<Point_4> Vector_4_Points;
@@ -123,10 +120,6 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
Gudhi::Simplex_tree<> simplex_tree;
BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
- std::cout << "alpha_complex_from_points.number_of_vertices()=" << alpha_complex_from_points.number_of_vertices()
- << std::endl;
- BOOST_CHECK(alpha_complex_from_points.number_of_vertices() == points.size());
-
// Another way to check num_simplices
std::cout << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
@@ -146,7 +139,7 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
std::cout << "simplex_tree.dimension()=" << simplex_tree.dimension() << std::endl;
BOOST_CHECK(simplex_tree.dimension() == 3);
std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
- BOOST_CHECK(simplex_tree.num_vertices() == 4);
+ BOOST_CHECK(simplex_tree.num_vertices() == points.size());
for (auto f_simplex : simplex_tree.filtration_simplex_range()) {
switch (simplex_tree.dimension(f_simplex)) {
@@ -256,10 +249,6 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, lis
Gudhi::Simplex_tree<> simplex_tree;
BOOST_CHECK(!alpha_complex_from_points.create_complex(simplex_tree));
- std::cout << "alpha_complex_from_points.number_of_vertices()=" << alpha_complex_from_points.number_of_vertices()
- << std::endl;
- BOOST_CHECK(alpha_complex_from_points.number_of_vertices() == points.size());
-
std::cout << "simplex_tree.num_simplices()=" << simplex_tree.num_simplices() << std::endl;
BOOST_CHECK(simplex_tree.num_simplices() == 0);
@@ -267,5 +256,45 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, lis
BOOST_CHECK(simplex_tree.dimension() == -1);
std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices() << std::endl;
- BOOST_CHECK(simplex_tree.num_vertices() == 0);
+ BOOST_CHECK(simplex_tree.num_vertices() == points.size());
+}
+
+using Inexact_kernel_2 = CGAL::Epick_d< CGAL::Dimension_tag<2> >;
+using Exact_kernel_2 = CGAL::Epeck_d< CGAL::Dimension_tag<2> >;
+using list_of_kernel_2_variants = boost::mpl::list<Inexact_kernel_2, Exact_kernel_2>;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel, list_of_kernel_2_variants) {
+ std::cout << "========== Alpha_complex_with_duplicated_points ==========" << std::endl;
+
+ using Point = typename TestedKernel::Point_d;
+ using Vector_of_points = std::vector<Point>;
+
+ // ----------------------------------------------------------------------------
+ // Init of a list of points
+ // ----------------------------------------------------------------------------
+ Vector_of_points points;
+ points.push_back(Point(1.0, 1.0));
+ points.push_back(Point(7.0, 0.0));
+ points.push_back(Point(4.0, 6.0));
+ points.push_back(Point(9.0, 6.0));
+ points.push_back(Point(0.0, 14.0));
+ points.push_back(Point(2.0, 19.0));
+ points.push_back(Point(9.0, 17.0));
+ // duplicated points
+ points.push_back(Point(1.0, 1.0));
+ points.push_back(Point(7.0, 0.0));
+
+ // ----------------------------------------------------------------------------
+ // Init of an alpha complex from the list of points
+ // ----------------------------------------------------------------------------
+ std::cout << "Init" << std::endl;
+ Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_points(points);
+
+ Gudhi::Simplex_tree<> simplex_tree;
+ std::cout << "create_complex" << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
+
+ std::cout << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices()
+ << std::endl;
+ BOOST_CHECK(simplex_tree.num_vertices() < points.size());
}
diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt
index ad5b6314..0476c6d4 100644
--- a/src/Alpha_complex/test/CMakeLists.txt
+++ b/src/Alpha_complex/test/CMakeLists.txt
@@ -1,27 +1,27 @@
project(Alpha_complex_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Do not forget to copy test files in current binary dir
file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_executable ( Alpha_complex_test_unit Alpha_complex_unit_test.cpp )
- target_link_libraries(Alpha_complex_test_unit ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Alpha_complex_test_unit ${CGAL_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Alpha_complex_test_unit ${TBB_LIBRARIES})
endif()
- gudhi_add_coverage_test(Alpha_complex_test_unit)
+ gudhi_add_boost_test(Alpha_complex_test_unit)
add_executable ( Alpha_complex_3d_test_unit Alpha_complex_3d_unit_test.cpp )
- target_link_libraries(Alpha_complex_3d_test_unit ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Alpha_complex_3d_test_unit ${CGAL_LIBRARY})
add_executable ( Weighted_alpha_complex_3d_test_unit Weighted_alpha_complex_3d_unit_test.cpp )
- target_link_libraries(Weighted_alpha_complex_3d_test_unit ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Weighted_alpha_complex_3d_test_unit ${CGAL_LIBRARY})
add_executable ( Periodic_alpha_complex_3d_test_unit Periodic_alpha_complex_3d_unit_test.cpp )
- target_link_libraries(Periodic_alpha_complex_3d_test_unit ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Periodic_alpha_complex_3d_test_unit ${CGAL_LIBRARY})
add_executable ( Weighted_periodic_alpha_complex_3d_test_unit Weighted_periodic_alpha_complex_3d_unit_test.cpp )
- target_link_libraries(Weighted_periodic_alpha_complex_3d_test_unit ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Weighted_periodic_alpha_complex_3d_test_unit ${CGAL_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Alpha_complex_3d_test_unit ${TBB_LIBRARIES})
target_link_libraries(Weighted_alpha_complex_3d_test_unit ${TBB_LIBRARIES})
@@ -29,9 +29,9 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
target_link_libraries(Weighted_periodic_alpha_complex_3d_test_unit ${TBB_LIBRARIES})
endif()
- gudhi_add_coverage_test(Alpha_complex_3d_test_unit)
- gudhi_add_coverage_test(Weighted_alpha_complex_3d_test_unit)
- gudhi_add_coverage_test(Periodic_alpha_complex_3d_test_unit)
- gudhi_add_coverage_test(Weighted_periodic_alpha_complex_3d_test_unit)
+ gudhi_add_boost_test(Alpha_complex_3d_test_unit)
+ gudhi_add_boost_test(Weighted_alpha_complex_3d_test_unit)
+ gudhi_add_boost_test(Periodic_alpha_complex_3d_test_unit)
+ gudhi_add_boost_test(Weighted_periodic_alpha_complex_3d_test_unit)
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
index ac3791a4..731763fa 100644
--- a/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Periodic_alpha_complex_3d_unit_test.cpp
@@ -44,11 +44,11 @@ typedef boost::mpl::list<Fast_periodic_alpha_complex_3d, Safe_periodic_alpha_com
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_periodic_throw, Periodic_alpha_complex_3d, periodic_variants_type_list) {
std::cout << "Periodic alpha complex 3d exception throw" << std::endl;
- using Point_3 = typename Periodic_alpha_complex_3d::Point_3;
- std::vector<Point_3> p_points;
+ using Bare_point_3 = typename Periodic_alpha_complex_3d::Bare_point_3;
+ std::vector<Bare_point_3> p_points;
// Not important, this is not what we want to check
- p_points.push_back(Point_3(0.0, 0.0, 0.0));
+ p_points.push_back(Bare_point_3(0.0, 0.0, 0.0));
std::cout << "Check exception throw in debug mode" << std::endl;
// Check it throws an exception when the cuboid is not iso
@@ -73,13 +73,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ---------------------
std::cout << "Fast periodic alpha complex 3d" << std::endl;
- using Creator = CGAL::Creator_uniform_3<double, Fast_periodic_alpha_complex_3d::Point_3>;
+ using Creator = CGAL::Creator_uniform_3<double, Fast_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
- CGAL::Random_points_in_cube_3<Fast_periodic_alpha_complex_3d::Point_3, Creator> in_cube(1, random);
- std::vector<Fast_periodic_alpha_complex_3d::Point_3> p_points;
+ CGAL::Random_points_in_cube_3<Fast_periodic_alpha_complex_3d::Bare_point_3, Creator> in_cube(1, random);
+ std::vector<Fast_periodic_alpha_complex_3d::Bare_point_3> p_points;
for (int i = 0; i < 50; i++) {
- Fast_periodic_alpha_complex_3d::Point_3 p = *in_cube++;
+ Fast_periodic_alpha_complex_3d::Bare_point_3 p = *in_cube++;
p_points.push_back(p);
}
@@ -88,15 +88,30 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
Gudhi::Simplex_tree<> stree;
periodic_alpha_complex.create_complex(stree);
+ for (std::size_t index = 0; index < p_points.size(); index++) {
+ bool found = false;
+ Fast_periodic_alpha_complex_3d::Bare_point_3 ap = periodic_alpha_complex.get_point(index);
+ for (auto point : p_points) {
+ if ((point.x() == ap.x()) && (point.y() == ap.y()) && (point.z() == ap.z())) {
+ found = true;
+ break;
+ }
+ }
+ // Check all points from alpha complex are found in the input point cloud
+ BOOST_CHECK(found);
+ }
+ // Exception if we go out of range
+ BOOST_CHECK_THROW(periodic_alpha_complex.get_point(p_points.size()), std::out_of_range);
+
// ----------------------
// Exact periodic version
// ----------------------
std::cout << "Exact periodic alpha complex 3d" << std::endl;
- std::vector<Exact_periodic_alpha_complex_3d::Point_3> e_p_points;
+ std::vector<Exact_periodic_alpha_complex_3d::Bare_point_3> e_p_points;
for (auto p : p_points) {
- e_p_points.push_back(Exact_periodic_alpha_complex_3d::Point_3(p[0], p[1], p[2]));
+ e_p_points.push_back(Exact_periodic_alpha_complex_3d::Bare_point_3(p[0], p[1], p[2]));
}
Exact_periodic_alpha_complex_3d exact_alpha_complex(e_p_points, -1., -1., -1., 1., 1., 1.);
@@ -142,10 +157,10 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_periodic) {
// ----------------------
std::cout << "Safe periodic alpha complex 3d" << std::endl;
- std::vector<Safe_periodic_alpha_complex_3d::Point_3> s_p_points;
+ std::vector<Safe_periodic_alpha_complex_3d::Bare_point_3> s_p_points;
for (auto p : p_points) {
- s_p_points.push_back(Safe_periodic_alpha_complex_3d::Point_3(p[0], p[1], p[2]));
+ s_p_points.push_back(Safe_periodic_alpha_complex_3d::Bare_point_3(p[0], p[1], p[2]));
}
Safe_periodic_alpha_complex_3d safe_alpha_complex(s_p_points, -1., -1., -1., 1., 1., 1.);
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
index 44deb930..8035f6e8 100644
--- a/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_3d_unit_test.cpp
@@ -43,14 +43,14 @@ typedef boost::mpl::list<Fast_weighted_alpha_complex_3d, Safe_weighted_alpha_com
#ifdef GUDHI_DEBUG
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_throw, Weighted_alpha_complex_3d, weighted_variants_type_list) {
- using Point_3 = typename Weighted_alpha_complex_3d::Point_3;
- std::vector<Point_3> w_points;
- w_points.push_back(Point_3(0.0, 0.0, 0.0));
- w_points.push_back(Point_3(0.0, 0.0, 0.2));
- w_points.push_back(Point_3(0.2, 0.0, 0.2));
- // w_points.push_back(Point_3(0.6, 0.6, 0.0));
- // w_points.push_back(Point_3(0.8, 0.8, 0.2));
- // w_points.push_back(Point_3(0.2, 0.8, 0.6));
+ using Bare_point_3 = typename Weighted_alpha_complex_3d::Bare_point_3;
+ std::vector<Bare_point_3> w_points;
+ w_points.push_back(Bare_point_3(0.0, 0.0, 0.0));
+ w_points.push_back(Bare_point_3(0.0, 0.0, 0.2));
+ w_points.push_back(Bare_point_3(0.2, 0.0, 0.2));
+ // w_points.push_back(Bare_point_3(0.6, 0.6, 0.0));
+ // w_points.push_back(Bare_point_3(0.8, 0.8, 0.2));
+ // w_points.push_back(Bare_point_3(0.2, 0.8, 0.6));
// weights size is different from w_points size to make weighted Alpha_complex_3d throw in debug mode
std::vector<double> weights = {0.01, 0.005, 0.006, 0.01, 0.009, 0.001};
@@ -62,14 +62,14 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_throw, Weighted_alpha_compl
BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d, weighted_variants_type_list) {
std::cout << "Weighted alpha complex 3d from points and weights" << std::endl;
- using Point_3 = typename Weighted_alpha_complex_3d::Point_3;
- std::vector<Point_3> w_points;
- w_points.push_back(Point_3(0.0, 0.0, 0.0));
- w_points.push_back(Point_3(0.0, 0.0, 0.2));
- w_points.push_back(Point_3(0.2, 0.0, 0.2));
- w_points.push_back(Point_3(0.6, 0.6, 0.0));
- w_points.push_back(Point_3(0.8, 0.8, 0.2));
- w_points.push_back(Point_3(0.2, 0.8, 0.6));
+ using Bare_point_3 = typename Weighted_alpha_complex_3d::Bare_point_3;
+ std::vector<Bare_point_3> w_points;
+ w_points.push_back(Bare_point_3(0.0, 0.0, 0.0));
+ w_points.push_back(Bare_point_3(0.0, 0.0, 0.2));
+ w_points.push_back(Bare_point_3(0.2, 0.0, 0.2));
+ w_points.push_back(Bare_point_3(0.6, 0.6, 0.0));
+ w_points.push_back(Bare_point_3(0.8, 0.8, 0.2));
+ w_points.push_back(Bare_point_3(0.2, 0.8, 0.6));
  // weights size matches w_points size here, so the weighted Alpha_complex_3d can be built
std::vector<double> weights = {0.01, 0.005, 0.006, 0.01, 0.009, 0.001};
@@ -91,6 +91,24 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted, Weighted_alpha_complex_3d,
Gudhi::Simplex_tree<> stree_bis;
alpha_complex_w_p.create_complex(stree_bis);
+ for (std::size_t index = 0; index < weighted_points.size(); index++) {
+ bool found = false;
+ Weighted_point_3 awp = alpha_complex_w_p.get_point(index);
+ for (auto weighted_point : weighted_points) {
+ if ((weighted_point.weight() == awp.weight()) &&
+ (weighted_point.x() == awp.x()) &&
+ (weighted_point.y() == awp.y()) &&
+ (weighted_point.z() == awp.z())) {
+ found = true;
+ break;
+ }
+ }
+ // Check all points from alpha complex are found in the input point cloud
+ BOOST_CHECK(found);
+ }
+ // Exception if we go out of range
+ BOOST_CHECK_THROW(alpha_complex_w_p.get_point(weighted_points.size()), std::out_of_range);
+
// ---------------------
// Compare both versions
// ---------------------
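The weighted variant's get_point returns a weighted point, which is why the loop above compares the weight on top of the three coordinates. A hypothetical helper making that check reusable (WeightedPoint stands for any type exposing x/y/z/weight, such as the Weighted_point_3 alias these tests use):

    // A weighted point matches only if both the weight and all three
    // coordinates agree.
    template <typename WeightedPoint>
    bool same_weighted_point(const WeightedPoint& a, const WeightedPoint& b) {
      return a.weight() == b.weight() &&
             a.x() == b.x() && a.y() == b.y() && a.z() == b.z();
    }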
diff --git a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
index 670c7799..b09e92d5 100644
--- a/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_periodic_alpha_complex_3d_unit_test.cpp
@@ -47,13 +47,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_weighted_periodic_throw, Weighted_pe
wp_variants_type_list) {
std::cout << "Weighted periodic alpha complex 3d exception throw" << std::endl;
- using Creator = CGAL::Creator_uniform_3<double, typename Weighted_periodic_alpha_complex_3d::Point_3>;
+ using Creator = CGAL::Creator_uniform_3<double, typename Weighted_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
- CGAL::Random_points_in_cube_3<typename Weighted_periodic_alpha_complex_3d::Point_3, Creator> in_cube(1, random);
- std::vector<typename Weighted_periodic_alpha_complex_3d::Point_3> wp_points;
+ CGAL::Random_points_in_cube_3<typename Weighted_periodic_alpha_complex_3d::Bare_point_3, Creator> in_cube(1, random);
+ std::vector<typename Weighted_periodic_alpha_complex_3d::Bare_point_3> wp_points;
for (int i = 0; i < 50; i++) {
- typename Weighted_periodic_alpha_complex_3d::Point_3 p = *in_cube++;
+ typename Weighted_periodic_alpha_complex_3d::Bare_point_3 p = *in_cube++;
wp_points.push_back(p);
}
std::vector<double> p_weights;
@@ -117,13 +117,13 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ---------------------
std::cout << "Fast weighted periodic alpha complex 3d" << std::endl;
- using Creator = CGAL::Creator_uniform_3<double, Fast_weighted_periodic_alpha_complex_3d::Point_3>;
+ using Creator = CGAL::Creator_uniform_3<double, Fast_weighted_periodic_alpha_complex_3d::Bare_point_3>;
CGAL::Random random(7);
- CGAL::Random_points_in_cube_3<Fast_weighted_periodic_alpha_complex_3d::Point_3, Creator> in_cube(1, random);
- std::vector<Fast_weighted_periodic_alpha_complex_3d::Point_3> p_points;
+ CGAL::Random_points_in_cube_3<Fast_weighted_periodic_alpha_complex_3d::Bare_point_3, Creator> in_cube(1, random);
+ std::vector<Fast_weighted_periodic_alpha_complex_3d::Bare_point_3> p_points;
for (int i = 0; i < 50; i++) {
- Fast_weighted_periodic_alpha_complex_3d::Point_3 p = *in_cube++;
+ Fast_weighted_periodic_alpha_complex_3d::Bare_point_3 p = *in_cube++;
p_points.push_back(p);
}
std::vector<double> p_weights;
@@ -142,10 +142,10 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ----------------------
std::cout << "Exact weighted periodic alpha complex 3d" << std::endl;
- std::vector<Exact_weighted_periodic_alpha_complex_3d::Point_3> e_p_points;
+ std::vector<Exact_weighted_periodic_alpha_complex_3d::Bare_point_3> e_p_points;
for (auto p : p_points) {
- e_p_points.push_back(Exact_weighted_periodic_alpha_complex_3d::Point_3(p[0], p[1], p[2]));
+ e_p_points.push_back(Exact_weighted_periodic_alpha_complex_3d::Bare_point_3(p[0], p[1], p[2]));
}
Exact_weighted_periodic_alpha_complex_3d exact_alpha_complex(e_p_points, p_weights, -1., -1., -1., 1., 1., 1.);
@@ -191,10 +191,10 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_weighted_periodic) {
// ----------------------
std::cout << "Safe weighted periodic alpha complex 3d" << std::endl;
- std::vector<Safe_weighted_periodic_alpha_complex_3d::Point_3> s_p_points;
+ std::vector<Safe_weighted_periodic_alpha_complex_3d::Bare_point_3> s_p_points;
for (auto p : p_points) {
- s_p_points.push_back(Safe_weighted_periodic_alpha_complex_3d::Point_3(p[0], p[1], p[2]));
+ s_p_points.push_back(Safe_weighted_periodic_alpha_complex_3d::Bare_point_3(p[0], p[1], p[2]));
}
Safe_weighted_periodic_alpha_complex_3d safe_alpha_complex(s_p_points, p_weights, -1., -1., -1., 1., 1., 1.);
diff --git a/src/Alpha_complex/utilities/CMakeLists.txt b/src/Alpha_complex/utilities/CMakeLists.txt
index 5295f3cd..57b92942 100644
--- a/src/Alpha_complex/utilities/CMakeLists.txt
+++ b/src/Alpha_complex/utilities/CMakeLists.txt
@@ -7,9 +7,19 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
if (TBB_FOUND)
target_link_libraries(alpha_complex_persistence ${TBB_LIBRARIES})
endif(TBB_FOUND)
- add_test(NAME Alpha_complex_utilities_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
- "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45")
-
+ add_test(NAME Alpha_complex_utilities_safe_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "safe.pers")
+ add_test(NAME Alpha_complex_utilities_fast_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "fast.pers" "-f")
+ add_test(NAME Alpha_complex_utilities_exact_alpha_complex_persistence COMMAND $<TARGET_FILE:alpha_complex_persistence>
+ "${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off" "-p" "2" "-m" "0.45" "-o" "exact.pers" "-e")
+ if (DIFF_PATH)
+ add_test(Alpha_complex_utilities_diff_exact_alpha_complex ${DIFF_PATH}
+ "exact.pers" "safe.pers")
+ add_test(Alpha_complex_utilities_diff_fast_alpha_complex ${DIFF_PATH}
+ "fast.pers" "safe.pers")
+ endif()
+
install(TARGETS alpha_complex_persistence DESTINATION bin)
add_executable(alpha_complex_3d_persistence alpha_complex_3d_persistence.cpp)
@@ -20,21 +30,21 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_test(NAME Alpha_complex_utilities_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "safe.pers")
+ "-p" "2" "-m" "0.45" "-o" "safe_3d.pers")
add_test(NAME Alpha_complex_utilities_exact_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "exact.pers" "-e")
+ "-p" "2" "-m" "0.45" "-o" "exact_3d.pers" "-e")
add_test(NAME Alpha_complex_utilities_safe_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
- "-p" "2" "-m" "0.45" "-o" "fast.pers" "-f")
+ "-p" "2" "-m" "0.45" "-o" "fast_3d.pers" "-f")
if (DIFF_PATH)
- add_test(Alpha_complex_utilities_diff_alpha_complex_3d ${DIFF_PATH}
- "exact.pers" "safe.pers")
- add_test(Alpha_complex_utilities_diff_alpha_complex_3d ${DIFF_PATH}
- "fast.pers" "safe.pers")
+ add_test(Alpha_complex_utilities_diff_exact_alpha_complex_3d ${DIFF_PATH}
+ "exact_3d.pers" "safe_3d.pers")
+ add_test(Alpha_complex_utilities_diff_fast_alpha_complex_3d ${DIFF_PATH}
+ "fast_3d.pers" "safe_3d.pers")
endif()
add_test(NAME Alpha_complex_utilities_periodic_alpha_complex_3d_persistence COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
diff --git a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
index 2272576e..929fc2e8 100644
--- a/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_3d_persistence.cpp
@@ -62,9 +62,9 @@ bool read_cuboid_file(const std::string &cuboid_file, double &x_min, double &y_m
}
template <typename AlphaComplex3d>
-std::vector<typename AlphaComplex3d::Point_3> read_off(const std::string &off_file_points) {
+std::vector<typename AlphaComplex3d::Bare_point_3> read_off(const std::string &off_file_points) {
// Read the OFF file (input file name given as parameter) and triangulate points
- Gudhi::Points_3D_off_reader<typename AlphaComplex3d::Point_3> off_reader(off_file_points);
+ Gudhi::Points_3D_off_reader<typename AlphaComplex3d::Bare_point_3> off_reader(off_file_points);
// Check the read operation was correct
if (!off_reader.is_valid()) {
std::cerr << "Unable to read OFF file " << off_file_points << std::endl;
diff --git a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
index fab7bd30..486347cc 100644
--- a/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
+++ b/src/Alpha_complex/utilities/alpha_complex_persistence.cpp
@@ -24,63 +24,84 @@
using Simplex_tree = Gudhi::Simplex_tree<>;
using Filtration_value = Simplex_tree::Filtration_value;
-void program_options(int argc, char *argv[], std::string &off_file_points, std::string &output_file_diag,
- Filtration_value &alpha_square_max_value, int &coeff_field_characteristic,
- Filtration_value &min_persistence);
+void program_options(int argc, char *argv[], std::string &off_file_points, bool &exact, bool &fast,
+ std::string &output_file_diag, Filtration_value &alpha_square_max_value,
+ int &coeff_field_characteristic, Filtration_value &min_persistence);
int main(int argc, char **argv) {
std::string off_file_points;
std::string output_file_diag;
+ bool exact_version = false;
+ bool fast_version = false;
Filtration_value alpha_square_max_value;
int coeff_field_characteristic;
Filtration_value min_persistence;
- program_options(argc, argv, off_file_points, output_file_diag, alpha_square_max_value, coeff_field_characteristic,
- min_persistence);
+ program_options(argc, argv, off_file_points, exact_version, fast_version, output_file_diag, alpha_square_max_value,
+ coeff_field_characteristic, min_persistence);
- // ----------------------------------------------------------------------------
- // Init of an alpha complex from an OFF file
- // ----------------------------------------------------------------------------
- using Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
- Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_file(off_file_points);
+ if ((exact_version) && (fast_version)) {
+    std::cerr << "You cannot set both the exact and the fast version." << std::endl;
+ exit(-1);
+ }
Simplex_tree simplex;
- if (alpha_complex_from_file.create_complex(simplex, alpha_square_max_value)) {
- // ----------------------------------------------------------------------------
- // Display information about the alpha complex
- // ----------------------------------------------------------------------------
- std::cout << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
- << " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
-
- // Sort the simplices in the order of the filtration
- simplex.initialize_filtration();
-
- std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl;
- // Compute the persistence diagram of the complex
- Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Gudhi::persistent_cohomology::Field_Zp> pcoh(
- simplex);
- // initializes the coefficient field for homology
- pcoh.init_coefficients(coeff_field_characteristic);
-
- pcoh.compute_persistent_cohomology(min_persistence);
-
- // Output the diagram in filediag
- if (output_file_diag.empty()) {
- pcoh.output_diagram();
- } else {
- std::cout << "Result in file: " << output_file_diag << std::endl;
- std::ofstream out(output_file_diag);
- pcoh.output_diagram(out);
- out.close();
+ if (fast_version) {
+    // WARNING : CGAL::Epick_d is fast but not safe (unlike CGAL::Epeck_d):
+    // it can return incorrect results, e.g. when the points are on a grid
+ using Fast_kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
+
+ // Init of an alpha complex from an OFF file
+ Gudhi::alpha_complex::Alpha_complex<Fast_kernel> alpha_complex_from_file(off_file_points);
+
+ if (!alpha_complex_from_file.create_complex(simplex, alpha_square_max_value)) {
+ std::cerr << "Fast Alpha complex simplicial complex creation failed." << std::endl;
+ exit(-1);
}
- }
+ } else {
+ using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
+ // Init of an alpha complex from an OFF file
+ Gudhi::alpha_complex::Alpha_complex<Kernel> alpha_complex_from_file(off_file_points);
+
+ if (!alpha_complex_from_file.create_complex(simplex, alpha_square_max_value, exact_version)) {
+ std::cerr << "Alpha complex simplicial complex creation failed." << std::endl;
+ exit(-1);
+ }
+ }
+ // ----------------------------------------------------------------------------
+ // Display information about the alpha complex
+ // ----------------------------------------------------------------------------
+ std::cout << "Simplicial complex is of dimension " << simplex.dimension() << " - " << simplex.num_simplices()
+ << " simplices - " << simplex.num_vertices() << " vertices." << std::endl;
+
+ // Sort the simplices in the order of the filtration
+ simplex.initialize_filtration();
+
+ std::cout << "Simplex_tree dim: " << simplex.dimension() << std::endl;
+ // Compute the persistence diagram of the complex
+ Gudhi::persistent_cohomology::Persistent_cohomology<Simplex_tree, Gudhi::persistent_cohomology::Field_Zp> pcoh(
+ simplex);
+ // initializes the coefficient field for homology
+ pcoh.init_coefficients(coeff_field_characteristic);
+
+ pcoh.compute_persistent_cohomology(min_persistence);
+
+ // Output the diagram in filediag
+ if (output_file_diag.empty()) {
+ pcoh.output_diagram();
+ } else {
+ std::cout << "Result in file: " << output_file_diag << std::endl;
+ std::ofstream out(output_file_diag);
+ pcoh.output_diagram(out);
+ out.close();
+ }
return 0;
}
-void program_options(int argc, char *argv[], std::string &off_file_points, std::string &output_file_diag,
- Filtration_value &alpha_square_max_value, int &coeff_field_characteristic,
- Filtration_value &min_persistence) {
+void program_options(int argc, char *argv[], std::string &off_file_points, bool &exact, bool &fast,
+ std::string &output_file_diag, Filtration_value &alpha_square_max_value,
+ int &coeff_field_characteristic, Filtration_value &min_persistence) {
namespace po = boost::program_options;
po::options_description hidden("Hidden options");
hidden.add_options()("input-file", po::value<std::string>(&off_file_points),
@@ -88,6 +109,10 @@ void program_options(int argc, char *argv[], std::string &off_file_points, std::
po::options_description visible("Allowed options", 100);
visible.add_options()("help,h", "produce help message")(
+      "exact,e", po::bool_switch(&exact),
+      "To activate the exact version of Alpha complex (default is false, not available if fast is set)")(
+      "fast,f", po::bool_switch(&fast),
+      "To activate the fast version of Alpha complex (default is false, not available if exact is set)")(
"output-file,o", po::value<std::string>(&output_file_diag)->default_value(std::string()),
"Name of file in which the persistence diagram is written. Default print in std::cout")(
"max-alpha-square-value,r", po::value<Filtration_value>(&alpha_square_max_value)
diff --git a/src/Alpha_complex/utilities/alphacomplex.md b/src/Alpha_complex/utilities/alphacomplex.md
index fcd16a3b..527598a9 100644
--- a/src/Alpha_complex/utilities/alphacomplex.md
+++ b/src/Alpha_complex/utilities/alphacomplex.md
@@ -46,6 +46,8 @@ for the Alpha complex construction.
coefficient field Z/pZ for computing homology.
* `-m [ --min-persistence ]` (default = 0) Minimal lifetime of homology feature
to be recorded. Enter a negative value to see zero length intervals.
+* `-e [ --exact ]` for the exact computation version.
+* `-f [ --fast ]` for the fast computation version.
**Example**
diff --git a/src/Bitmap_cubical_complex/test/CMakeLists.txt b/src/Bitmap_cubical_complex/test/CMakeLists.txt
index d2f002a6..eb7eb6b5 100644
--- a/src/Bitmap_cubical_complex/test/CMakeLists.txt
+++ b/src/Bitmap_cubical_complex/test/CMakeLists.txt
@@ -1,14 +1,13 @@
project(Bitmap_cubical_complex_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
# Do not forget to copy test files in current binary dir
file(COPY "${CMAKE_SOURCE_DIR}/data/bitmap/sinusoid.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_executable ( Bitmap_cubical_complex_test_unit Bitmap_test.cpp )
-target_link_libraries(Bitmap_cubical_complex_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Bitmap_cubical_complex_test_unit ${TBB_LIBRARIES})
endif()
-gudhi_add_coverage_test(Bitmap_cubical_complex_test_unit)
+gudhi_add_boost_test(Bitmap_cubical_complex_test_unit)
diff --git a/src/Bottleneck_distance/include/gudhi/Bottleneck.h b/src/Bottleneck_distance/include/gudhi/Bottleneck.h
index 82ba9f68..e466828a 100644
--- a/src/Bottleneck_distance/include/gudhi/Bottleneck.h
+++ b/src/Bottleneck_distance/include/gudhi/Bottleneck.h
@@ -26,7 +26,7 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error bottleneck_distance is only available for CGAL >= 4.11
#endif
namespace Gudhi {
diff --git a/src/Bottleneck_distance/test/CMakeLists.txt b/src/Bottleneck_distance/test/CMakeLists.txt
index ec2d045f..3acd3d86 100644
--- a/src/Bottleneck_distance/test/CMakeLists.txt
+++ b/src/Bottleneck_distance/test/CMakeLists.txt
@@ -1,14 +1,13 @@
project(Bottleneck_distance_tests)
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_test_coverage)
+ include(GUDHI_boost_test)
add_executable ( Bottleneck_distance_test_unit bottleneck_unit_test.cpp )
- target_link_libraries(Bottleneck_distance_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Bottleneck_distance_test_unit ${TBB_LIBRARIES})
endif(TBB_FOUND)
- gudhi_add_coverage_test(Bottleneck_distance_test_unit)
+ gudhi_add_boost_test(Bottleneck_distance_test_unit)
endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Bottleneck_distance/test/bottleneck_unit_test.cpp b/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
index 3fc6fc7b..2c520045 100644
--- a/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
+++ b/src/Bottleneck_distance/test/bottleneck_unit_test.cpp
@@ -15,6 +15,7 @@
#include <random>
#include <gudhi/Bottleneck.h>
+#include <gudhi/Unitary_tests_utils.h>
using namespace Gudhi::persistence_diagram;
@@ -59,24 +60,24 @@ BOOST_AUTO_TEST_CASE(persistence_graph) {
BOOST_CHECK(g.size() == (n1 + n2));
//
BOOST_CHECK((int) d.size() == (n1 + n2)*(n1 + n2) + n1 + n2 + 1);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, 0)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, n1 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, n1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, n2 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, n2)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(0, (n1 + n2) - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, 0)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, n1 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, n1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, n2 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, n2)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance(n1, (n1 + n2) - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, 0)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, n1 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, n1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, n2 - 1)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, n2)) > 0);
- BOOST_CHECK(std::count(d.begin(), d.end(), g.distance((n1 + n2) - 1, (n1 + n2) - 1)) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, 0))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, n1 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, n1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, n2 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, n2))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(0, (n1 + n2) - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, 0))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, n1 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, n1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, n2 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, n2))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance(n1, (n1 + n2) - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, 0))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, n1 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, n1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, n2 - 1))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, n2))) > 0);
+ BOOST_CHECK(std::count(d.begin(), d.end(), GUDHI_PROTECT_FLOAT(g.distance((n1 + n2) - 1, (n1 + n2) - 1))) > 0);
}
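The GUDHI_PROTECT_FLOAT wrapping introduced above guards these checks against x87 excess precision: on 32-bit x86 an expression like g.distance(0, 0) can be evaluated in 80-bit registers while the values stored in d were already rounded to 64 bits, so the unprotected equality search could fail. A minimal illustration of the store-to-memory idiom such a guard typically relies on (an assumption about the idiom, not GUDHI's actual macro definition):

    // Forcing the value through a volatile object makes the compiler round
    // any extended-precision intermediate to the declared type, so both
    // sides of a later comparison have been through the same rounding.
    template <typename FloatType>
    FloatType protect_float(FloatType value) {
      volatile FloatType rounded = value;
      return rounded;
    }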
BOOST_AUTO_TEST_CASE(neighbors_finder) {
diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h
index 90086de7..80c88dc6 100644
--- a/src/Cech_complex/doc/Intro_cech_complex.h
+++ b/src/Cech_complex/doc/Intro_cech_complex.h
@@ -24,7 +24,7 @@ namespace cech_complex {
 * \section cechdefinition Čech complex definition
*
 * Čech complex
- * <a target="_blank" href="https://en.wikipedia.org/wiki/%C4%8Cech_cohomology">(Wikipedia)</a> is a
+ * <a target="_blank" href="https://en.wikipedia.org/wiki/%C4%8Cech_complex">(Wikipedia)</a> is a
* <a target="_blank" href="https://en.wikipedia.org/wiki/Simplicial_complex">simplicial complex</a> constructed
* from a proximity graph. The set of all simplices is filtered by the radius of their minimal enclosing ball.
*
diff --git a/src/Cech_complex/test/CMakeLists.txt b/src/Cech_complex/test/CMakeLists.txt
index 8db51173..db510af3 100644
--- a/src/Cech_complex/test/CMakeLists.txt
+++ b/src/Cech_complex/test/CMakeLists.txt
@@ -1,10 +1,9 @@
cmake_minimum_required(VERSION 2.6)
project(Cech_complex_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Cech_complex_test_unit test_cech_complex.cpp )
-target_link_libraries(Cech_complex_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Cech_complex_test_unit ${TBB_LIBRARIES})
endif()
@@ -12,4 +11,4 @@ endif()
# Do not forget to copy test files in current binary dir
file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-gudhi_add_coverage_test(Cech_complex_test_unit)
+gudhi_add_boost_test(Cech_complex_test_unit)
diff --git a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
index c2b3157c..a0d9f2b2 100644
--- a/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
+++ b/src/Contraction/include/gudhi/Skeleton_blocker_contractor.h
@@ -40,7 +40,7 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Skeleton_blocker_contractor is only available for CGAL >= 4.11
#endif
namespace Gudhi {
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index 57775498..ec551882 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -765,7 +765,7 @@ INPUT_ENCODING = UTF-8
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
-FILE_PATTERNS =
+#FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index fc6a2a91..a47d6889 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -48,11 +48,6 @@
#include <cassert>
#include <cmath>
-// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
-#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
-#endif
-
namespace Gudhi {
namespace cover_complex {
@@ -712,7 +707,7 @@ class Cover_complex {
// Sort points according to function values
std::vector<int> points(n);
for (int i = 0; i < n; i++) points[i] = i;
- std::sort(points.begin(), points.end(), [=](const int & p1, const int & p2){return (this->func[p1] < this->func[p2]);});
+ std::sort(points.begin(), points.end(), [this](int p1, int p2){return (this->func[p1] < this->func[p2]);});
int id = 0;
int pos = 0;
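The comparator rewrite above swaps the blanket copy capture [=] for an explicit [this] and passes the indices by value; behavior is unchanged ([=] captured this anyway), but the dependency on the member is now stated explicitly. A standalone sketch of the resulting pattern (the Cover struct here is hypothetical, standing in for Cover_complex):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Cover {
      std::vector<double> func;  // one function value per point

      std::vector<int> points_sorted_by_func() const {
        std::vector<int> points(func.size());
        for (std::size_t i = 0; i < points.size(); i++) points[i] = static_cast<int>(i);
        // Capturing `this` explicitly documents that the comparator reads the
        // member `func` through the object rather than through a copy.
        std::sort(points.begin(), points.end(),
                  [this](int p1, int p2) { return func[p1] < func[p2]; });
        return points;
      }
    };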
diff --git a/src/Nerve_GIC/test/CMakeLists.txt b/src/Nerve_GIC/test/CMakeLists.txt
index b89c18a2..567bf43f 100644
--- a/src/Nerve_GIC/test/CMakeLists.txt
+++ b/src/Nerve_GIC/test/CMakeLists.txt
@@ -1,16 +1,15 @@
project(Graph_induced_complex_tests)
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_test_coverage)
+ include(GUDHI_boost_test)
add_executable ( Nerve_GIC_test_unit test_GIC.cpp )
- target_link_libraries(Nerve_GIC_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Nerve_GIC_test_unit ${TBB_LIBRARIES})
endif()
file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
- gudhi_add_coverage_test(Nerve_GIC_test_unit)
+ gudhi_add_boost_test(Nerve_GIC_test_unit)
endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Persistence_representations/include/gudhi/Persistence_intervals.h b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
index e2db4572..ea4220ea 100644
--- a/src/Persistence_representations/include/gudhi/Persistence_intervals.h
+++ b/src/Persistence_representations/include/gudhi/Persistence_intervals.h
@@ -6,6 +6,8 @@
*
* Modification(s):
* - YYYY/MM Author: Description of the modification
+ * - 2019/12 Vincent Rouvreau: Fix #118 - Make histogram_of_lengths and cumulative_histogram_of_lengths
+ * return the exact number_of_bins (was failing on x86)
*/
#ifndef PERSISTENCE_INTERVALS_H_
@@ -335,6 +337,9 @@ std::vector<size_t> Persistence_intervals::histogram_of_lengths(size_t number_of
getchar();
}
}
+  // we want exactly number_of_bins bins: fold the extra overflow bin into the last one (x86 gave unexpected results otherwise)
+  result[number_of_bins - 1] += result[number_of_bins];
+ result.resize(number_of_bins);
if (dbg) {
for (size_t i = 0; i != result.size(); ++i) std::cerr << result[i] << std::endl;
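The two added lines above fold the overflow bin back into the last requested bin, so the histogram always comes back with exactly number_of_bins entries. A small worked sketch of the same merge, using the values from the updated unit test below:

    #include <cstddef>
    #include <vector>

    int main() {
      const std::size_t number_of_bins = 10;
      // Before the fix the last interval could land in an extra, 11th bin:
      std::vector<std::size_t> result{10, 5, 3, 4, 4, 3, 6, 1, 7, 1, 1};
      // Fold the overflow bin into the last requested bin, then truncate:
      result[number_of_bins - 1] += result[number_of_bins];
      result.resize(number_of_bins);
      // result is now {10, 5, 3, 4, 4, 3, 6, 1, 7, 2}: exactly 10 bins,
      // matching template_histogram in persistence_intervals_test.cpp below.
      return 0;
    }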
diff --git a/src/Persistence_representations/test/CMakeLists.txt b/src/Persistence_representations/test/CMakeLists.txt
index a95880c9..92d68a63 100644
--- a/src/Persistence_representations/test/CMakeLists.txt
+++ b/src/Persistence_representations/test/CMakeLists.txt
@@ -1,51 +1,36 @@
project(Persistence_representations_test)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
# copy data directory for tests purpose.
file(COPY data DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_executable ( Persistence_intervals_test_unit persistence_intervals_test.cpp )
-target_link_libraries(Persistence_intervals_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Persistence_intervals_test_unit)
+gudhi_add_boost_test(Persistence_intervals_test_unit)
add_executable (Vector_representation_test_unit vector_representation_test.cpp )
-target_link_libraries(Vector_representation_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Vector_representation_test_unit)
+gudhi_add_boost_test(Vector_representation_test_unit)
add_executable (Persistence_lanscapes_test_unit persistence_lanscapes_test.cpp )
-target_link_libraries(Persistence_lanscapes_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Persistence_lanscapes_test_unit)
+gudhi_add_boost_test(Persistence_lanscapes_test_unit)
add_executable ( Persistence_lanscapes_on_grid_test_unit persistence_lanscapes_on_grid_test.cpp )
-target_link_libraries(Persistence_lanscapes_on_grid_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Persistence_lanscapes_on_grid_test_unit)
+gudhi_add_boost_test(Persistence_lanscapes_on_grid_test_unit)
add_executable (Persistence_heat_maps_test_unit persistence_heat_maps_test.cpp )
-target_link_libraries(Persistence_heat_maps_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Persistence_heat_maps_test_unit)
+gudhi_add_boost_test(Persistence_heat_maps_test_unit)
add_executable ( Read_persistence_from_file_test_unit read_persistence_from_file_test.cpp )
-target_link_libraries(Read_persistence_from_file_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(Read_persistence_from_file_test_unit)
+gudhi_add_boost_test(Read_persistence_from_file_test_unit)
add_executable ( kernels_unit kernels.cpp )
-target_link_libraries(kernels_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
-gudhi_add_coverage_test(kernels_unit)
+gudhi_add_boost_test(kernels_unit)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable (Persistence_intervals_with_distances_test_unit persistence_intervals_with_distances_test.cpp )
- target_link_libraries(Persistence_intervals_with_distances_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Persistence_intervals_with_distances_test_unit ${TBB_LIBRARIES})
endif(TBB_FOUND)
- gudhi_add_coverage_test(Persistence_intervals_with_distances_test_unit)
+ gudhi_add_boost_test(Persistence_intervals_with_distances_test_unit)
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Persistence_representations/test/persistence_intervals_test.cpp b/src/Persistence_representations/test/persistence_intervals_test.cpp
index 3b7a2049..02ea8edb 100644
--- a/src/Persistence_representations/test/persistence_intervals_test.cpp
+++ b/src/Persistence_representations/test/persistence_intervals_test.cpp
@@ -6,6 +6,8 @@
*
* Modification(s):
* - YYYY/MM Author: Description of the modification
+ * - 2019/12 Vincent Rouvreau: Fix #118 - Make histogram_of_lengths and cumulative_histogram_of_lengths
+ * return the exact number_of_bins (was failing on x86)
*/
#define BOOST_TEST_DYN_LINK
@@ -32,17 +34,8 @@ BOOST_AUTO_TEST_CASE(check_min_max_function) {
BOOST_AUTO_TEST_CASE(check_length_of_dominant_intervals) {
Persistence_intervals p("data/file_with_diagram");
std::vector<double> dominant_ten_intervals_length = p.length_of_dominant_intervals(10);
- std::vector<double> dominant_intervals_length;
- dominant_intervals_length.push_back(0.862625);
- dominant_intervals_length.push_back(0.800893);
- dominant_intervals_length.push_back(0.762061);
- dominant_intervals_length.push_back(0.756501);
- dominant_intervals_length.push_back(0.729367);
- dominant_intervals_length.push_back(0.718177);
- dominant_intervals_length.push_back(0.708395);
- dominant_intervals_length.push_back(0.702844);
- dominant_intervals_length.push_back(0.700468);
- dominant_intervals_length.push_back(0.622177);
+ std::vector<double> dominant_intervals_length{0.862625, 0.800893, 0.762061, 0.756501, 0.729367,
+ 0.718177, 0.708395, 0.702844, 0.700468, 0.622177};
for (size_t i = 0; i != dominant_ten_intervals_length.size(); ++i) {
GUDHI_TEST_FLOAT_EQUALITY_CHECK(dominant_ten_intervals_length[i], dominant_intervals_length[i],
Gudhi::Persistence_representations::epsi);
@@ -52,17 +45,11 @@ BOOST_AUTO_TEST_CASE(check_dominant_intervals) {
Persistence_intervals p("data/file_with_diagram");
std::vector<std::pair<double, double> > ten_dominant_intervals = p.dominant_intervals(10);
- std::vector<std::pair<double, double> > templ;
- templ.push_back(std::pair<double, double>(0.114718, 0.977343));
- templ.push_back(std::pair<double, double>(0.133638, 0.93453));
- templ.push_back(std::pair<double, double>(0.104599, 0.866659));
- templ.push_back(std::pair<double, double>(0.149798, 0.906299));
- templ.push_back(std::pair<double, double>(0.247352, 0.976719));
- templ.push_back(std::pair<double, double>(0.192675, 0.910852));
- templ.push_back(std::pair<double, double>(0.191836, 0.900231));
- templ.push_back(std::pair<double, double>(0.284998, 0.987842));
- templ.push_back(std::pair<double, double>(0.294069, 0.994537));
- templ.push_back(std::pair<double, double>(0.267421, 0.889597));
+ std::vector<std::pair<double, double> > templ{ {0.114718, 0.977343}, {0.133638, 0.93453},
+ {0.104599, 0.866659}, {0.149798, 0.906299},
+ {0.247352, 0.976719}, {0.192675, 0.910852},
+ {0.191836, 0.900231}, {0.284998, 0.987842},
+ {0.294069, 0.994537}, {0.267421, 0.889597} };
for (size_t i = 0; i != ten_dominant_intervals.size(); ++i) {
GUDHI_TEST_FLOAT_EQUALITY_CHECK(ten_dominant_intervals[i].first, templ[i].first,
@@ -75,18 +62,7 @@ BOOST_AUTO_TEST_CASE(check_dominant_intervals) {
BOOST_AUTO_TEST_CASE(check_histogram_of_lengths) {
Persistence_intervals p("data/file_with_diagram");
std::vector<size_t> histogram = p.histogram_of_lengths(10);
- std::vector<size_t> template_histogram;
- template_histogram.push_back(10);
- template_histogram.push_back(5);
- template_histogram.push_back(3);
- template_histogram.push_back(4);
- template_histogram.push_back(4);
- template_histogram.push_back(3);
- template_histogram.push_back(6);
- template_histogram.push_back(1);
- template_histogram.push_back(7);
- template_histogram.push_back(1);
- template_histogram.push_back(1);
+ std::vector<size_t> template_histogram{10, 5, 3, 4, 4, 3, 6, 1, 7, 2};
for (size_t i = 0; i != histogram.size(); ++i) {
BOOST_CHECK(histogram[i] == template_histogram[i]);
}
@@ -95,18 +71,7 @@ BOOST_AUTO_TEST_CASE(check_histogram_of_lengths) {
BOOST_AUTO_TEST_CASE(check_cumulative_histograms_of_lengths) {
Persistence_intervals p("data/file_with_diagram");
std::vector<size_t> cumulative_histogram = p.cumulative_histogram_of_lengths(10);
- std::vector<size_t> template_cumulative_histogram;
- template_cumulative_histogram.push_back(10);
- template_cumulative_histogram.push_back(15);
- template_cumulative_histogram.push_back(18);
- template_cumulative_histogram.push_back(22);
- template_cumulative_histogram.push_back(26);
- template_cumulative_histogram.push_back(29);
- template_cumulative_histogram.push_back(35);
- template_cumulative_histogram.push_back(36);
- template_cumulative_histogram.push_back(43);
- template_cumulative_histogram.push_back(44);
- template_cumulative_histogram.push_back(45);
+ std::vector<size_t> template_cumulative_histogram{10, 15, 18, 22, 26, 29, 35, 36, 43, 45};
for (size_t i = 0; i != cumulative_histogram.size(); ++i) {
BOOST_CHECK(cumulative_histogram[i] == template_cumulative_histogram[i]);
@@ -116,17 +81,8 @@ BOOST_AUTO_TEST_CASE(check_characteristic_function_of_diagram) {
Persistence_intervals p("data/file_with_diagram");
std::pair<double, double> min_max_ = p.get_x_range();
std::vector<double> char_funct_diag = p.characteristic_function_of_diagram(min_max_.first, min_max_.second);
- std::vector<double> template_char_funct_diag;
- template_char_funct_diag.push_back(0.370665);
- template_char_funct_diag.push_back(0.84058);
- template_char_funct_diag.push_back(1.24649);
- template_char_funct_diag.push_back(1.3664);
- template_char_funct_diag.push_back(1.34032);
- template_char_funct_diag.push_back(1.31904);
- template_char_funct_diag.push_back(1.14076);
- template_char_funct_diag.push_back(0.991259);
- template_char_funct_diag.push_back(0.800714);
- template_char_funct_diag.push_back(0.0676303);
+ std::vector<double> template_char_funct_diag{0.370665, 0.84058, 1.24649, 1.3664, 1.34032,
+ 1.31904, 1.14076, 0.991259, 0.800714, 0.0676303};
for (size_t i = 0; i != char_funct_diag.size(); ++i) {
GUDHI_TEST_FLOAT_EQUALITY_CHECK(char_funct_diag[i], template_char_funct_diag[i],
@@ -139,18 +95,8 @@ BOOST_AUTO_TEST_CASE(check_cumulative_characteristic_function_of_diagram) {
std::pair<double, double> min_max_ = p.get_x_range();
std::vector<double> cumul_char_funct_diag =
p.cumulative_characteristic_function_of_diagram(min_max_.first, min_max_.second);
- std::vector<double> template_char_funct_diag_cumul;
-
- template_char_funct_diag_cumul.push_back(0.370665);
- template_char_funct_diag_cumul.push_back(1.21125);
- template_char_funct_diag_cumul.push_back(2.45774);
- template_char_funct_diag_cumul.push_back(3.82414);
- template_char_funct_diag_cumul.push_back(5.16446);
- template_char_funct_diag_cumul.push_back(6.4835);
- template_char_funct_diag_cumul.push_back(7.62426);
- template_char_funct_diag_cumul.push_back(8.61552);
- template_char_funct_diag_cumul.push_back(9.41623);
- template_char_funct_diag_cumul.push_back(9.48386);
+ std::vector<double> template_char_funct_diag_cumul{0.370665, 1.21125, 2.45774, 3.82414, 5.16446,
+ 6.4835, 7.62426, 8.61552, 9.41623, 9.48386};
for (size_t i = 0; i != cumul_char_funct_diag.size(); ++i) {
GUDHI_TEST_FLOAT_EQUALITY_CHECK(cumul_char_funct_diag[i], template_char_funct_diag_cumul[i],
@@ -160,97 +106,29 @@ BOOST_AUTO_TEST_CASE(check_cumulative_characteristic_function_of_diagram) {
BOOST_AUTO_TEST_CASE(check_compute_persistent_betti_numbers) {
Persistence_intervals p("data/file_with_diagram");
- std::vector<std::pair<double, size_t> > pbns;
- pbns.push_back(std::pair<double, size_t>(0.0290362, 1));
- pbns.push_back(std::pair<double, size_t>(0.0307676, 2));
- pbns.push_back(std::pair<double, size_t>(0.0366312, 3));
- pbns.push_back(std::pair<double, size_t>(0.0544614, 4));
- pbns.push_back(std::pair<double, size_t>(0.0920033, 5));
- pbns.push_back(std::pair<double, size_t>(0.104599, 6));
- pbns.push_back(std::pair<double, size_t>(0.114718, 7));
- pbns.push_back(std::pair<double, size_t>(0.117379, 8));
- pbns.push_back(std::pair<double, size_t>(0.123493, 9));
- pbns.push_back(std::pair<double, size_t>(0.133638, 10));
- pbns.push_back(std::pair<double, size_t>(0.137798, 9));
- pbns.push_back(std::pair<double, size_t>(0.149798, 10));
- pbns.push_back(std::pair<double, size_t>(0.155421, 11));
- pbns.push_back(std::pair<double, size_t>(0.158443, 12));
- pbns.push_back(std::pair<double, size_t>(0.176956, 13));
- pbns.push_back(std::pair<double, size_t>(0.183234, 12));
- pbns.push_back(std::pair<double, size_t>(0.191069, 13));
- pbns.push_back(std::pair<double, size_t>(0.191333, 14));
- pbns.push_back(std::pair<double, size_t>(0.191836, 15));
- pbns.push_back(std::pair<double, size_t>(0.192675, 16));
- pbns.push_back(std::pair<double, size_t>(0.208564, 17));
- pbns.push_back(std::pair<double, size_t>(0.218425, 18));
- pbns.push_back(std::pair<double, size_t>(0.219902, 17));
- pbns.push_back(std::pair<double, size_t>(0.23233, 16));
- pbns.push_back(std::pair<double, size_t>(0.234558, 17));
- pbns.push_back(std::pair<double, size_t>(0.237166, 16));
- pbns.push_back(std::pair<double, size_t>(0.247352, 17));
- pbns.push_back(std::pair<double, size_t>(0.267421, 18));
- pbns.push_back(std::pair<double, size_t>(0.268093, 19));
- pbns.push_back(std::pair<double, size_t>(0.278734, 18));
- pbns.push_back(std::pair<double, size_t>(0.284722, 19));
- pbns.push_back(std::pair<double, size_t>(0.284998, 20));
- pbns.push_back(std::pair<double, size_t>(0.294069, 21));
- pbns.push_back(std::pair<double, size_t>(0.306293, 22));
- pbns.push_back(std::pair<double, size_t>(0.322361, 21));
- pbns.push_back(std::pair<double, size_t>(0.323152, 22));
- pbns.push_back(std::pair<double, size_t>(0.371021, 23));
- pbns.push_back(std::pair<double, size_t>(0.372395, 24));
- pbns.push_back(std::pair<double, size_t>(0.387744, 25));
- pbns.push_back(std::pair<double, size_t>(0.435537, 26));
- pbns.push_back(std::pair<double, size_t>(0.462911, 25));
- pbns.push_back(std::pair<double, size_t>(0.483569, 26));
- pbns.push_back(std::pair<double, size_t>(0.489209, 25));
- pbns.push_back(std::pair<double, size_t>(0.517115, 24));
- pbns.push_back(std::pair<double, size_t>(0.522197, 23));
- pbns.push_back(std::pair<double, size_t>(0.532665, 22));
- pbns.push_back(std::pair<double, size_t>(0.545262, 23));
- pbns.push_back(std::pair<double, size_t>(0.587227, 22));
- pbns.push_back(std::pair<double, size_t>(0.593036, 23));
- pbns.push_back(std::pair<double, size_t>(0.602647, 24));
- pbns.push_back(std::pair<double, size_t>(0.605044, 25));
- pbns.push_back(std::pair<double, size_t>(0.621962, 24));
- pbns.push_back(std::pair<double, size_t>(0.629449, 23));
- pbns.push_back(std::pair<double, size_t>(0.636719, 22));
- pbns.push_back(std::pair<double, size_t>(0.64957, 21));
- pbns.push_back(std::pair<double, size_t>(0.650781, 22));
- pbns.push_back(std::pair<double, size_t>(0.654951, 23));
- pbns.push_back(std::pair<double, size_t>(0.683489, 24));
- pbns.push_back(std::pair<double, size_t>(0.687172, 23));
- pbns.push_back(std::pair<double, size_t>(0.69703, 22));
- pbns.push_back(std::pair<double, size_t>(0.701174, 21));
- pbns.push_back(std::pair<double, size_t>(0.717623, 22));
- pbns.push_back(std::pair<double, size_t>(0.722023, 21));
- pbns.push_back(std::pair<double, size_t>(0.722298, 20));
- pbns.push_back(std::pair<double, size_t>(0.725347, 19));
- pbns.push_back(std::pair<double, size_t>(0.73071, 18));
- pbns.push_back(std::pair<double, size_t>(0.758355, 17));
- pbns.push_back(std::pair<double, size_t>(0.770913, 18));
- pbns.push_back(std::pair<double, size_t>(0.790833, 17));
- pbns.push_back(std::pair<double, size_t>(0.821211, 16));
- pbns.push_back(std::pair<double, size_t>(0.849305, 17));
- pbns.push_back(std::pair<double, size_t>(0.853669, 16));
- pbns.push_back(std::pair<double, size_t>(0.866659, 15));
- pbns.push_back(std::pair<double, size_t>(0.872896, 16));
- pbns.push_back(std::pair<double, size_t>(0.889597, 15));
- pbns.push_back(std::pair<double, size_t>(0.900231, 14));
- pbns.push_back(std::pair<double, size_t>(0.903847, 13));
- pbns.push_back(std::pair<double, size_t>(0.906299, 12));
- pbns.push_back(std::pair<double, size_t>(0.910852, 11));
- pbns.push_back(std::pair<double, size_t>(0.93453, 10));
- pbns.push_back(std::pair<double, size_t>(0.944757, 9));
- pbns.push_back(std::pair<double, size_t>(0.947812, 8));
- pbns.push_back(std::pair<double, size_t>(0.959154, 7));
- pbns.push_back(std::pair<double, size_t>(0.975654, 6));
- pbns.push_back(std::pair<double, size_t>(0.976719, 5));
- pbns.push_back(std::pair<double, size_t>(0.977343, 4));
- pbns.push_back(std::pair<double, size_t>(0.980129, 3));
- pbns.push_back(std::pair<double, size_t>(0.987842, 2));
- pbns.push_back(std::pair<double, size_t>(0.990127, 1));
- pbns.push_back(std::pair<double, size_t>(0.994537, 0));
+ std::vector<std::pair<double, size_t> > pbns{ {0.0290362, 1}, {0.0307676, 2}, {0.0366312, 3}, {0.0544614, 4},
+ {0.0920033, 5}, {0.104599, 6}, {0.114718, 7}, {0.117379, 8},
+ {0.123493, 9}, {0.133638, 10}, {0.137798, 9}, {0.149798, 10},
+ {0.155421, 11}, {0.158443, 12}, {0.176956, 13}, {0.183234, 12},
+ {0.191069, 13}, {0.191333, 14}, {0.191836, 15}, {0.192675, 16},
+ {0.208564, 17}, {0.218425, 18}, {0.219902, 17}, {0.23233, 16},
+ {0.234558, 17}, {0.237166, 16}, {0.247352, 17}, {0.267421, 18},
+ {0.268093, 19}, {0.278734, 18}, {0.284722, 19}, {0.284998, 20},
+ {0.294069, 21}, {0.306293, 22}, {0.322361, 21}, {0.323152, 22},
+ {0.371021, 23}, {0.372395, 24}, {0.387744, 25}, {0.435537, 26},
+ {0.462911, 25}, {0.483569, 26}, {0.489209, 25}, {0.517115, 24},
+ {0.522197, 23}, {0.532665, 22}, {0.545262, 23}, {0.587227, 22},
+ {0.593036, 23}, {0.602647, 24}, {0.605044, 25}, {0.621962, 24},
+ {0.629449, 23}, {0.636719, 22}, {0.64957, 21}, {0.650781, 22},
+ {0.654951, 23}, {0.683489, 24}, {0.687172, 23}, {0.69703, 22},
+ {0.701174, 21}, {0.717623, 22}, {0.722023, 21}, {0.722298, 20},
+ {0.725347, 19}, {0.73071, 18}, {0.758355, 17}, {0.770913, 18},
+ {0.790833, 17}, {0.821211, 16}, {0.849305, 17}, {0.853669, 16},
+ {0.866659, 15}, {0.872896, 16}, {0.889597, 15}, {0.900231, 14},
+ {0.903847, 13}, {0.906299, 12}, {0.910852, 11}, {0.93453, 10},
+ {0.944757, 9}, {0.947812, 8}, {0.959154, 7}, {0.975654, 6},
+ {0.976719, 5}, {0.977343, 4}, {0.980129, 3}, {0.987842, 2},
+ {0.990127, 1}, {0.994537, 0} };
std::vector<std::pair<double, size_t> > pbns_new = p.compute_persistent_betti_numbers();
for (size_t i = 0; i != pbns.size(); ++i) {
@@ -262,17 +140,8 @@ BOOST_AUTO_TEST_CASE(check_compute_persistent_betti_numbers) {
BOOST_AUTO_TEST_CASE(check_k_n_n) {
Persistence_intervals p("data/file_with_diagram");
std::vector<double> knn = p.k_n_n(5);
- std::vector<double> knn_template;
- knn_template.push_back(1.04208);
- knn_template.push_back(1.00344);
- knn_template.push_back(0.979395);
- knn_template.push_back(0.890643);
- knn_template.push_back(0.874769);
- knn_template.push_back(0.845787);
- knn_template.push_back(0.819713);
- knn_template.push_back(0.803984);
- knn_template.push_back(0.799864);
- knn_template.push_back(0.786945);
+ std::vector<double> knn_template{1.04208, 1.00344, 0.979395, 0.890643, 0.874769,
+ 0.845787, 0.819713, 0.803984, 0.799864, 0.786945};
for (size_t i = 0; i != knn.size(); ++i) {
GUDHI_TEST_FLOAT_EQUALITY_CHECK(knn[i], knn_template[i], Gudhi::Persistence_representations::epsi);
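
For context on the long pbns table above: the expected (value, count) pairs behave like a sweep over birth/death events, where each birth increments a running count and each death decrements it. A hedged sketch of that idea (illustrative helper, not the library's implementation):

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sketch: each (value, count) pair in the expected pbns sequence is the
    // number of intervals alive after processing births (+1) and deaths (-1)
    // in increasing order of their critical value.
    std::vector<std::pair<double, std::size_t> >
    betti_events(const std::vector<std::pair<double, double> >& diagram) {
      std::vector<std::pair<double, int> > events;
      for (const auto& interval : diagram) {
        events.emplace_back(interval.first, +1);   // birth
        events.emplace_back(interval.second, -1);  // death
      }
      std::sort(events.begin(), events.end());
      std::vector<std::pair<double, std::size_t> > result;
      std::size_t alive = 0;
      for (const auto& e : events) {
        if (e.second > 0) ++alive; else --alive;
        result.emplace_back(e.first, alive);
      }
      return result;
    }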
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index 944b6d35..0f1876d0 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -600,8 +600,10 @@ class Persistent_cohomology {
* @return A vector of Betti numbers.
*/
std::vector<int> betti_numbers() const {
+ // Don't allocate a vector of negative size for an empty complex
+ int siz = std::max(dim_max_, 0);
// Init Betti numbers vector with zeros until Simplicial complex dimension
- std::vector<int> betti_numbers(dim_max_, 0);
+ std::vector<int> betti_numbers(siz);
for (auto pair : persistent_pairs_) {
// Count never ended persistence intervals
@@ -639,8 +641,10 @@ class Persistent_cohomology {
* @return A vector of persistent Betti numbers.
*/
std::vector<int> persistent_betti_numbers(Filtration_value from, Filtration_value to) const {
+ // Don't allocate a vector of negative size for an empty complex
+ int siz = std::max(dim_max_, 0);
// Init Betti numbers vector with zeros until Simplicial complex dimension
- std::vector<int> betti_numbers(dim_max_, 0);
+ std::vector<int> betti_numbers(siz);
for (auto pair : persistent_pairs_) {
// Count persistence intervals that covers the given interval
// null_simplex test : if the function is called with to=+infinity, we still get something useful. And it will
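
The std::max guard in both hunks above exists because dim_max_ is -1 for an empty complex: passing -1 to the std::vector size constructor converts it to a huge std::size_t and throws (or aborts). A minimal sketch of the fix, assuming only that the dimension can be negative:

    #include <algorithm>
    #include <vector>

    // Clamp a possibly-negative dimension before sizing the vector.
    // std::vector<int>(n) value-initializes its elements, so the zeros the
    // old `(dim_max_, 0)` form asked for are still guaranteed.
    std::vector<int> make_betti_vector(int dim_max) {
      int size = std::max(dim_max, 0);  // -1 (empty complex) becomes 0
      return std::vector<int>(size);
    }

The empty-complex unit test added further down exercises exactly this path.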
diff --git a/src/Persistent_cohomology/test/CMakeLists.txt b/src/Persistent_cohomology/test/CMakeLists.txt
index f8baf861..64669c4e 100644
--- a/src/Persistent_cohomology/test/CMakeLists.txt
+++ b/src/Persistent_cohomology/test/CMakeLists.txt
@@ -1,11 +1,9 @@
project(Persistent_cohomology_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Persistent_cohomology_test_unit persistent_cohomology_unit_test.cpp )
-target_link_libraries(Persistent_cohomology_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
add_executable ( Persistent_cohomology_test_betti_numbers betti_numbers_unit_test.cpp )
-target_link_libraries(Persistent_cohomology_test_betti_numbers ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Persistent_cohomology_test_unit ${TBB_LIBRARIES})
target_link_libraries(Persistent_cohomology_test_betti_numbers ${TBB_LIBRARIES})
@@ -16,13 +14,12 @@ file(COPY "${CMAKE_SOURCE_DIR}/src/Persistent_cohomology/test/simplex_tree_file_
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
# Unitary tests
-gudhi_add_coverage_test(Persistent_cohomology_test_unit)
-gudhi_add_coverage_test(Persistent_cohomology_test_betti_numbers)
+gudhi_add_boost_test(Persistent_cohomology_test_unit)
+gudhi_add_boost_test(Persistent_cohomology_test_betti_numbers)
if(GMPXX_FOUND AND GMP_FOUND)
add_executable ( Persistent_cohomology_test_unit_multi_field persistent_cohomology_unit_test_multi_field.cpp )
- target_link_libraries(Persistent_cohomology_test_unit_multi_field
- ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
+ target_link_libraries(Persistent_cohomology_test_unit_multi_field ${GMPXX_LIBRARIES} ${GMP_LIBRARIES})
if (TBB_FOUND)
target_link_libraries(Persistent_cohomology_test_unit_multi_field ${TBB_LIBRARIES})
endif(TBB_FOUND)
@@ -31,7 +28,7 @@ if(GMPXX_FOUND AND GMP_FOUND)
file(COPY "${CMAKE_SOURCE_DIR}/src/Persistent_cohomology/test/simplex_tree_file_for_multi_field_unit_test.txt"
DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
# Unitary tests
- gudhi_add_coverage_test(Persistent_cohomology_test_unit_multi_field)
+ gudhi_add_boost_test(Persistent_cohomology_test_unit_multi_field)
endif(GMPXX_FOUND AND GMP_FOUND)
diff --git a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
index 0a08d200..b9f11607 100644
--- a/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
+++ b/src/Persistent_cohomology/test/betti_numbers_unit_test.cpp
@@ -284,4 +284,13 @@ BOOST_AUTO_TEST_CASE( betti_numbers )
auto intervals_in_dimension_2 = pcoh.intervals_in_dimension(2);
std::cout << "intervals_in_dimension_2.size() = " << intervals_in_dimension_2.size() << std::endl;
BOOST_CHECK(intervals_in_dimension_2.size() == 0);
+
+ std::cout << "EMPTY COMPLEX" << std::endl;
+ Simplex_tree empty;
+ empty.initialize_filtration();
+ St_persistence pcoh_empty(empty, false);
+ pcoh_empty.init_coefficients(2);
+ pcoh_empty.compute_persistent_cohomology();
+ BOOST_CHECK(pcoh_empty.betti_numbers().size() == 0);
+ BOOST_CHECK(pcoh_empty.persistent_betti_numbers(0,1).size() == 0);
}
diff --git a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
index 9e182f1e..b7040453 100644
--- a/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
+++ b/src/Rips_complex/example/example_rips_complex_from_csv_distance_matrix_file.cpp
@@ -35,19 +35,19 @@ int main(int argc, char **argv) {
Distance_matrix distances = Gudhi::read_lower_triangular_matrix_from_csv_file<Filtration_value>(csv_file_name);
Rips_complex rips_complex_from_file(distances, threshold);
- std::streambuf* streambufffer;
+ std::streambuf* streambuffer;
std::ofstream ouput_file_stream;
if (argc == 5) {
ouput_file_stream.open(std::string(argv[4]));
- streambufffer = ouput_file_stream.rdbuf();
+ streambuffer = ouput_file_stream.rdbuf();
} else {
- streambufffer = std::cout.rdbuf();
+ streambuffer = std::cout.rdbuf();
}
Simplex_tree stree;
rips_complex_from_file.create_complex(stree, dim_max);
- std::ostream output_stream(streambufffer);
+ std::ostream output_stream(streambuffer);
// ----------------------------------------------------------------------------
// Display information about the Rips complex
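
Beyond the spelling fix, the pattern in this example is worth spelling out: pick a std::streambuf (file or std::cout) once, then write through a single std::ostream so the printing code is target-agnostic. A self-contained sketch of the idiom (not the example itself):

    #include <fstream>
    #include <iostream>
    #include <ostream>

    int main(int argc, char** argv) {
      std::ofstream file_stream;
      std::streambuf* buffer = std::cout.rdbuf();  // default: standard output
      if (argc > 1) {
        file_stream.open(argv[1]);
        buffer = file_stream.rdbuf();              // redirect to the file
      }
      std::ostream output(buffer);
      output << "complex information goes here\n";
      return 0;
    }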
diff --git a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
index de2e4ea4..36b468a7 100644
--- a/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
+++ b/src/Rips_complex/example/example_rips_complex_from_off_file.cpp
@@ -34,19 +34,19 @@ int main(int argc, char **argv) {
Gudhi::Points_off_reader<Point> off_reader(off_file_name);
Rips_complex rips_complex_from_file(off_reader.get_point_cloud(), threshold, Gudhi::Euclidean_distance());
- std::streambuf* streambufffer;
+ std::streambuf* streambuffer;
std::ofstream ouput_file_stream;
if (argc == 5) {
ouput_file_stream.open(std::string(argv[4]));
- streambufffer = ouput_file_stream.rdbuf();
+ streambuffer = ouput_file_stream.rdbuf();
} else {
- streambufffer = std::cout.rdbuf();
+ streambuffer = std::cout.rdbuf();
}
Simplex_tree stree;
rips_complex_from_file.create_complex(stree, dim_max);
- std::ostream output_stream(streambufffer);
+ std::ostream output_stream(streambuffer);
// ----------------------------------------------------------------------------
// Display information about the Rips complex
diff --git a/src/Rips_complex/test/CMakeLists.txt b/src/Rips_complex/test/CMakeLists.txt
index 745d953c..b359584e 100644
--- a/src/Rips_complex/test/CMakeLists.txt
+++ b/src/Rips_complex/test/CMakeLists.txt
@@ -1,9 +1,8 @@
project(Rips_complex_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Rips_complex_test_unit test_rips_complex.cpp )
-target_link_libraries(Rips_complex_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Rips_complex_test_unit ${TBB_LIBRARIES})
endif()
@@ -12,4 +11,4 @@ endif()
file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY "${CMAKE_SOURCE_DIR}/data/distance_matrix/full_square_distance_matrix.csv" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-gudhi_add_coverage_test(Rips_complex_test_unit)
+gudhi_add_boost_test(Rips_complex_test_unit)
diff --git a/src/Rips_complex/utilities/ripscomplex.md b/src/Rips_complex/utilities/ripscomplex.md
index 03838085..61f31e3c 100644
--- a/src/Rips_complex/utilities/ripscomplex.md
+++ b/src/Rips_complex/utilities/ripscomplex.md
@@ -99,6 +99,7 @@ where `dim` is the dimension of the homological feature, `birth` and `death` are
* `-h [ --help ]` Produce help message
* `-o [ --output-file ]` Name of file in which the persistence diagram is written. Default print in standard output.
+* `-r [ --max-edge-length ]` (default = inf) Maximal length of an edge for the Rips complex construction.
* `-e [ --approximation ]` (default = .5) Epsilon, where the sparse Rips complex is a (1+epsilon)/(1-epsilon)-approximation of the Rips complex.
* `-d [ --cpx-dimension ]` (default = INT_MAX) Maximal dimension of the Rips complex we want to compute.
* `-p [ --field-charac ]` (default = 11) Characteristic p of the coefficient field Z/pZ for computing homology.
diff --git a/src/Rips_complex/utilities/sparse_rips_persistence.cpp b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
index 1a86eafe..cefd8a67 100644
--- a/src/Rips_complex/utilities/sparse_rips_persistence.cpp
+++ b/src/Rips_complex/utilities/sparse_rips_persistence.cpp
@@ -28,21 +28,24 @@ using Persistent_cohomology = Gudhi::persistent_cohomology::Persistent_cohomolog
using Point = std::vector<double>;
using Points_off_reader = Gudhi::Points_off_reader<Point>;
-void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag, double& epsilon,
+void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
+ Filtration_value& threshold, double& epsilon,
int& dim_max, int& p, Filtration_value& min_persistence);
int main(int argc, char* argv[]) {
std::string off_file_points;
std::string filediag;
+ Filtration_value threshold;
double epsilon;
int dim_max;
int p;
Filtration_value min_persistence;
- program_options(argc, argv, off_file_points, filediag, epsilon, dim_max, p, min_persistence);
+ program_options(argc, argv, off_file_points, filediag, threshold, epsilon, dim_max, p, min_persistence);
Points_off_reader off_reader(off_file_points);
- Sparse_rips sparse_rips(off_reader.get_point_cloud(), Gudhi::Euclidean_distance(), epsilon);
+ Sparse_rips sparse_rips(off_reader.get_point_cloud(), Gudhi::Euclidean_distance(), epsilon,
+ -std::numeric_limits<Filtration_value>::infinity(), threshold);
// Construct the Rips complex in a Simplex Tree
Simplex_tree simplex_tree;
@@ -73,7 +76,8 @@ int main(int argc, char* argv[]) {
return 0;
}
-void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag, double& epsilon,
+void program_options(int argc, char* argv[], std::string& off_file_points, std::string& filediag,
+ Filtration_value& threshold, double& epsilon,
int& dim_max, int& p, Filtration_value& min_persistence) {
namespace po = boost::program_options;
po::options_description hidden("Hidden options");
@@ -84,6 +88,9 @@ void program_options(int argc, char* argv[], std::string& off_file_points, std::
visible.add_options()("help,h", "produce help message")(
"output-file,o", po::value<std::string>(&filediag)->default_value(std::string()),
"Name of file in which the persistence diagram is written. Default print in std::cout")(
+ "max-edge-length,r",
+ po::value<Filtration_value>(&threshold)->default_value(std::numeric_limits<Filtration_value>::infinity()),
+ "Maximal length of an edge for the Rips complex construction.")(
"approximation,e", po::value<double>(&epsilon)->default_value(.5),
"Epsilon, where the sparse Rips complex is a (1+epsilon)-approximation of the Rips complex.")(
"cpx-dimension,d", po::value<int>(&dim_max)->default_value(std::numeric_limits<int>::max()),
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index fafdb01c..76608008 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -1378,7 +1378,7 @@ class Simplex_tree {
private:
bool rec_prune_above_filtration(Siblings* sib, Filtration_value filt) {
auto&& list = sib->members();
- auto last = std::remove_if(list.begin(), list.end(), [=](Dit_value_t& simplex) {
+ auto last = std::remove_if(list.begin(), list.end(), [this,filt](Dit_value_t& simplex) {
if (simplex.second.filtration() <= filt) return false;
if (has_children(&simplex)) rec_delete(simplex.second.children());
// dimension may need to be lowered
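
On the capture change above: [=] copy-captures filt and also silently captures this (needed for the member calls), and implicit capture of this via [=] is deprecated in C++20. Listing [this, filt] keeps behavior identical while documenting the lambda's dependencies. A small standalone sketch of the same pattern (illustrative type, not the Simplex_tree):

    #include <algorithm>
    #include <vector>

    struct Pruner {
      double limit;
      bool keep(double v) const { return v <= limit; }
      void prune(std::vector<double>& values, double filt) {
        // Explicit captures: `this` for the member call, `filt` by value.
        auto last = std::remove_if(values.begin(), values.end(),
                                   [this, filt](double v) {
                                     return !keep(v) || v > filt;
                                   });
        values.erase(last, values.end());
      }
    };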
diff --git a/src/Simplex_tree/test/CMakeLists.txt b/src/Simplex_tree/test/CMakeLists.txt
index 5bea3938..8b9163f5 100644
--- a/src/Simplex_tree/test/CMakeLists.txt
+++ b/src/Simplex_tree/test/CMakeLists.txt
@@ -1,38 +1,30 @@
project(Simplex_tree_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
# Do not forget to copy test files in current binary dir
file(COPY "simplex_tree_for_unit_test.txt" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
add_executable ( Simplex_tree_test_unit simplex_tree_unit_test.cpp )
-target_link_libraries(Simplex_tree_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Simplex_tree_test_unit ${TBB_LIBRARIES})
endif()
-
-gudhi_add_coverage_test(Simplex_tree_test_unit)
+gudhi_add_boost_test(Simplex_tree_test_unit)
add_executable ( Simplex_tree_remove_test_unit simplex_tree_remove_unit_test.cpp )
-target_link_libraries(Simplex_tree_remove_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Simplex_tree_remove_test_unit ${TBB_LIBRARIES})
endif()
-
-gudhi_add_coverage_test(Simplex_tree_remove_test_unit)
+gudhi_add_boost_test(Simplex_tree_remove_test_unit)
add_executable ( Simplex_tree_iostream_operator_test_unit simplex_tree_iostream_operator_unit_test.cpp )
-target_link_libraries(Simplex_tree_iostream_operator_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Simplex_tree_iostream_operator_test_unit ${TBB_LIBRARIES})
endif()
-
-gudhi_add_coverage_test(Simplex_tree_iostream_operator_test_unit)
+gudhi_add_boost_test(Simplex_tree_iostream_operator_test_unit)
add_executable ( Simplex_tree_ctor_and_move_test_unit simplex_tree_ctor_and_move_unit_test.cpp )
-target_link_libraries(Simplex_tree_ctor_and_move_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Simplex_tree_ctor_and_move_test_unit ${TBB_LIBRARIES})
endif()
-
-gudhi_add_coverage_test(Simplex_tree_ctor_and_move_test_unit)
+gudhi_add_boost_test(Simplex_tree_ctor_and_move_test_unit)
diff --git a/src/Skeleton_blocker/test/CMakeLists.txt b/src/Skeleton_blocker/test/CMakeLists.txt
index 19c65871..24b6c11e 100644
--- a/src/Skeleton_blocker/test/CMakeLists.txt
+++ b/src/Skeleton_blocker/test/CMakeLists.txt
@@ -1,17 +1,14 @@
project(Skeleton_blocker_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Skeleton_blocker_test_unit test_skeleton_blocker_complex.cpp )
-target_link_libraries(Skeleton_blocker_test_unit ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
add_executable ( Skeleton_blocker_test_geometric_complex test_skeleton_blocker_geometric_complex.cpp )
-target_link_libraries(Skeleton_blocker_test_geometric_complex ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
add_executable ( Skeleton_blocker_test_simplifiable test_skeleton_blocker_simplifiable.cpp )
-target_link_libraries(Skeleton_blocker_test_simplifiable ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
# Do not forget to copy test files in current binary dir
file(COPY "test2.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-gudhi_add_coverage_test(Skeleton_blocker_test_unit)
-gudhi_add_coverage_test(Skeleton_blocker_test_geometric_complex)
-gudhi_add_coverage_test(Skeleton_blocker_test_simplifiable)
+gudhi_add_boost_test(Skeleton_blocker_test_unit)
+gudhi_add_boost_test(Skeleton_blocker_test_geometric_complex)
+gudhi_add_boost_test(Skeleton_blocker_test_simplifiable)
diff --git a/src/Spatial_searching/include/gudhi/Kd_tree_search.h b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
index fedbb32e..87969dd9 100644
--- a/src/Spatial_searching/include/gudhi/Kd_tree_search.h
+++ b/src/Spatial_searching/include/gudhi/Kd_tree_search.h
@@ -30,11 +30,11 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Kd_tree_search is only available for CGAL >= 4.11
#endif
#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
+# error Kd_tree_search is only available for Eigen3 >= 3.1.0 installed with CGAL
#endif
namespace Gudhi {
diff --git a/src/Spatial_searching/test/CMakeLists.txt b/src/Spatial_searching/test/CMakeLists.txt
index 18f7c6b8..a6c23951 100644
--- a/src/Spatial_searching/test/CMakeLists.txt
+++ b/src/Spatial_searching/test/CMakeLists.txt
@@ -1,11 +1,8 @@
project(Spatial_searching_tests)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_test_coverage)
-
+ include(GUDHI_boost_test)
add_executable( Spatial_searching_test_Kd_tree_search test_Kd_tree_search.cpp )
- target_link_libraries(Spatial_searching_test_Kd_tree_search
- ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
- gudhi_add_coverage_test(Spatial_searching_test_Kd_tree_search)
+ target_link_libraries(Spatial_searching_test_Kd_tree_search ${CGAL_LIBRARY})
+ gudhi_add_boost_test(Spatial_searching_test_Kd_tree_search)
endif ()
diff --git a/src/Subsampling/test/CMakeLists.txt b/src/Subsampling/test/CMakeLists.txt
index cf54788e..354021c1 100644
--- a/src/Subsampling/test/CMakeLists.txt
+++ b/src/Subsampling/test/CMakeLists.txt
@@ -1,18 +1,18 @@
project(Subsampling_tests)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_test_coverage)
+ include(GUDHI_boost_test)
add_executable( Subsampling_test_pick_n_random_points test_pick_n_random_points.cpp )
- target_link_libraries(Subsampling_test_pick_n_random_points ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Subsampling_test_pick_n_random_points ${CGAL_LIBRARY})
add_executable( Subsampling_test_choose_n_farthest_points test_choose_n_farthest_points.cpp )
- target_link_libraries(Subsampling_test_choose_n_farthest_points ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Subsampling_test_choose_n_farthest_points ${CGAL_LIBRARY})
add_executable(Subsampling_test_sparsify_point_set test_sparsify_point_set.cpp)
- target_link_libraries(Subsampling_test_sparsify_point_set ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Subsampling_test_sparsify_point_set ${CGAL_LIBRARY})
- gudhi_add_coverage_test(Subsampling_test_pick_n_random_points)
- gudhi_add_coverage_test(Subsampling_test_choose_n_farthest_points)
- gudhi_add_coverage_test(Subsampling_test_sparsify_point_set)
+ gudhi_add_boost_test(Subsampling_test_pick_n_random_points)
+ gudhi_add_boost_test(Subsampling_test_choose_n_farthest_points)
+ gudhi_add_boost_test(Subsampling_test_sparsify_point_set)
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h
index f59476b1..f058fa9f 100644
--- a/src/Tangential_complex/include/gudhi/Tangential_complex.h
+++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h
@@ -67,11 +67,11 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Tangential_complex is only available for CGAL >= 4.11
#endif
#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
+# error Tangential_complex is only available for Eigen3 >= 3.1.0 installed with CGAL
#endif
namespace sps = Gudhi::spatial_searching;
diff --git a/src/Tangential_complex/test/CMakeLists.txt b/src/Tangential_complex/test/CMakeLists.txt
index ae17a286..2207d67c 100644
--- a/src/Tangential_complex/test/CMakeLists.txt
+++ b/src/Tangential_complex/test/CMakeLists.txt
@@ -1,13 +1,13 @@
project(Tangential_complex_tests)
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- include(GUDHI_test_coverage)
+ include(GUDHI_boost_test)
add_executable( Tangential_complex_test_TC test_tangential_complex.cpp )
- target_link_libraries(Tangential_complex_test_TC ${CGAL_LIBRARY} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ target_link_libraries(Tangential_complex_test_TC ${CGAL_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Tangential_complex_test_TC ${TBB_LIBRARIES})
endif()
- gudhi_add_coverage_test(Tangential_complex_test_TC)
+ gudhi_add_boost_test(Tangential_complex_test_TC)
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/Toplex_map/test/CMakeLists.txt b/src/Toplex_map/test/CMakeLists.txt
index 59517db5..2997584d 100644
--- a/src/Toplex_map/test/CMakeLists.txt
+++ b/src/Toplex_map/test/CMakeLists.txt
@@ -1,11 +1,9 @@
project(Toplex_map_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable( Toplex_map_unit_test toplex_map_unit_test.cpp )
-target_link_libraries(Toplex_map_unit_test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-gudhi_add_coverage_test(Toplex_map_unit_test)
+gudhi_add_boost_test(Toplex_map_unit_test)
add_executable( Lazy_toplex_map_unit_test lazy_toplex_map_unit_test.cpp )
-target_link_libraries(Lazy_toplex_map_unit_test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-gudhi_add_coverage_test(Lazy_toplex_map_unit_test)
+gudhi_add_boost_test(Lazy_toplex_map_unit_test)
diff --git a/src/Witness_complex/include/gudhi/Euclidean_strong_witness_complex.h b/src/Witness_complex/include/gudhi/Euclidean_strong_witness_complex.h
index 7d3c2d6d..4d5e73f2 100644
--- a/src/Witness_complex/include/gudhi/Euclidean_strong_witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Euclidean_strong_witness_complex.h
@@ -25,11 +25,11 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Euclidean_strong_witness_complex is only available for CGAL >= 4.11
#endif
#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
+# error Euclidean_strong_witness_complex is only available for Eigen3 >= 3.1.0 installed with CGAL
#endif
namespace Gudhi {
diff --git a/src/Witness_complex/include/gudhi/Euclidean_witness_complex.h b/src/Witness_complex/include/gudhi/Euclidean_witness_complex.h
index 21682ec4..09cb7ec2 100644
--- a/src/Witness_complex/include/gudhi/Euclidean_witness_complex.h
+++ b/src/Witness_complex/include/gudhi/Euclidean_witness_complex.h
@@ -27,11 +27,11 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error Euclidean_witness_complex is only available for CGAL >= 4.11
#endif
#if !EIGEN_VERSION_AT_LEAST(3,1,0)
-# error Alpha_complex_3d is only available for Eigen3 >= 3.1.0 installed with CGAL
+# error Euclidean_witness_complex is only available for Eigen3 >= 3.1.0 installed with CGAL
#endif
namespace Gudhi {
diff --git a/src/Witness_complex/test/CMakeLists.txt b/src/Witness_complex/test/CMakeLists.txt
index 96188e46..690933aa 100644
--- a/src/Witness_complex/test/CMakeLists.txt
+++ b/src/Witness_complex/test/CMakeLists.txt
@@ -1,22 +1,20 @@
project(Witness_complex_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Witness_complex_test_simple_witness_complex test_simple_witness_complex.cpp )
-target_link_libraries(Witness_complex_test_simple_witness_complex ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Witness_complex_test_simple_witness_complex ${TBB_LIBRARIES})
endif(TBB_FOUND)
-gudhi_add_coverage_test(Witness_complex_test_simple_witness_complex)
+gudhi_add_boost_test(Witness_complex_test_simple_witness_complex)
# CGAL and Eigen3 are required for Euclidean version of Witness
if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
add_executable ( Witness_complex_test_euclidean_simple_witness_complex test_euclidean_simple_witness_complex.cpp )
- target_link_libraries(Witness_complex_test_euclidean_simple_witness_complex ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
if (TBB_FOUND)
target_link_libraries(Witness_complex_test_euclidean_simple_witness_complex ${TBB_LIBRARIES})
endif(TBB_FOUND)
- gudhi_add_coverage_test(Witness_complex_test_euclidean_simple_witness_complex)
+ gudhi_add_boost_test(Witness_complex_test_euclidean_simple_witness_complex)
endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
diff --git a/src/cmake/modules/GUDHI_boost_test.cmake b/src/cmake/modules/GUDHI_boost_test.cmake
new file mode 100644
index 00000000..c3b29883
--- /dev/null
+++ b/src/cmake/modules/GUDHI_boost_test.cmake
@@ -0,0 +1,26 @@
+if (WITH_GUDHI_BOOST_TEST_COVERAGE)
+ # Make CTest output XML coverage report - WITH_GUDHI_BOOST_TEST_COVERAGE must be set - default is OFF
+ if (GCOVR_PATH)
+ # for gcovr to make coverage reports - Corbera Jenkins plugin
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
+ endif()
+ if (GPROF_PATH)
+ # for gprof to make coverage reports - Jenkins
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg")
+ endif()
+ set(GUDHI_UT_LOG_FORMAT "--log_format=XML")
+ set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml")
+ set(GUDHI_UT_LOG_LEVEL "--log_level=test_suite")
+ set(GUDHI_UT_REPORT_LEVEL "--report_level=no")
+else (WITH_GUDHI_BOOST_TEST_COVERAGE)
+ # Make CTest more verbose and color output
+ set(GUDHI_UT_LOG_LEVEL "--color_output")
+ set(GUDHI_UT_REPORT_LEVEL "--report_level=detailed")
+endif(WITH_GUDHI_BOOST_TEST_COVERAGE)
+
+function(gudhi_add_boost_test unitary_test)
+ target_link_libraries(${unitary_test} ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
+ add_test(NAME ${unitary_test} COMMAND $<TARGET_FILE:${unitary_test}>
+ ${GUDHI_UT_LOG_FORMAT} ${GUDHI_UT_LOG_SINK}
+ ${GUDHI_UT_LOG_LEVEL} ${GUDHI_UT_REPORT_LEVEL})
+endfunction()
diff --git a/src/cmake/modules/GUDHI_compilation_flags.cmake b/src/cmake/modules/GUDHI_compilation_flags.cmake
index 6cd2614d..34c2e065 100644
--- a/src/cmake/modules/GUDHI_compilation_flags.cmake
+++ b/src/cmake/modules/GUDHI_compilation_flags.cmake
@@ -73,3 +73,5 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
else()
message("++ Release compilation flags are: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}")
endif()
+
+option(WITH_GUDHI_BOOST_TEST_COVERAGE "Report xml coverage files on boost tests" OFF)
diff --git a/src/cmake/modules/GUDHI_test_coverage.cmake b/src/cmake/modules/GUDHI_test_coverage.cmake
deleted file mode 100644
index bea5b2d6..00000000
--- a/src/cmake/modules/GUDHI_test_coverage.cmake
+++ /dev/null
@@ -1,26 +0,0 @@
-
-if (GCOVR_PATH)
- # for gcovr to make coverage reports - Corbera Jenkins plugin
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
-endif()
-if (GPROF_PATH)
- # for gprof to make coverage reports - Jenkins
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg")
-endif()
-
-if (DEBUG_TRACES)
- # Make CTest more verbose with DEBUG_TRACES - no XML output
- set(GUDHI_UT_LOG_LEVEL "--log_level=all")
- set(GUDHI_UT_REPORT_LEVEL "--report_level=detailed")
-else()
- set(GUDHI_UT_LOG_FORMAT "--log_format=XML")
- set(GUDHI_UT_LOG_SINK "--log_sink=${CMAKE_BINARY_DIR}/${unitary_test}_UT.xml")
- set(GUDHI_UT_LOG_LEVEL "--log_level=test_suite")
- set(GUDHI_UT_REPORT_LEVEL "--report_level=no")
-endif()
-
-function(gudhi_add_coverage_test unitary_test)
- add_test(NAME ${unitary_test} COMMAND $<TARGET_FILE:${unitary_test}>
- ${GUDHI_UT_LOG_FORMAT} ${GUDHI_UT_LOG_SINK}
- ${GUDHI_UT_LOG_LEVEL} ${GUDHI_UT_REPORT_LEVEL})
-endfunction()
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 360a230b..24a34150 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -125,6 +125,8 @@ if( PYTHONINTERP_FOUND )
find_python_module("numpy")
find_python_module("scipy")
find_python_module("sphinx")
+ find_python_module("sklearn")
+ find_python_module("ot")
endif()
if(NOT GUDHI_PYTHON_PATH)
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index f75fb19e..4fa74330 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -18,12 +18,20 @@ foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} doc/${GUDHI_MODULE}/ \\ \n")
endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST})
-# Generate setup.py file to cythonize Gudhi - This file must be named setup.py by convention
+# Generate Doxyfile for Doxygen - cf. root CMakeLists.txt for explanation
configure_file(${CMAKE_SOURCE_DIR}/src/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile" @ONLY)
-
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile ${GUDHI_USER_VERSION_DIR}/Doxyfile)
+# Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation
+string(TIMESTAMP GUDHI_VERSION_YEAR "%Y")
+configure_file(${CMAKE_SOURCE_DIR}/biblio/how_to_cite_gudhi.bib.in "${CMAKE_CURRENT_BINARY_DIR}/biblio/how_to_cite_gudhi.bib" @ONLY)
+file(COPY "${CMAKE_SOURCE_DIR}/biblio/how_to_cite_cgal.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
+file(COPY "${CMAKE_SOURCE_DIR}/biblio/bibliography.bib" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/biblio/")
+# Copy biblio directory for user version
+add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
+ copy_directory ${CMAKE_CURRENT_BINARY_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
+
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/Conventions.txt ${GUDHI_USER_VERSION_DIR}/Conventions.txt)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
@@ -40,8 +48,6 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy ${CMAKE_SOURCE_DIR}/CMakeGUDHIVersion.txt ${GUDHI_USER_VERSION_DIR}/CMakeGUDHIVersion.txt)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_SOURCE_DIR}/biblio ${GUDHI_USER_VERSION_DIR}/biblio)
-add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH} ${GUDHI_USER_VERSION_DIR}/python)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/data ${GUDHI_USER_VERSION_DIR}/data)
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index 2e64bef8..ce2c5448 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -32,7 +32,10 @@ make \endverbatim
*
* \subsection testsuites Test suites
* To test your build, run the following command in a terminal:
- * \verbatim make test \endverbatim
+ * \verbatim make test \endverbatim
 * `make test` uses <a href="https://cmake.org/cmake/help/latest/manual/ctest.1.html">CTest</a> (the CMake test driver
 * program). If some of the tests fail, please send us the output of the following command:
+ * \verbatim ctest --output-on-failure \endverbatim
*
* \subsection documentationgeneration Documentation
* To generate the documentation, <a target="_blank" href="http://www.doxygen.org/">Doxygen</a> is required.
@@ -71,7 +74,7 @@ make doxygen
* your operating system is detailed here http://doc.cgal.org/latest/Manual/installation.html
*
* The following examples/utilities require the <a target="_blank" href="http://www.cgal.org/">Computational Geometry Algorithms
- * Library</a> (CGAL \cite cgal:eb-15b) and will not be built if CGAL version 4.11.0 or higher is not installed:
+ * Library</a> (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed:
* \li <a href="_simplex_tree_2example_alpha_shapes_3_simplex_tree_from_off_file_8cpp-example.html">
* Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp</a>
* \li <a href="_witness_complex_2strong_witness_persistence_8cpp-example.html">
diff --git a/src/common/doc/main_page.md b/src/common/doc/main_page.md
index d8cbf97f..0b4bfb7a 100644
--- a/src/common/doc/main_page.md
+++ b/src/common/doc/main_page.md
@@ -43,8 +43,9 @@
The filtration value of each simplex is computed as the square of the circumradius of the simplex if the
circumsphere is empty (the simplex is then said to be Gabriel), and as the minimum of the filtration
values of the codimension 1 cofaces that make it not Gabriel otherwise.
- All simplices that have a filtration value strictly greater than a given alpha squared value are not inserted into
- the complex.<br>
+ All simplices that have a filtration value \f$ > \alpha^2 \f$ are removed from the Delaunay complex
+ when creating the simplicial complex, if a maximum \f$ \alpha^2 \f$ value is specified.<br>
+ For performance reasons, it is advised to use \ref cgal &ge; 5.0.0.
</td>
<td width="15%">
<b>Author:</b> Vincent Rouvreau<br>
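
The filtration rule restated above (circumradius squared if Gabriel, otherwise inherited from cofaces) can be written compactly; the notation below is mine, not the page's:

    \mathrm{filt}(\sigma) =
      \begin{cases}
        r(\sigma)^2 & \text{if the circumsphere of } \sigma \text{ is empty (} \sigma \text{ is Gabriel)} \\
        \min\{\, \mathrm{filt}(\tau) : \tau \text{ a codimension-1 coface making } \sigma \text{ not Gabriel} \,\} & \text{otherwise,}
      \end{cases}

after which simplices with \mathrm{filt}(\sigma) > \alpha^2 are dropped when a maximum \alpha^2 value is specified.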
diff --git a/src/common/include/gudhi/Unitary_tests_utils.h b/src/common/include/gudhi/Unitary_tests_utils.h
index 4ad4dae8..9b86460a 100644
--- a/src/common/include/gudhi/Unitary_tests_utils.h
+++ b/src/common/include/gudhi/Unitary_tests_utils.h
@@ -14,6 +14,7 @@
#include <iostream>
#include <limits> // for std::numeric_limits<>
+#include <cmath> // for std::fabs
template<typename FloatingType >
void GUDHI_TEST_FLOAT_EQUALITY_CHECK(FloatingType a, FloatingType b,
@@ -25,4 +26,15 @@ void GUDHI_TEST_FLOAT_EQUALITY_CHECK(FloatingType a, FloatingType b,
BOOST_CHECK(std::fabs(a - b) <= epsilon);
}
+// That's the usual x86 issue where a+b==a+b can return false (without any NaN) because one of them was stored in
+// memory (and thus rounded to 64 bits) while the other is still in a register (80 bits).
+template<typename FloatingType >
+FloatingType GUDHI_PROTECT_FLOAT(FloatingType value) {
+ volatile FloatingType protected_value = value;
+#ifdef DEBUG_TRACES
+ std::cout << "GUDHI_PROTECT_FLOAT - " << protected_value << std::endl;
+#endif
+ return protected_value;
+}
+
#endif // UNITARY_TESTS_UTILS_H_
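
A sketch of the situation GUDHI_PROTECT_FLOAT guards against, under the stated x87 assumption (on SSE2 builds the two values typically compare equal):

    #include <iostream>

    template <typename Float>
    Float protect(Float value) {
      volatile Float stored = value;  // force a store at the declared precision
      return stored;
    }

    int main() {
      double third = 1.0 / 3.0;
      double in_memory = protect(third * 3.0);  // rounded to a 64-bit double
      double maybe_in_register = third * 3.0;   // may keep 80-bit precision on x87
      std::cout << (in_memory == maybe_in_register
                        ? "equal"
                        : "not equal (extended precision)") << '\n';
      return 0;
    }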
diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h
index fb69f832..9dd88ac4 100644
--- a/src/common/include/gudhi/random_point_generators.h
+++ b/src/common/include/gudhi/random_point_generators.h
@@ -21,7 +21,7 @@
// Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10
#if CGAL_VERSION_NR < 1041101000
-# error Alpha_complex_3d is only available for CGAL >= 4.11
+# error random_point_generators is only available for CGAL >= 4.11
#endif
namespace Gudhi {
diff --git a/src/common/include/gudhi/reader_utils.h b/src/common/include/gudhi/reader_utils.h
index 98335552..db31bf5c 100644
--- a/src/common/include/gudhi/reader_utils.h
+++ b/src/common/include/gudhi/reader_utils.h
@@ -293,6 +293,9 @@ Note: the function does not check that birth <= death.
**/
template <typename OutputIterator>
void read_persistence_intervals_and_dimension(std::string const& filename, OutputIterator out) {
+#ifdef DEBUG_TRACES
+ std::cout << "read_persistence_intervals_and_dimension - " << filename << std::endl;
+#endif // DEBUG_TRACES
std::ifstream in(filename);
if (!in.is_open()) {
std::string error_str("read_persistence_intervals_and_dimension - Unable to open file ");
@@ -307,6 +310,13 @@ void read_persistence_intervals_and_dimension(std::string const& filename, Outpu
if (line.length() != 0 && line[0] != '#') {
double numbers[4];
int n = sscanf(line.c_str(), "%lf %lf %lf %lf", &numbers[0], &numbers[1], &numbers[2], &numbers[3]);
+#ifdef DEBUG_TRACES
+ std::cout << "[" << n << "] = ";
+ for (int i = 0; i < n; i++) {
+ std::cout << numbers[i] << ",";
+ }
+ std::cout << std::endl;
+#endif // DEBUG_TRACES
if (n >= 2) {
int dim = (n >= 3 ? static_cast<int>(numbers[n - 3]) : -1);
*out++ = std::make_tuple(dim, numbers[n - 2], numbers[n - 1]);
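
The sscanf logic above encodes the file format: each data line carries two to four numbers, the last two are always birth and death, and a dimension (when present) sits immediately before them. A standalone sketch of that rule (hypothetical helper name):

    #include <cstdio>
    #include <tuple>

    // Returns (dimension, birth, death); dimension is -1 when the line
    // carries only birth and death. Mirrors the n >= 2 / n >= 3 tests above.
    std::tuple<int, double, double> parse_interval_line(const char* line) {
      double numbers[4];
      int n = std::sscanf(line, "%lf %lf %lf %lf",
                          &numbers[0], &numbers[1], &numbers[2], &numbers[3]);
      if (n < 2) return std::make_tuple(-1, 0.0, 0.0);  // not a data line
      int dim = (n >= 3 ? static_cast<int>(numbers[n - 3]) : -1);
      return std::make_tuple(dim, numbers[n - 2], numbers[n - 1]);
    }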
diff --git a/src/common/test/CMakeLists.txt b/src/common/test/CMakeLists.txt
index 0b49fa1e..34de7398 100644
--- a/src/common/test/CMakeLists.txt
+++ b/src/common/test/CMakeLists.txt
@@ -1,15 +1,10 @@
project(Common_tests)
-include(GUDHI_test_coverage)
+include(GUDHI_boost_test)
add_executable ( Common_test_points_off_reader test_points_off_reader.cpp )
-target_link_libraries(Common_test_points_off_reader ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
add_executable ( Common_test_distance_matrix_reader test_distance_matrix_reader.cpp )
-target_link_libraries(Common_test_distance_matrix_reader ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
-
add_executable ( Common_test_persistence_intervals_reader test_persistence_intervals_reader.cpp )
-target_link_libraries(Common_test_persistence_intervals_reader ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY})
# Do not forget to copy test files in current binary dir
file(COPY "${CMAKE_SOURCE_DIR}/data/points/alphacomplexdoc.off" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
@@ -19,6 +14,6 @@ file(COPY "${CMAKE_SOURCE_DIR}/src/common/test/persistence_intervals_with_dimens
file(COPY "${CMAKE_SOURCE_DIR}/src/common/test/persistence_intervals_with_field.pers" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
file(COPY "${CMAKE_SOURCE_DIR}/src/common/test/persistence_intervals_without_dimension.pers" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
-gudhi_add_coverage_test(Common_test_points_off_reader)
-gudhi_add_coverage_test(Common_test_distance_matrix_reader)
-gudhi_add_coverage_test(Common_test_persistence_intervals_reader)
+gudhi_add_boost_test(Common_test_points_off_reader)
+gudhi_add_boost_test(Common_test_distance_matrix_reader)
+gudhi_add_boost_test(Common_test_persistence_intervals_reader)
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 5508cbc7..b558d4c4 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -49,6 +49,9 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'alpha_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_witness_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_strong_witness_complex', ")
+ # Modules that should not be auto-imported in __init__.py
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -64,6 +67,12 @@ if(PYTHONINTERP_FOUND)
if(SCIPY_FOUND)
add_gudhi_debug_info("Scipy version ${SCIPY_VERSION}")
endif()
+ if(SKLEARN_FOUND)
+ add_gudhi_debug_info("Scikit-learn version ${SKLEARN_VERSION}")
+ endif()
+ if(OT_FOUND)
+ add_gudhi_debug_info("POT version ${OT_VERSION}")
+ endif()
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ")
@@ -105,9 +114,9 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ")
set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ")
set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ")
set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ")
set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ")
endif ()
@@ -153,10 +162,21 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_GMPXX', ")
add_GUDHI_PYTHON_lib("${GMPXX_LIBRARIES}")
set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${GMPXX_LIBRARIES_DIR}', ")
- message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
+ message("** Add gmpxx ${GMPXX_LIBRARIES_DIR}")
endif(GMPXX_FOUND)
endif(GMP_FOUND)
- endif(CGAL_FOUND)
+ if(MPFR_FOUND)
+ add_gudhi_debug_info("MPFR_LIBRARIES = ${MPFR_LIBRARIES}")
+ set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_USE_MPFR', ")
+ add_GUDHI_PYTHON_lib("${MPFR_LIBRARIES}")
+ # In case CGAL is not header only, all MPFR variables are set except MPFR_LIBRARIES_DIR - Just set it
+ if(NOT MPFR_LIBRARIES_DIR)
+ get_filename_component(MPFR_LIBRARIES_DIR ${MPFR_LIBRARIES} PATH)
+ endif(NOT MPFR_LIBRARIES_DIR)
+ set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${MPFR_LIBRARIES_DIR}', ")
+ message("** Add mpfr ${MPFR_LIBRARIES}")
+ endif(MPFR_FOUND)
+endif(CGAL_FOUND)
# Specific for Mac
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
@@ -199,6 +219,8 @@ if(PYTHONINTERP_FOUND)
# Other .py files
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
+ file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
add_custom_command(
OUTPUT gudhi.so
@@ -218,7 +240,6 @@ if(PYTHONINTERP_FOUND)
COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_rips_persistence_bottleneck_distance.py"
-f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -t 0.15 -d 3)
-
if(MATPLOTLIB_FOUND AND NUMPY_FOUND)
# Tangential
add_test(NAME tangential_complex_plain_homology_from_off_file_example_py_test
@@ -294,7 +315,6 @@ if(PYTHONINTERP_FOUND)
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_points_example.py")
-
if(MATPLOTLIB_FOUND AND NUMPY_FOUND)
add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
@@ -302,9 +322,7 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_diagram_persistence_from_off_file_example.py"
--no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -a 0.6)
endif()
-
add_gudhi_py_test(test_alpha_complex)
-
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
@@ -371,37 +389,57 @@ if(PYTHONINTERP_FOUND)
# Reader utils
add_gudhi_py_test(test_reader_utils)
+ # Wasserstein
+ if(OT_FOUND)
+ add_gudhi_py_test(test_wasserstein_distance)
+ endif(OT_FOUND)
+
+ # Representations
+ if(SKLEARN_FOUND AND MATPLOTLIB_FOUND)
+ add_gudhi_py_test(test_representations)
+ endif()
+
# Documentation generation is available through sphinx - requires all modules
if(SPHINX_PATH)
if(MATPLOTLIB_FOUND)
if(NUMPY_FOUND)
if(SCIPY_FOUND)
- if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
- # User warning - Sphinx is a static pages generator, and configured to work fine with user_version
- # Images and biblio warnings because not found on developper version
- if (GUDHI_PYTHON_PATH STREQUAL "src/python")
- set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for user version, you run it on developper version. Images and biblio will miss")
- endif()
- # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
- add_custom_target(sphinx
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
- DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
- COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
-
- add_test(NAME sphinx_py_test
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
- COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
- ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
-
- # Set missing or not modules
- set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
- else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
+ if(SKLEARN_FOUND)
+ if(OT_FOUND)
+ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
+ # User warning - Sphinx is a static page generator, configured to work fine with user_version
+ # Image and biblio warnings appear because they are not found in the developer version
+ if (GUDHI_PYTHON_PATH STREQUAL "src/python")
+ set (GUDHI_SPHINX_MESSAGE "${GUDHI_SPHINX_MESSAGE} \n WARNING : Sphinx is configured for the user version, but you are running it on the developer version. Images and biblio will be missing")
+ endif()
+ # sphinx target requires gudhi.so, because conf.py reads gudhi version from it
+ add_custom_target(sphinx
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/doc
+ COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ ${SPHINX_PATH} -b html ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/sphinx
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/gudhi.so"
+ COMMENT "${GUDHI_SPHINX_MESSAGE}" VERBATIM)
+
+ add_test(NAME sphinx_py_test
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ COMMAND ${CMAKE_COMMAND} -E env "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}"
+ ${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
+
+ # Set missing or not modules
+ set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
+ else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ else(OT_FOUND)
+ message("++ Python documentation module will not be compiled because POT was not found")
+ set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
+ endif(OT_FOUND)
+ else(SKLEARN_FOUND)
+ message("++ Python documentation module will not be compiled because scikit-learn was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ endif(SKLEARN_FOUND)
else(SCIPY_FOUND)
message("++ Python documentation module will not be compiled because scipy was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/python/doc/_templates/layout.html b/src/python/doc/_templates/layout.html
index fe64fb3d..2f2d9c72 100644
--- a/src/python/doc/_templates/layout.html
+++ b/src/python/doc/_templates/layout.html
@@ -56,12 +56,16 @@
</a></p>
{%- endif %}
{%- endblock %}
- <h2><a href="index.html">GUDHI</a></h2>
- <h2><a href="fileformats.html">File formats</a></h2>
- <h2><a href="installation.html">GUDHI installation</a></h2>
- <h2><a href="citation.html">Acknowledging the GUDHI library</a></h2>
- <h2><a href="genindex.html">Index</a></h2>
- <h2><a href="examples.html">Examples</a></h2>
+ <b>
+ <ul style="list-style-type:circle;">
+ <li><a href="index.html">Modules</a></li>
+ <li><a href="installation.html">Installation</a></li>
+ <li><a href="examples.html">Examples</a></li>
+ <li><a href="fileformats.html">File formats</a></li>
+ <li><a href="citation.html">Acknowledging</a></li>
+ <li><a href="genindex.html">Index</a></li>
+ </ul>
+ </b>
{%- if sidebars != None %}
{#- new style sidebar: explicitly include/exclude templates #}
{%- for sidebartemplate in sidebars %}
diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc
index c5ba9dc7..a1184663 100644
--- a/src/python/doc/alpha_complex_sum.inc
+++ b/src/python/doc/alpha_complex_sum.inc
@@ -9,9 +9,9 @@
| | circumradius of the simplex if the circumsphere is empty (the simplex | :Copyright: MIT (`GPL v3 </licensing/>`_) |
| | is then said to be Gabriel), and as the minimum of the filtration | |
| | values of the codimension 1 cofaces that make it not Gabriel | :Requires: `Eigen <installation.html#eigen>`__ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`__ :math:`\geq` 4.11.0 |
- | | otherwise. All simplices that have a filtration value strictly | |
- | | greater than a given alpha squared value are not inserted into the | |
- | | complex. | |
+ | | otherwise. All simplices that have a filtration value | |
+ | | :math:`> \alpha^2` are removed from the Delaunay complex | |
+ | | when creating the simplicial complex if it is specified. | |
| | | |
| | This package requires having CGAL version 4.7 or higher (4.8.1 is | |
| | advised for better performance). | |
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index f9662a6d..60319e84 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -9,19 +9,20 @@ Definition
.. include:: alpha_complex_sum.inc
-Alpha_complex is constructing a :doc:`Simplex_tree <simplex_tree_ref>` using
+`AlphaComplex` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
-:cite:`cgal:hdj-t-15b` from `CGAL <http://www.cgal.org/>`_ (the Computational Geometry Algorithms Library
-:cite:`cgal:eb-15b`).
+:cite:`cgal:hdj-t-19b` from `CGAL <http://www.cgal.org/>`_ (the Computational Geometry Algorithms Library
+:cite:`cgal:eb-19b`).
Remarks
^^^^^^^
-When Alpha_complex is constructed with an infinite value of :math:`\alpha`, the complex is a Delaunay complex.
+When an :math:`\alpha`-complex is constructed with an infinite value of :math:`\alpha^2`,
+the complex is a Delaunay complex (with special filtration values).
Example from points
-------------------
-This example builds the Delaunay triangulation from the given points, and initializes the alpha complex with it:
+This example builds the alpha-complex from the given points:
.. testcode::
@@ -137,18 +138,20 @@ sets the filtration value (0 in case of a vertex - propagation will have no effe
Non decreasing filtration values
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-As the squared radii computed by CGAL are an approximation, it might happen that these alpha squared values do not
-quite define a proper filtration (i.e. non-decreasing with respect to inclusion).
-We fix that up by calling `Simplex_tree::make_filtration_non_decreasing()` (cf.
+As the squared radii computed by CGAL are an approximation, it might happen that these
+:math:`\alpha^2` values do not quite define a proper filtration (i.e. non-decreasing with
+respect to inclusion).
+We fix that up by calling :func:`~gudhi.SimplexTree.make_filtration_non_decreasing` (cf.
`C++ version <http://gudhi.gforge.inria.fr/doc/latest/index.html>`_).
Prune above given filtration value
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The simplex tree is pruned from the given maximum alpha squared value (cf. `Simplex_tree::prune_above_filtration()`
-in the `C++ version <http://gudhi.gforge.inria.fr/doc/latest/index.html>`_). Note that this does not provide any kind
-of speed-up, since we always first build the full filtered complex, so it is recommended not to use `max_alpha_square`.
-In the following example, a threshold of 59 is used.
+The simplex tree is pruned from the given maximum :math:`\alpha^2` value (cf.
+:func:`~gudhi.SimplexTree.prune_above_filtration`). Note that this does not provide any kind
+of speed-up, since we always first build the full filtered complex, so it is recommended not to use
+:paramref:`~gudhi.AlphaComplex.create_simplex_tree.max_alpha_square`.
+In the following example, a threshold of :math:`\alpha^2 = 32.0` is used.
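Both operations described above are also exposed directly on the simplex tree; here is a minimal sketch with illustrative values (not taken from the documentation's data set):

.. code-block:: python

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1], filtration=2.0)
    # Inserting the triangle with a smaller value than its edge [0, 1]
    # makes the filtration non-monotone on purpose
    st.insert([0, 1, 2], filtration=1.0)
    st.make_filtration_non_decreasing()  # raises [0, 1, 2] to 2.0
    st.prune_above_filtration(1.0)       # keeps only simplices with value <= 1.0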
Example from OFF file
@@ -165,7 +168,7 @@ Then, it is asked to display information about the alpha complex:
import gudhi
alpha_complex = gudhi.AlphaComplex(off_file=gudhi.__root_source_dir__ + \
'/data/points/alphacomplexdoc.off')
- simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=59.0)
+ simplex_tree = alpha_complex.create_simplex_tree(max_alpha_square=32.0)
result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
repr(simplex_tree.num_simplices()) + ' simplices - ' + \
repr(simplex_tree.num_vertices()) + ' vertices.'
@@ -178,7 +181,7 @@ the program output is:
.. testoutput::
- Alpha complex is of dimension 2 - 23 simplices - 7 vertices.
+ Alpha complex is of dimension 2 - 20 simplices - 7 vertices.
[0] -> 0.00
[1] -> 0.00
[2] -> 0.00
@@ -199,9 +202,6 @@ the program output is:
[4, 6] -> 22.74
[4, 5, 6] -> 22.74
[3, 6] -> 30.25
- [2, 6] -> 36.50
- [2, 3, 6] -> 36.50
- [2, 4, 6] -> 37.24
CGAL citations
==============
diff --git a/src/python/doc/conf.py b/src/python/doc/conf.py
index e4c718c3..3cc5d1d6 100755
--- a/src/python/doc/conf.py
+++ b/src/python/doc/conf.py
@@ -39,7 +39,9 @@ extensions = [
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
+ 'sphinx.ext.napoleon',
'sphinxcontrib.bibtex',
+ 'sphinx_paramlinks',
]
todo_include_todos = True
diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst
index b13b500e..56cf0170 100644
--- a/src/python/doc/cubical_complex_user.rst
+++ b/src/python/doc/cubical_complex_user.rst
@@ -142,8 +142,7 @@ Or it can be defined as follows:
.. testcode::
from gudhi import PeriodicCubicalComplex as pcc
- periodic_cc = pcc(dimensions=[3,3],
- top_dimensional_cells= [0, 0, 0, 0, 1, 0, 0, 0, 0],
+ periodic_cc = pcc(top_dimensional_cells = [[0, 0, 0], [0, 1, 0], [0, 0, 0]],
periodic_dimensions=[True, False])
result_str = 'Periodic cubical complex is of dimension ' + repr(periodic_cc.dimension()) + ' - ' + \
repr(periodic_cc.num_simplices()) + ' simplices.'
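As a follow-up, a minimal sketch computing persistence on this complex, assuming the usual :code:`persistence()` interface of cubical complexes:

.. code-block:: python

    from gudhi import PeriodicCubicalComplex as pcc

    periodic_cc = pcc(top_dimensional_cells=[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                      periodic_dimensions=[True, False])
    # Persistence pairs as (dimension, (birth, death)) tuples
    print(periodic_cc.persistence(homology_coeff_field=2, min_persistence=0))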
diff --git a/src/python/doc/examples.rst b/src/python/doc/examples.rst
index edbc2f72..a42227e3 100644
--- a/src/python/doc/examples.rst
+++ b/src/python/doc/examples.rst
@@ -16,6 +16,9 @@ Examples
* :download:`periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py <../example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py>`
* :download:`bottleneck_basic_example.py <../example/bottleneck_basic_example.py>`
* :download:`gudhi_graphical_tools_example.py <../example/gudhi_graphical_tools_example.py>`
+ * :download:`plot_simplex_tree_dim012.py <../example/plot_simplex_tree_dim012.py>`
+ * :download:`plot_rips_complex.py <../example/plot_rips_complex.py>`
+ * :download:`plot_alpha_complex.py <../example/plot_alpha_complex.py>`
* :download:`witness_complex_from_nearest_landmark_table.py <../example/witness_complex_from_nearest_landmark_table.py>`
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
diff --git a/src/python/doc/img/sklearn-tda.png b/src/python/doc/img/sklearn-tda.png
new file mode 100644
index 00000000..f0ff07f4
--- /dev/null
+++ b/src/python/doc/img/sklearn-tda.png
Binary files differ
diff --git a/src/python/doc/index.rst b/src/python/doc/index.rst
index e379bc23..c36a578f 100644
--- a/src/python/doc/index.rst
+++ b/src/python/doc/index.rst
@@ -1,5 +1,5 @@
-GUDHI Python module documentation
-#################################
+GUDHI Python modules documentation
+##################################
.. figure::
../../doc/common/Gudhi_banner.png
@@ -23,7 +23,7 @@ Alpha complex
.. include:: alpha_complex_sum.inc
Rips complex
--------------
+------------
.. include:: rips_complex_sum.inc
@@ -73,6 +73,16 @@ Bottleneck distance
.. include:: bottleneck_distance_sum.inc
+Wasserstein distance
+====================
+
+.. include:: wasserstein_distance_sum.inc
+
+Persistence representations
+===========================
+
+.. include:: representations_sum.inc
+
Persistence graphical tools
===========================
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 77d9e8b3..40f3f44b 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -8,7 +8,7 @@ Installation
Conda
*****
The easiest way to install the Python version of GUDHI is using
-`conda <https://gudhi.inria.fr/licensing/>`_.
+`conda <https://gudhi.inria.fr/conda/>`_.
Compiling
*********
@@ -40,6 +40,20 @@ To build the GUDHI Python module, run the following commands in a terminal:
cd python
make
+.. note::
+
+ :code:`make python` (or :code:`make` in the python directory) is only a
+ `CMake custom target <https://cmake.org/cmake/help/latest/command/add_custom_target.html>`_
+ that shortcuts the :code:`python setup.py build_ext --inplace` command.
+ No other specific options (:code:`-j8` for parallel builds, or even :code:`make clean`, ...) are
+ available.
+ But one can still use :code:`python setup.py ...` specific options in the python directory:
+
+.. code-block:: bash
+
+ python setup.py clean --all # Clean former compilation
+ python setup.py build_ext -j 8 --inplace # Build in parallel
+
GUDHI Python module installation
================================
@@ -59,19 +73,40 @@ Or install it definitely in your Python packages folder:
# May require sudo or administrator privileges
make install
+.. note::
+
+ :code:`make install` is only a
+ `CMake custom target <https://cmake.org/cmake/help/latest/command/add_custom_target.html>`_
+ that shortcuts the :code:`python setup.py install` command.
+ It does not take :code:`CMAKE_INSTALL_PREFIX` into account.
+ But one can still use :code:`python setup.py install ...` specific options in the python directory:
+
+.. code-block:: bash
+
+ python setup.py install --prefix /home/gudhi # Install in /home/gudhi directory
Test suites
===========
-To test your build, `py.test <http://doc.pytest.org>`_ is optional. Run the
-following command in a terminal:
+To test your build, `py.test <http://doc.pytest.org>`_ is required. Run the
+following `CTest <https://cmake.org/cmake/help/latest/manual/ctest.1.html>`_
+(the CMake test driver program) command in a terminal:
.. code-block:: bash
cd /path-to-gudhi/build/python
# For windows, you have to set PYTHONPATH environment variable
export PYTHONPATH='$PYTHONPATH:/path-to-gudhi/build/python'
- make test
+ ctest
+
+.. note::
+
+ One can use specific :code:`ctest` options in the python directory:
+
+.. code-block:: bash
+
+ # Launch tests in parallel on 8 cores and set failing tests in verbose mode
+ ctest -j 8 --output-on-failure
Debugging issues
================
@@ -215,12 +250,27 @@ The following examples require the `Matplotlib <http://matplotlib.org>`_:
* :download:`euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py>`
* :download:`euclidean_witness_complex_diagram_persistence_from_off_file_example.py <../example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py>`
+Python Optimal Transport
+========================
+
+The :doc:`Wasserstein distance </wasserstein_distance_user>`
+module requires `POT <https://pot.readthedocs.io/>`_, a library that provides
+several solvers for optimization problems related to Optimal Transport.
+
+Scikit-learn
+============
+
+The :doc:`persistence representations </representations>` module requires
+`scikit-learn <https://scikit-learn.org/>`_, a Python-based ecosystem of
+open-source software for machine learning.
+
SciPy
=====
-The :doc:`persistence graphical tools </persistence_graphical_tools_user>`
-module requires `SciPy <http://scipy.org>`_, a Python-based ecosystem of
-open-source software for mathematics, science, and engineering.
+The :doc:`persistence graphical tools </persistence_graphical_tools_user>` and
+:doc:`Wasserstein distance </wasserstein_distance_user>` modules require `SciPy
+<http://scipy.org>`_, a Python-based ecosystem of open-source software for
+mathematics, science, and engineering.
Threading Building Blocks
=========================
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index b2124fdd..80002db6 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -20,16 +20,17 @@ This function can display the persistence result as a barcode:
.. plot::
:include-source:
+ import matplotlib.pyplot as plot
import gudhi
off_file = gudhi.__root_source_dir__ + '/data/points/tore3D_300.off'
- point_cloud = gudhi.read_off(off_file=off_file)
+ point_cloud = gudhi.read_points_from_off_file(off_file=off_file)
rips_complex = gudhi.RipsComplex(points=point_cloud, max_edge_length=0.7)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=3)
diag = simplex_tree.persistence(min_persistence=0.4)
- plot = gudhi.plot_persistence_barcode(diag)
+ gudhi.plot_persistence_barcode(diag)
plot.show()
Show persistence as a diagram
@@ -43,14 +44,15 @@ This function can display the persistence result as a diagram:
.. plot::
:include-source:
+ import matplotlib.pyplot as plot
import gudhi
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
'/data/persistence_diagram/rips_on_tore3D_1307.pers'
- plt = gudhi.plot_persistence_diagram(persistence_file=persistence_file,
+ gudhi.plot_persistence_diagram(persistence_file=persistence_file,
legend=True)
- plt.show()
+ plot.show()
Persistence density
-------------------
@@ -63,11 +65,19 @@ If you want more information on a specific dimension, for instance:
.. plot::
:include-source:
+ import matplotlib.pyplot as plot
import gudhi
-
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
'/data/persistence_diagram/rips_on_tore3D_1307.pers'
- plt = gudhi.plot_persistence_density(persistence_file=persistence_file,
- max_intervals=0, dimension=1, legend=True)
- plt.show()
+ birth_death = gudhi.read_persistence_intervals_in_dimension(
+ persistence_file=persistence_file,
+ only_this_dim=1)
+ pers_diag = [(1, elt) for elt in birth_death]
+ # Use subplots to display diagram and density side by side
+ fig, axes = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
+ gudhi.plot_persistence_diagram(persistence=pers_diag,
+ axes=axes[0])
+ gudhi.plot_persistence_density(persistence=pers_diag,
+ dimension=1, legend=True, axes=axes[1])
+ plot.show()
diff --git a/src/python/doc/reader_utils_ref.rst b/src/python/doc/reader_utils_ref.rst
index f3ecebad..b8977a5a 100644
--- a/src/python/doc/reader_utils_ref.rst
+++ b/src/python/doc/reader_utils_ref.rst
@@ -6,7 +6,7 @@
Reader utils reference manual
=============================
-.. autofunction:: gudhi.read_off
+.. autofunction:: gudhi.read_points_from_off_file
.. autofunction:: gudhi.read_lower_triangular_matrix_from_csv_file
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
new file mode 100644
index 00000000..11dcbcf9
--- /dev/null
+++ b/src/python/doc/representations.rst
@@ -0,0 +1,72 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+======================
+Representations manual
+======================
+
+.. include:: representations_sum.inc
+
+This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
+
+A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time.
+
+A small example is provided:
+
+.. only:: builder_html
+
+ * :download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>`
+
+
+Preprocessing
+-------------
+.. automodule:: gudhi.representations.preprocessing
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Vector methods
+--------------
+.. automodule:: gudhi.representations.vector_methods
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Kernel methods
+--------------
+.. automodule:: gudhi.representations.kernel_methods
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Metrics
+-------
+.. automodule:: gudhi.representations.metrics
+ :members:
+ :special-members:
+ :show-inheritance:
+
+Basic example
+-------------
+
+This example computes the first two Landscapes associated to a persistence diagram with four points. The landscapes are evaluated on ten samples, leading to two vectors with ten coordinates each, that are eventually concatenated in order to produce a single vector representation.
+
+.. testcode::
+
+ import numpy as np
+ from gudhi.representations import Landscape
+ # A single diagram with 4 points
+ D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])
+ diags = [D]
+ l=Landscape(num_landscapes=2,resolution=10).fit_transform(diags)
+ print(l)
+
+The output is:
+
+.. testoutput::
+
+ [[1.02851895 2.05703791 2.57129739 1.54277843 0.89995409 1.92847304
+ 2.95699199 3.08555686 2.05703791 1.02851895 0. 0.64282435
+ 0. 0. 0.51425948 0. 0. 0.
+ 0.77138922 1.02851895]]
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
new file mode 100644
index 00000000..700828f1
--- /dev/null
+++ b/src/python/doc/representations_sum.inc
@@ -0,0 +1,14 @@
+.. table::
+ :widths: 30 50 20
+
+ +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
+ | | | :Introduced in: GUDHI 3.1.0 |
+ | | | |
+ | | | :Copyright: MIT |
+ | | | |
+ | | | :Requires: scikit-learn |
+ +------------------------------------------------------------------+----------------------------------------------------------------+-----------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+----------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index 3f6b960d..a27573e8 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -40,12 +40,12 @@ A vertex name corresponds to the index of the point in the given range (aka. the
On this example, as edges (4,5), (4,6) and (5,6) are in the complex, simplex (4,5,6) is added with the filtration value
set with :math:`max(filtration(4,5), filtration(4,6), filtration(5,6))`. And so on for simplex (0,1,2,3).
-If the `RipsComplex` interfaces are not detailed enough for your need, please refer to rips_persistence_step_by_step.cpp
-C++ example, where the graph construction over the Simplex_tree is more detailed.
+If the :doc:`RipsComplex <rips_complex_ref>` interfaces are not detailed enough for your needs, please refer to
+rips_persistence_step_by_step.cpp C++ example, where the graph construction over the Simplex_tree is more detailed.
A Rips complex can easily become huge, even if we limit the length of the edges
and the dimension of the simplices. One easy trick, before building a Rips
-complex on a point cloud, is to call `sparsify_point_set` which removes points
+complex on a point cloud, is to call :func:`~gudhi.sparsify_point_set` which removes points
that are too close to each other. This does not change its persistence diagram
by more than the length used to define "too close".
@@ -57,7 +57,7 @@ a :math:`\frac{1}{1-\varepsilon}`-interleaving, although in practice the
error is usually smaller. A more intuitive presentation of the idea is
available in :cite:`cavanna15geometric`, and in a video
:cite:`cavanna15visualizing`. Passing an extra argument `sparse=0.3` at the
-construction of a `RipsComplex` object asks it to build a sparse Rips with
+construction of a :class:`~gudhi.RipsComplex` object asks it to build a sparse Rips with
parameter :math:`\varepsilon=0.3`, while the default `sparse=None` builds the
regular Rips complex.
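A minimal sketch of the sparse variant described above, on an arbitrarily chosen point set:

.. code-block:: python

    import gudhi

    points = [[0, 0], [1, 0], [0, 1], [1, 1], [5, 5]]
    # sparse=0.3 requests the approximate (sparse) Rips complex with
    # parameter epsilon = 0.3; the default sparse=None builds the regular one
    rips = gudhi.RipsComplex(points=points, sparse=0.3)
    simplex_tree = rips.create_simplex_tree(max_dimension=2)
    print(simplex_tree.num_simplices())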
@@ -69,7 +69,7 @@ Example from a point cloud
^^^^^^^^^^^^^^^^^^^^^^^^^^
This example builds the neighborhood graph from the given points, up to max_edge_length.
-Then it creates a :doc:`Simplex_tree <simplex_tree_ref>` with it.
+Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
Finally, it is asked to display information about the simplicial complex.
@@ -128,7 +128,7 @@ Example from OFF file
This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
points in an OFF file, and max_edge_length value.
-Then it creates a :doc:`Simplex_tree <simplex_tree_ref>` with it.
+Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
Finally, it is asked to display information about the Rips complex.
@@ -136,7 +136,8 @@ Finally, it is asked to display information about the Rips complex.
.. testcode::
import gudhi
- point_cloud = gudhi.read_off(off_file=gudhi.__root_source_dir__ + '/data/points/alphacomplexdoc.off')
+ off_file = gudhi.__root_source_dir__ + '/data/points/alphacomplexdoc.off'
+ point_cloud = gudhi.read_points_from_off_file(off_file = off_file)
rips_complex = gudhi.RipsComplex(points=point_cloud, max_edge_length=12.0)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
@@ -178,7 +179,7 @@ Example from a distance matrix
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example builds the one skeleton graph from the given distance matrix, and max_edge_length value.
-Then it creates a :doc:`Simplex_tree <simplex_tree_ref>` with it.
+Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
Finally, it is asked to display information about the simplicial complex.
@@ -233,7 +234,7 @@ Example from csv file
This example builds the :doc:`RipsComplex <rips_complex_ref>` from the given
distance matrix in a csv file, and max_edge_length value.
-Then it creates a :doc:`Simplex_tree <simplex_tree_ref>` with it.
+Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
Finally, it is asked to display information about the Rips complex.
@@ -286,7 +287,7 @@ Example from a correlation matrix
Analogously to the case of a distance matrix, Rips complexes can also be constructed based on a correlation matrix.
Given a correlation matrix M, component-wise 1-M is a distance matrix.
This example builds the one skeleton graph from the given correlation matrix and threshold value.
-Then it creates a :doc:`Simplex_tree <simplex_tree_ref>` with it.
+Then it creates a :doc:`SimplexTree <simplex_tree_ref>` with it.
Finally, it is asked to display information about the simplicial complex.
@@ -307,7 +308,7 @@ Finally, it is asked to display information about the simplicial complex.
[0.01, 0.01, 0.72, 1., 0.7],
[0.89, 0.61, 0.03, 0.7, 1.]], float)
- distance_matrix = np.ones((correlation_matrix.shape),float) - correlation_matrix
+ distance_matrix = 1 - correlation_matrix
rips_complex = gudhi.RipsComplex(distance_matrix=distance_matrix, max_edge_length=1.0)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
@@ -342,6 +343,7 @@ until dimension 1 - one skeleton graph in other words), the output is:
[1, 3] -> 0.99
.. note::
- As persistence diagrams points will be under the diagonal,
+ If you compute the persistence diagram and convert distances back to correlation values,
+ points in the persistence diagram will be under the diagonal, and
bottleneck distance and persistence graphical tool will not work properly,
this is a known issue.
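A sketch of the back-conversion the note refers to, assuming :code:`diag` holds the (dimension, (birth, death)) pairs returned by :func:`~gudhi.SimplexTree.persistence`:

.. code-block:: python

    # A filtration value d corresponds to the correlation 1 - d, so after
    # conversion births exceed deaths and diagram points fall below the diagonal
    invert_diag = [(dim, (1 - birth, 1 - death)) for (dim, (birth, death)) in diag]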
diff --git a/src/python/doc/simplex_tree_user.rst b/src/python/doc/simplex_tree_user.rst
index aebeb29f..3df7617f 100644
--- a/src/python/doc/simplex_tree_user.rst
+++ b/src/python/doc/simplex_tree_user.rst
@@ -23,13 +23,9 @@ scheme.
Implementation
--------------
-There are two implementation of complexes. The first on is the Simplex_tree data structure.
-The simplex tree is an efficient and flexible data structure for representing general (filtered) simplicial complexes.
-The data structure is described in :cite`boissonnatmariasimplextreealgorithmica`.
-
-The second one is the Hasse_complex. The Hasse complex is a data structure representing explicitly all co-dimension 1
-incidence relations in a complex. It is consequently faster when accessing the boundary of a simplex, but is less
-compact and harder to construct from scratch.
+The :class:`simplex tree<gudhi.SimplexTree>` is an efficient and flexible data structure for representing general
+(filtered) simplicial complexes.
+The data structure is described in :cite:`boissonnatmariasimplextreealgorithmica`.
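A minimal sketch of the structure in use:

.. code-block:: python

    import gudhi

    st = gudhi.SimplexTree()
    # Inserting a triangle automatically inserts all of its faces
    st.insert([0, 1, 2], filtration=1.0)
    for simplex, filtration in st.get_filtration():
        print(simplex, filtration)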
Example
-------
diff --git a/src/python/doc/tangential_complex_user.rst b/src/python/doc/tangential_complex_user.rst
index ebfe1e29..852cf5b6 100644
--- a/src/python/doc/tangential_complex_user.rst
+++ b/src/python/doc/tangential_complex_user.rst
@@ -107,12 +107,12 @@ inconsistencies, but is not guaranteed to succeed.
Output
^^^^^^
-The result of the computation is exported as a Simplex_tree. It is the union of
+The result of the computation is exported as a :class:`~gudhi.SimplexTree`. It is the union of
the stars of all the input points. A vertex in the Simplex Tree is the index of
the point in the range provided by the user. The point corresponding to a
-vertex can also be obtained through the Tangential_complex::get_point function.
+vertex can also be obtained through the :func:`gudhi.TangentialComplex.get_point` function.
Note that even if the positions of the points are perturbed, their original
-positions are kept (e.g. Tangential_complex::get_point returns the original
+positions are kept (e.g. :func:`~gudhi.TangentialComplex.get_point` returns the original
position of the point).
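A minimal sketch, assuming the Python interface spells the parameter :code:`intrisic_dim` and requires an explicit call to :code:`compute_tangential_complex()` before creating the simplex tree:

.. code-block:: python

    import gudhi

    tc = gudhi.TangentialComplex(intrisic_dim=1,
                                 points=[[0.0, 0.0], [1.0, 0.1], [2.0, 0.0]])
    tc.compute_tangential_complex()
    st = tc.create_simplex_tree()
    # Original (unperturbed) position of the point behind vertex 0
    print(tc.get_point(0))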
The result can be obtained after the computation of the Tangential complex
diff --git a/src/python/doc/wasserstein_distance_sum.inc b/src/python/doc/wasserstein_distance_sum.inc
new file mode 100644
index 00000000..a97f428d
--- /dev/null
+++ b/src/python/doc/wasserstein_distance_sum.inc
@@ -0,0 +1,14 @@
+.. table::
+ :widths: 30 50 20
+
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
+ | .. figure:: | The q-Wasserstein distance measures the similarity between two | :Author: Theo Lacombe |
+ | ../../doc/Bottleneck_distance/perturb_pd.png | persistence diagrams. It's the minimum value c that can be achieved | |
+ | :figclass: align-center | by a perfect matching between the points of the two diagrams (+ all | :Introduced in: GUDHI 3.1.0 |
+ | | diagonal points), where the value of a matching is defined as the | |
+ | Wasserstein distance is the q-th root of the sum of the | q-th root of the sum of all edge lengths to the power q. Edge lengths| :Copyright: MIT |
+ | edge lengths to the power q. | are measured in norm p, for :math:`1 \leq p \leq \infty`. | |
+ | | | :Requires: Python Optimal Transport (POT) :math:`\geq` 0.5.1 |
+ +-----------------------------------------------------------------+----------------------------------------------------------------------+------------------------------------------------------------------+
+ | * :doc:`wasserstein_distance_user` | |
+ +-----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
new file mode 100644
index 00000000..32999a0c
--- /dev/null
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -0,0 +1,40 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Wasserstein distance user manual
+================================
+Definition
+----------
+
+.. include:: wasserstein_distance_sum.inc
+
+This implementation is based on ideas from "Large Scale Computation of Means and Clusters for Persistence Diagrams using Optimal Transport".
+
+Function
+--------
+.. autofunction:: gudhi.wasserstein.wasserstein_distance
+
+
+Basic example
+-------------
+
+This example computes the 1-Wasserstein distance between 2 persistence diagrams with the Euclidean ground metric.
+Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values.
+
+.. testcode::
+
+ import gudhi.wasserstein
+ import numpy as np
+
+ diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
+ diag2 = np.array([[2.8, 4.45],[9.5, 14.1]])
+
+ message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(diag1, diag2, order=1., internal_p=2.)
+ print(message)
+
+The output is:
+
+.. testoutput::
+
+ Wasserstein distance value = 1.45
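As a follow-up, the same diagrams with a different ground metric; this assumes :code:`internal_p` accepts :code:`numpy.inf`, as the stated range :math:`1 \leq p \leq \infty` suggests:

.. code-block:: python

    import numpy as np
    import gudhi.wasserstein

    diag1 = np.array([[2.7, 3.7], [9.6, 14.], [34.2, 34.974]])
    diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
    # Edge lengths measured in the sup norm instead of the Euclidean norm
    print(gudhi.wasserstein.wasserstein_distance(diag1, diag2,
                                                 order=1., internal_p=np.inf))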
diff --git a/src/python/doc/witness_complex_user.rst b/src/python/doc/witness_complex_user.rst
index 40e94134..7087fa98 100644
--- a/src/python/doc/witness_complex_user.rst
+++ b/src/python/doc/witness_complex_user.rst
@@ -47,7 +47,7 @@ which leads to definitions of **weak relaxed witness complex** (or just relaxed
In particular case of 0-relaxation, weak complex corresponds to **witness complex** introduced in
:cite:`de2004topological`, whereas 0-relaxed strong witness complex consists of just vertices and is not very
interesting. Hence for small relaxation weak version is preferable.
-However, to capture the homotopy type (for example using Gudhi::persistent_cohomology::Persistent_cohomology) it is
+However, to capture the homotopy type (for example using :func:`gudhi.SimplexTree.persistence`) it is
often necessary to work with higher filtration values. In this case strong relaxed witness complex is faster to compute
and offers similar results.
@@ -69,7 +69,7 @@ The construction of the Euclidean versions of complexes follow the same scheme:
In the non-Euclidean classes, the lists of nearest landmarks are supposed to be given as input.
-The constructors take on the steps 1 and 2, while the function 'create_complex' executes the step 3.
+The constructors take care of steps 1 and 2, while the function :func:`!create_complex` executes step 3, as the sketch below shows.
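A minimal sketch of that scheme, on a tiny synthetic point set:

.. code-block:: python

    import gudhi

    witnesses = [[0., 0.], [1., 0.], [0., 1.], [1., 1.]]
    landmarks = gudhi.pick_n_random_points(points=witnesses, nb_points=2)
    # Steps 1 and 2 happen in the constructor, step 3 in create_simplex_tree
    witness_complex = gudhi.EuclideanWitnessComplex(landmarks=landmarks,
                                                    witnesses=witnesses)
    simplex_tree = witness_complex.create_simplex_tree(max_alpha_square=2.0)
    print(simplex_tree.num_simplices())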
Constructing weak relaxed witness complex from an off file
----------------------------------------------------------
@@ -101,7 +101,7 @@ Let's start with a simple example, which reads an off point file and computes a
print("#####################################################################")
print("EuclideanWitnessComplex creation from points read in a OFF file")
- witnesses = gudhi.read_off(off_file=args.file)
+ witnesses = gudhi.read_points_from_off_file(off_file=args.file)
landmarks = gudhi.pick_n_random_points(points=witnesses, nb_points=args.number_of_landmarks)
message = "EuclideanWitnessComplex with max_edge_length=" + repr(args.max_alpha_square) + \
diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
index b8f283b3..4079a469 100755
--- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -60,8 +61,8 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
else:
print(args.file, "is not a valid OFF file")
diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
index 086307ee..d5c33ec8 100755
--- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py
+++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
@@ -35,7 +35,7 @@ args = parser.parse_args()
with open(args.file, "r") as f:
first_line = f.readline()
if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- point_cloud = gudhi.read_off(off_file=args.file)
+ point_cloud = gudhi.read_points_from_off_file(off_file=args.file)
print("#####################################################################")
print("RipsComplex creation from points read in a OFF file")
diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py
new file mode 100755
index 00000000..119072eb
--- /dev/null
+++ b/src/python/example/diagram_vectorizations_distances_kernels.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+import matplotlib.pyplot as plt
+import numpy as np
+from sklearn.kernel_approximation import RBFSampler
+from sklearn.preprocessing import MinMaxScaler
+
+from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouette, BettiCurve, ComplexPolynomial,\
+ TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
+ PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
+ PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
+ SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel
+
+D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
+diags = [D]
+
+diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags)
+diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags)
+diags = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]).fit_transform(diags)
+
+D = diags[0]
+plt.scatter(D[:,0],D[:,1])
+plt.plot([0.,1.],[0.,1.])
+plt.title("Test Persistence Diagram for vector methods")
+plt.show()
+
+LS = Landscape(resolution=1000)
+L = LS.fit_transform(diags)
+plt.plot(L[0][:1000])
+plt.plot(L[0][1000:2000])
+plt.plot(L[0][2000:3000])
+plt.title("Landscape")
+plt.show()
+
+def pow(n):
+ return lambda x: np.power(x[1]-x[0],n)
+
+SH = Silhouette(resolution=1000, weight=pow(2))
+sh = SH.fit_transform(diags)
+plt.plot(sh[0])
+plt.title("Silhouette")
+plt.show()
+
+BC = BettiCurve(resolution=1000)
+bc = BC.fit_transform(diags)
+plt.plot(bc[0])
+plt.title("Betti Curve")
+plt.show()
+
+CP = ComplexPolynomial(threshold=-1, polynomial_type="T")
+cp = CP.fit_transform(diags)
+print("Complex polynomial is " + str(cp[0,:]))
+
+TV = TopologicalVector(threshold=-1)
+tv = TV.fit_transform(diags)
+print("Topological vector is " + str(tv[0,:]))
+
+PI = PersistenceImage(bandwidth=.1, weight=lambda x: x[1], im_range=[0,1,0,1], resolution=[100,100])
+pi = PI.fit_transform(diags)
+plt.imshow(np.flip(np.reshape(pi[0], [100,100]), 0))
+plt.title("Persistence Image")
+plt.show()
+
+ET = Entropy(mode="scalar")
+et = ET.fit_transform(diags)
+print("Entropy statistic is " + str(et[0,:]))
+
+ET = Entropy(mode="vector", normalized=False)
+et = ET.fit_transform(diags)
+plt.plot(et[0])
+plt.title("Entropy function")
+plt.show()
+
+D = np.array([[1.,5.],[3.,6.],[2.,7.]])
+diags2 = [D]
+
+diags2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags2)
+
+D = diags[0]
+plt.scatter(D[:,0],D[:,1])
+D = diags2[0]
+plt.scatter(D[:,0],D[:,1])
+plt.plot([0.,1.],[0.,1.])
+plt.title("Test Persistence Diagrams for kernel methods")
+plt.show()
+
+def arctan(C,p):
+ return lambda x: C*np.arctan(np.power(x[1], p))
+
+PWG = PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))
+X = PWG.fit(diags)
+Y = PWG.transform(diags2)
+print("PWG kernel is " + str(Y[0][0]))
+
+PWG = PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))
+X = PWG.fit(diags)
+Y = PWG.transform(diags2)
+print("Approximate PWG kernel is " + str(Y[0][0]))
+
+PSS = PersistenceScaleSpaceKernel(bandwidth=1.)
+X = PSS.fit(diags)
+Y = PSS.transform(diags2)
+print("PSS kernel is " + str(Y[0][0]))
+
+PSS = PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
+X = PSS.fit(diags)
+Y = PSS.transform(diags2)
+print("Approximate PSS kernel is " + str(Y[0][0]))
+
+sW = SlicedWassersteinDistance(num_directions=100)
+X = sW.fit(diags)
+Y = sW.transform(diags2)
+print("SW distance is " + str(Y[0][0]))
+
+SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.)
+X = SW.fit(diags)
+Y = SW.transform(diags2)
+print("SW kernel is " + str(Y[0][0]))
+
+W = BottleneckDistance(epsilon=.001)
+X = W.fit(diags)
+Y = W.transform(diags2)
+print("Bottleneck distance is " + str(Y[0][0]))
+
+PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)
+X = PF.fit(diags)
+Y = PF.transform(diags2)
+print("PF kernel is " + str(Y[0][0]))
+
+PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
+X = PF.fit(diags)
+Y = PF.transform(diags2)
+print("Approximate PF kernel is " + str(Y[0][0]))
diff --git a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
index 610ba44f..4903667e 100755
--- a/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/euclidean_strong_witness_complex_diagram_persistence_from_off_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -46,7 +47,7 @@ with open(args.file, "r") as f:
print("#####################################################################")
print("EuclideanStrongWitnessComplex creation from points read in a OFF file")
- witnesses = gudhi.read_off(off_file=args.file)
+ witnesses = gudhi.read_points_from_off_file(off_file=args.file)
landmarks = gudhi.pick_n_random_points(
points=witnesses, nb_points=args.number_of_landmarks
)
@@ -75,8 +76,8 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
else:
print(args.file, "is not a valid OFF file")
diff --git a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
index 7587b732..339a8577 100755
--- a/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/euclidean_witness_complex_diagram_persistence_from_off_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -45,7 +46,7 @@ with open(args.file, "r") as f:
print("#####################################################################")
print("EuclideanWitnessComplex creation from points read in a OFF file")
- witnesses = gudhi.read_off(off_file=args.file)
+ witnesses = gudhi.read_points_from_off_file(off_file=args.file)
landmarks = gudhi.pick_n_random_points(
points=witnesses, nb_points=args.number_of_landmarks
)
@@ -74,8 +75,8 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
else:
print(args.file, "is not a valid OFF file")
diff --git a/src/python/example/gudhi_graphical_tools_example.py b/src/python/example/gudhi_graphical_tools_example.py
index 3b0ca54d..37ecbf53 100755
--- a/src/python/example/gudhi_graphical_tools_example.py
+++ b/src/python/example/gudhi_graphical_tools_example.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
@@ -29,15 +30,24 @@ persistence = [
(0, (0.0, 1.0)),
]
gudhi.plot_persistence_barcode(persistence)
+plot.show()
print("#####################################################################")
print("Show diagram persistence example")
-pplot = gudhi.plot_persistence_diagram(persistence)
-pplot.show()
+gudhi.plot_persistence_diagram(persistence)
+plot.show()
print("#####################################################################")
print("Show diagram persistence example with a confidence band")
-pplot = gudhi.plot_persistence_diagram(persistence, band=0.2)
-pplot.show()
+gudhi.plot_persistence_diagram(persistence, band=0.2)
+plot.show()
+
+print("#####################################################################")
+print("Show barcode and diagram persistence side by side example")
+fig, axes = plot.subplots(nrows=1, ncols=2)
+gudhi.plot_persistence_barcode(persistence, axes = axes[0])
+gudhi.plot_persistence_diagram(persistence, axes = axes[1])
+fig.suptitle("barcode versus diagram")
+plot.show()
diff --git a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
index 9cb855cd..c692e66f 100755
--- a/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
+++ b/src/python/example/periodic_cubical_complex_barcode_persistence_from_perseus_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -70,5 +71,6 @@ if is_file_perseus(args.file):
print(periodic_cubical_complex.betti_numbers())
if args.no_barcode == False:
gudhi.plot_persistence_barcode(diag)
+ plot.show()
else:
print(args.file, "is not a valid perseus style file")
diff --git a/src/python/example/plot_alpha_complex.py b/src/python/example/plot_alpha_complex.py
new file mode 100755
index 00000000..99c18a7c
--- /dev/null
+++ b/src/python/example/plot_alpha_complex.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+import numpy as np
+import gudhi
+ac = gudhi.AlphaComplex(off_file='../../data/points/tore3D_1307.off')
+st = ac.create_simplex_tree()
+points = np.array([ac.get_point(i) for i in range(st.num_vertices())])
+# We want to plot the alpha-complex with alpha=0.1.
+# We are only going to plot the triangles
+triangles = np.array([s[0] for s in st.get_skeleton(2) if len(s[0])==3 and s[1] <= .1])
+
+# First possibility: plotly
+import plotly.graph_objects as go
+fig = go.Figure(data=[
+ go.Mesh3d(
+ x=points[:,0],
+ y=points[:,1],
+ z=points[:,2],
+ i = triangles[:,0],
+ j = triangles[:,1],
+ k = triangles[:,2],
+ )
+])
+fig.show()
+
+# Second possibility: matplotlib
+from mpl_toolkits.mplot3d import Axes3D
+import matplotlib.pyplot as plt
+fig = plt.figure()
+ax = fig.gca(projection='3d')
+ax.plot_trisurf(points[:,0], points[:,1], points[:,2], triangles=triangles)
+plt.show()
+
+# Third possibility: mayavi
+from mayavi import mlab
+mlab.triangular_mesh(points[:,0], points[:,1], points[:,2], triangles)
+mlab.show()
diff --git a/src/python/example/plot_rips_complex.py b/src/python/example/plot_rips_complex.py
new file mode 100755
index 00000000..214a3c0a
--- /dev/null
+++ b/src/python/example/plot_rips_complex.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+import numpy as np
+import gudhi
+points = np.array(gudhi.read_points_from_off_file('../../data/points/Kl.off'))
+rc = gudhi.RipsComplex(points=points, max_edge_length=.2)
+st = rc.create_simplex_tree(max_dimension=2)
+# We are only going to plot the triangles
+triangles = np.array([s[0] for s in st.get_skeleton(2) if len(s[0])==3])
+
+# First possibility: plotly
+import plotly.graph_objects as go
+fig = go.Figure(data=[
+ go.Mesh3d(
+ # Use the first 3 coordinates, but we could as easily pick others
+ x=points[:,0],
+ y=points[:,1],
+ z=points[:,2],
+ i = triangles[:,0],
+ j = triangles[:,1],
+ k = triangles[:,2],
+ )
+])
+fig.show()
+
+# Second possibility: matplotlib
+from mpl_toolkits.mplot3d import Axes3D
+import matplotlib.pyplot as plt
+fig = plt.figure()
+ax = fig.gca(projection='3d')
+ax.plot_trisurf(points[:,0], points[:,1], points[:,2], triangles=triangles)
+plt.show()
+
+# Third possibility: mayavi
+# (this may take a while)
+from mayavi import mlab
+mlab.triangular_mesh(points[:,0], points[:,1], points[:,2], triangles)
+mlab.show()
diff --git a/src/python/example/plot_simplex_tree_dim012.py b/src/python/example/plot_simplex_tree_dim012.py
new file mode 100755
index 00000000..5b962131
--- /dev/null
+++ b/src/python/example/plot_simplex_tree_dim012.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+import numpy as np
+import gudhi
+
+# Coordinates of the points
+points=np.array([[0,0,0],[1,0,0],[0,1,0],[0,0,1],[1,1,1],[1,1,0],[0,1,1]])
+# Build the simplicial complex with a tetrahedron, an edge and an isolated vertex
+cplx=gudhi.SimplexTree()
+cplx.insert([1,2,3,5])
+cplx.insert([4,6])
+cplx.insert([0])
+# List of triangles (point indices)
+triangles = np.array([s[0] for s in cplx.get_skeleton(2) if len(s[0])==3])
+# List of edges (point coordinates)
+edges = []
+for s in cplx.get_skeleton(1):
+ e = s[0]
+ if len(e) == 2:
+ edges.append(points[[e[0],e[1]]])
+
+## With plotly
+import plotly.graph_objects as go
+# Plot triangles
+f2 = go.Mesh3d(
+ x=points[:,0],
+ y=points[:,1],
+ z=points[:,2],
+ i = triangles[:,0],
+ j = triangles[:,1],
+ k = triangles[:,2],
+ )
+# Plot points
+f0 = go.Scatter3d(x=points[:,0], y=points[:,1], z=points[:,2], mode="markers")
+data = [f2, f0]
+# Plot edges
+for pts in edges:
+ seg = go.Scatter3d(x=pts[:,0],y=pts[:,1],z=pts[:,2],mode="lines",line=dict(color='green'))
+ data.append(seg)
+fig = go.Figure(data=data,layout=dict(showlegend=False))
+# By default plotly would give each edge its own color and legend, that's too much
+fig.show()
+
+## With matplotlib
+from mpl_toolkits.mplot3d import Axes3D
+from mpl_toolkits.mplot3d.art3d import Line3DCollection
+import matplotlib.pyplot as plt
+fig = plt.figure()
+ax = fig.gca(projection='3d')
+# Plot triangles
+ax.plot_trisurf(points[:,0], points[:,1], points[:,2], triangles=triangles)
+# Plot points
+ax.scatter3D(points[:,0], points[:,1], points[:,2])
+# Plot edges
+ax.add_collection3d(Line3DCollection(segments=edges))
+plt.show()
+
+## With mayavi
+from mayavi import mlab
+# Plot triangles
+mlab.triangular_mesh(points[:,0], points[:,1], points[:,2], triangles)
+# Plot points
+mlab.points3d(points[:,0], points[:,1], points[:,2])
+# Plot edges
+for pts in edges:
+ mlab.plot3d(pts[:,0],pts[:,1],pts[:,2],tube_radius=None)
+mlab.show()
diff --git a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
index 3571580b..1acb187c 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_correlation_matrix_file_example.py
@@ -1,8 +1,9 @@
#!/usr/bin/env python
-import gudhi
import sys
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -83,5 +84,5 @@ invert_diag = [
]
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(invert_diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(invert_diag, band=args.band)
+ plot.show()
diff --git a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
index 0b9a9ba9..79ccca96 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -59,5 +60,5 @@ print("betti_numbers()=")
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
diff --git a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
index 2b335bba..c757aca7 100755
--- a/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/rips_complex_diagram_persistence_from_off_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -47,7 +48,7 @@ with open(args.file, "r") as f:
message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length)
print(message)
- point_cloud = gudhi.read_off(off_file=args.file)
+ point_cloud = gudhi.read_points_from_off_file(off_file=args.file)
rips_complex = gudhi.RipsComplex(
points=point_cloud, max_edge_length=args.max_edge_length
)
@@ -64,8 +65,8 @@ with open(args.file, "r") as f:
print(simplex_tree.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
else:
print(args.file, "is not a valid OFF file")
diff --git a/src/python/example/rips_persistence_diagram.py b/src/python/example/rips_persistence_diagram.py
index f5897d7b..2a90b4bc 100755
--- a/src/python/example/rips_persistence_diagram.py
+++ b/src/python/example/rips_persistence_diagram.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
@@ -26,5 +27,5 @@ simplex_tree = rips.create_simplex_tree(max_dimension=1)
diag = simplex_tree.persistence(homology_coeff_field=2, min_persistence=0)
print("diag=", diag)
-pplot = gudhi.plot_persistence_diagram(diag)
-pplot.show()
+gudhi.plot_persistence_diagram(diag)
+plot.show()
diff --git a/src/python/example/sparse_rips_persistence_diagram.py b/src/python/example/sparse_rips_persistence_diagram.py
index 671d5e34..410a6a86 100755
--- a/src/python/example/sparse_rips_persistence_diagram.py
+++ b/src/python/example/sparse_rips_persistence_diagram.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+import matplotlib.pyplot as plot
import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
@@ -28,5 +29,5 @@ simplex_tree = rips.create_simplex_tree(max_dimension=2)
diag = simplex_tree.persistence(homology_coeff_field=2, min_persistence=0)
print("diag=", diag)
-pplot = gudhi.plot_persistence_diagram(diag)
-pplot.show()
+gudhi.plot_persistence_diagram(diag)
+plot.show()
diff --git a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
index 456bc9eb..f0df2189 100755
--- a/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
+++ b/src/python/example/tangential_complex_plain_homology_from_off_file_example.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python
-import gudhi
import argparse
+import matplotlib.pyplot as plot
+import gudhi
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
@@ -56,8 +57,8 @@ with open(args.file, "r") as f:
print(st.betti_numbers())
if args.no_diagram == False:
- pplot = gudhi.plot_persistence_diagram(diag, band=args.band)
- pplot.show()
+ gudhi.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
else:
print(args.file, "is not a valid OFF file")
diff --git a/src/python/gudhi/__init__.py.in b/src/python/gudhi/__init__.py.in
index 28bab0e1..79e12fbc 100644
--- a/src/python/gudhi/__init__.py.in
+++ b/src/python/gudhi/__init__.py.in
@@ -1,14 +1,14 @@
-from importlib import import_module
-
-"""This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
+from importlib import import_module
+from sys import exc_info
__author__ = "GUDHI Editorial Board"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -18,16 +18,16 @@ __version__ = "@GUDHI_VERSION@"
__root_source_dir__ = "@CMAKE_SOURCE_DIR@"
__debug_info__ = @GUDHI_PYTHON_DEBUG_INFO@
-from sys import exc_info
-from importlib import import_module
-
-__all__ = [@GUDHI_PYTHON_MODULES@]
+__all__ = [@GUDHI_PYTHON_MODULES@ @GUDHI_PYTHON_MODULES_EXTRA@]
__available_modules = ''
__missing_modules = ''
-# try to import * from gudhi.__module_name
-for __module_name in __all__:
+# Try to import * from gudhi.__module_name for default modules.
+# Extra modules require an explicit import by the user (mostly because of
+# unusual dependencies, but also to avoid cluttering namespace gudhi and
+# speed up the basic import)
+for __module_name in [@GUDHI_PYTHON_MODULES@]:
try:
__module = import_module('gudhi.' + __module_name)
try:
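
A short usage sketch of this import machinery, assuming for illustration that gudhi.representations (added later in this patch) is packaged among the extra modules:

    import gudhi                      # imports the default modules only
    print(gudhi.__available_modules)  # default modules that imported successfully

    # Extra modules (unusual dependencies, kept out of the basic gudhi
    # namespace) need an explicit import:
    import gudhi.representations
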
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index 6d6309db..f3ca3dd5 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -9,16 +18,6 @@ import os
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
@@ -67,10 +66,10 @@ cdef class AlphaComplex:
"""
# The real cython constructor
- def __cinit__(self, points=None, off_file=''):
- if off_file is not '':
+ def __cinit__(self, points = None, off_file = ''):
+ if off_file:
if os.path.isfile(off_file):
- self.thisptr = new Alpha_complex_interface(str.encode(off_file), True)
+ self.thisptr = new Alpha_complex_interface(off_file.encode('utf-8'), True)
else:
print("file " + off_file + " not found.")
else:
@@ -100,7 +99,7 @@ cdef class AlphaComplex:
cdef vector[double] point = self.thisptr.get_point(vertex)
return point
- def create_simplex_tree(self, max_alpha_square=float('inf')):
+ def create_simplex_tree(self, max_alpha_square = float('inf')):
"""
:param max_alpha_square: The maximum alpha square threshold the
simplices shall not exceed. Default is set to infinity, and
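
A minimal usage sketch of the constructor after these changes; the coordinates are illustrative:

    import gudhi

    # points is used when off_file is empty; a non-empty off_file takes precedence.
    ac = gudhi.AlphaComplex(points=[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    st = ac.create_simplex_tree(max_alpha_square=2.0)
    print(st.num_simplices())
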
diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx
index 4b378cbc..af011e88 100644
--- a/src/python/gudhi/bottleneck.pyx
+++ b/src/python/gudhi/bottleneck.pyx
@@ -1,18 +1,17 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
import os
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index 0dc133d1..cbeda014 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -5,17 +14,7 @@ from libcpp.string cimport string
from libcpp cimport bool
import os
-from numpy import array as np_array
-
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
+import numpy as np
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -48,7 +47,7 @@ cdef class CubicalComplex:
# Fake constructor that does nothing but documenting the constructor
def __init__(self, dimensions=None, top_dimensional_cells=None,
- perseus_file=''):
+ perseus_file=''):
"""CubicalComplex constructor from dimensions and
top_dimensional_cells or from a Perseus-style file name.
@@ -59,6 +58,12 @@ cdef class CubicalComplex:
Or
+ :param top_dimensional_cells: A multidimensional array of cells
+ filtration values.
+ :type top_dimensional_cells: anything convertible to a numpy ndarray
+
+ Or
+
:param perseus_file: A Perseus-style file name.
:type perseus_file: string
"""
@@ -66,11 +71,21 @@ cdef class CubicalComplex:
# The real cython constructor
def __cinit__(self, dimensions=None, top_dimensional_cells=None,
perseus_file=''):
- if (dimensions is not None) and (top_dimensional_cells is not None) and (perseus_file is ''):
+ if ((dimensions is not None) and (top_dimensional_cells is not None)
+ and (perseus_file == '')):
+ self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
+ elif ((dimensions is None) and (top_dimensional_cells is not None)
+ and (perseus_file == '')):
+ top_dimensional_cells = np.array(top_dimensional_cells,
+ copy = False,
+ order = 'F')
+ dimensions = top_dimensional_cells.shape
+ top_dimensional_cells = top_dimensional_cells.ravel(order='F')
self.thisptr = new Bitmap_cubical_complex_base_interface(dimensions, top_dimensional_cells)
- elif (dimensions is None) and (top_dimensional_cells is None) and (perseus_file is not ''):
+ elif ((dimensions is None) and (top_dimensional_cells is None)
+ and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Bitmap_cubical_complex_base_interface(str.encode(perseus_file))
+ self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
else:
print("file " + perseus_file + " not found.")
else:
@@ -185,4 +200,4 @@ cdef class CubicalComplex:
else:
print("intervals_in_dim function requires persistence function"
" to be launched first.")
- return np_array(intervals_result)
+ return np.array(intervals_result)
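
A sketch of the new construction path added above, where the dimensions are inferred from a numpy array instead of being passed explicitly; the cell values are toys:

    import numpy as np
    import gudhi

    # The array is taken with copy=False and order='F', its shape becomes the
    # dimensions, and it is flattened in Fortran order before reaching C++.
    cells = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    cc = gudhi.CubicalComplex(top_dimensional_cells=cells)
    cc.persistence()
    print(cc.betti_numbers())
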
diff --git a/src/python/gudhi/euclidean_strong_witness_complex.pyx b/src/python/gudhi/euclidean_strong_witness_complex.pyx
index 5d6e4fb9..9889f92c 100644
--- a/src/python/gudhi/euclidean_strong_witness_complex.pyx
+++ b/src/python/gudhi/euclidean_strong_witness_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -6,16 +15,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
@@ -71,7 +70,7 @@ cdef class EuclideanStrongWitnessComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- if limit_dimension is not -1:
+ if limit_dimension != -1:
self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
max_alpha_square, limit_dimension)
else:
diff --git a/src/python/gudhi/euclidean_witness_complex.pyx b/src/python/gudhi/euclidean_witness_complex.pyx
index 2531919b..e3ce0e82 100644
--- a/src/python/gudhi/euclidean_witness_complex.pyx
+++ b/src/python/gudhi/euclidean_witness_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -6,16 +15,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
@@ -71,7 +70,7 @@ cdef class EuclideanWitnessComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- if limit_dimension is not -1:
+ if limit_dimension != -1:
self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
max_alpha_square, limit_dimension)
else:
diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx
index 2b230b8c..382e71c5 100644
--- a/src/python/gudhi/nerve_gic.pyx
+++ b/src/python/gudhi/nerve_gic.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2018 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -9,16 +18,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2018 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2018 Inria"
__license__ = "GPL v3"
@@ -181,7 +180,7 @@ cdef class CoverComplex:
:returns: Read file status.
"""
if os.path.isfile(off_file):
- return self.thisptr.read_point_cloud(str.encode(off_file))
+ return self.thisptr.read_point_cloud(off_file.encode('utf-8'))
else:
print("file " + off_file + " not found.")
return False
@@ -213,7 +212,7 @@ cdef class CoverComplex:
:type color_file_name: string
"""
if os.path.isfile(color_file_name):
- self.thisptr.set_color_from_file(str.encode(color_file_name))
+ self.thisptr.set_color_from_file(color_file_name.encode('utf-8'))
else:
print("file " + color_file_name + " not found.")
@@ -234,7 +233,7 @@ cdef class CoverComplex:
:type cover_file_name: string
"""
if os.path.isfile(cover_file_name):
- self.thisptr.set_cover_from_file(str.encode(cover_file_name))
+ self.thisptr.set_cover_from_file(cover_file_name.encode('utf-8'))
else:
print("file " + cover_file_name + " not found.")
@@ -267,7 +266,7 @@ cdef class CoverComplex:
:type func_file_name: string
"""
if os.path.isfile(func_file_name):
- self.thisptr.set_function_from_file(str.encode(func_file_name))
+ self.thisptr.set_function_from_file(func_file_name.encode('utf-8'))
else:
print("file " + func_file_name + " not found.")
@@ -308,7 +307,7 @@ cdef class CoverComplex:
:type graph_file_name: string
"""
if os.path.isfile(graph_file_name):
- self.thisptr.set_graph_from_file(str.encode(graph_file_name))
+ self.thisptr.set_graph_from_file(graph_file_name.encode('utf-8'))
else:
print("file " + graph_file_name + " not found.")
@@ -369,7 +368,7 @@ cdef class CoverComplex:
:param type: either "GIC" or "Nerve".
:type type: string
"""
- self.thisptr.set_type(str.encode(type))
+ self.thisptr.set_type(type.encode('utf-8'))
def set_verbose(self, verbose):
"""Specifies whether the program should display information or not.
diff --git a/src/python/gudhi/off_reader.pyx b/src/python/gudhi/off_reader.pyx
index 9efd97ff..a0d5bf25 100644
--- a/src/python/gudhi/off_reader.pyx
+++ b/src/python/gudhi/off_reader.pyx
@@ -1,18 +1,17 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.string cimport string
import os
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -20,7 +19,7 @@ __license__ = "MIT"
cdef extern from "Off_reader_interface.h" namespace "Gudhi":
vector[vector[double]] read_points_from_OFF_file(string off_file)
-def read_off(off_file=''):
+def read_points_from_off_file(off_file=''):
"""Read points from OFF file.
:param off_file: An OFF file style name.
@@ -29,9 +28,9 @@ def read_off(off_file=''):
:returns: The point set.
:rtype: vector[vector[double]]
"""
- if off_file is not '':
+ if off_file:
if os.path.isfile(off_file):
- return read_points_from_OFF_file(str.encode(off_file))
+ return read_points_from_OFF_file(off_file.encode('utf-8'))
else:
print("file " + off_file + " not found.")
return []
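
A minimal sketch of the renamed reader; the file name is illustrative, and an empty list comes back when the file is missing:

    import gudhi

    # read_off() has been renamed to read_points_from_off_file().
    points = gudhi.read_points_from_off_file(off_file="points.off")
    print(len(points), "points read")
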
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index 724fadd4..37f76201 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -5,17 +14,7 @@ from libcpp.string cimport string
from libcpp cimport bool
import os
-from numpy import array as np_array
-
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
+import numpy as np
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -48,7 +47,7 @@ cdef class PeriodicCubicalComplex:
# Fake constructor that does nothing but documenting the constructor
def __init__(self, dimensions=None, top_dimensional_cells=None,
- periodic_dimensions=None, perseus_file=''):
+ periodic_dimensions=None, perseus_file=''):
"""PeriodicCubicalComplex constructor from dimensions and
top_dimensional_cells or from a Perseus-style file name.
@@ -61,6 +60,14 @@ cdef class PeriodicCubicalComplex:
Or
+ :param top_dimensional_cells: A multidimensional array of cells
+ filtration values.
+ :type top_dimensional_cells: anything convertible to a numpy ndarray
+ :param periodic_dimensions: A list of periodicity values, one boolean per dimension of the top dimensional cells.
+ :type periodic_dimensions: list of boolean
+
+ Or
+
:param perseus_file: A Perseus-style file name.
:type perseus_file: string
"""
@@ -68,16 +75,32 @@ cdef class PeriodicCubicalComplex:
# The real cython constructor
def __cinit__(self, dimensions=None, top_dimensional_cells=None,
periodic_dimensions=None, perseus_file=''):
- if (dimensions is not None) and (top_dimensional_cells is not None) and (periodic_dimensions is not None) and (perseus_file is ''):
- self.thisptr = new Periodic_cubical_complex_base_interface(dimensions, top_dimensional_cells, periodic_dimensions)
- elif (dimensions is None) and (top_dimensional_cells is None) and (periodic_dimensions is None) and (perseus_file is not ''):
+ if ((dimensions is not None) and (top_dimensional_cells is not None)
+ and (periodic_dimensions is not None) and (perseus_file == '')):
+ self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
+ top_dimensional_cells,
+ periodic_dimensions)
+ elif ((dimensions is None) and (top_dimensional_cells is not None)
+ and (periodic_dimensions is not None) and (perseus_file == '')):
+ top_dimensional_cells = np.array(top_dimensional_cells,
+ copy = False,
+ order = 'F')
+ dimensions = top_dimensional_cells.shape
+ top_dimensional_cells = top_dimensional_cells.ravel(order='F')
+ self.thisptr = new Periodic_cubical_complex_base_interface(dimensions,
+ top_dimensional_cells,
+ periodic_dimensions)
+ elif ((dimensions is None) and (top_dimensional_cells is None)
+ and (periodic_dimensions is None) and (perseus_file != '')):
if os.path.isfile(perseus_file):
- self.thisptr = new Periodic_cubical_complex_base_interface(str.encode(perseus_file))
+ self.thisptr = new Periodic_cubical_complex_base_interface(perseus_file.encode('utf-8'))
else:
print("file " + perseus_file + " not found.")
else:
- print("CubicalComplex can be constructed from dimensions and "
- "top_dimensional_cells or from a Perseus-style file name.")
+ print("CubicalComplex can be constructed from dimensions, "
+ "top_dimensional_cells and periodic_dimensions, or from "
+ "top_dimensional_cells and periodic_dimensions or from "
+ "a Perseus-style file name.")
def __dealloc__(self):
if self.thisptr != NULL:
@@ -187,4 +210,4 @@ cdef class PeriodicCubicalComplex:
else:
print("intervals_in_dim function requires persistence function"
" to be launched first.")
- return np_array(intervals_result)
+ return np.array(intervals_result)
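
A sketch of the inferred-dimensions constructor mirrored here for the periodic case; the values are toys:

    import numpy as np
    import gudhi

    # The (10, 10) shape gives the dimensions; periodic_dimensions flags,
    # per axis, whether the complex wraps around.
    cells = np.random.rand(10, 10)
    pcc = gudhi.PeriodicCubicalComplex(top_dimensional_cells=cells,
                                       periodic_dimensions=[True, True])
    print(pcc.persistence())
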
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 181bc8ea..246280de 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau, Bertrand Michel
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from os import path
from math import isfinite
import numpy as np
@@ -5,16 +14,6 @@ import numpy as np
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau, Bertrand Michel
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau, Bertrand Michel"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -44,27 +43,6 @@ def __min_birth_max_death(persistence, band=0.0):
max_death += band
return (min_birth, max_death)
-
-"""
-Only 13 colors for the palette
-"""
-palette = [
- "#ff0000",
- "#00ff00",
- "#0000ff",
- "#00ffff",
- "#ff00ff",
- "#ffff00",
- "#000000",
- "#880000",
- "#008800",
- "#000088",
- "#888800",
- "#880088",
- "#008888",
-]
-
-
def plot_persistence_barcode(
persistence=[],
persistence_file="",
@@ -73,6 +51,8 @@ def plot_persistence_barcode(
max_barcodes=1000,
inf_delta=0.1,
legend=False,
+ colormap=None,
+ axes=None
):
"""This function plots the persistence bar code from persistence values list
or from a :doc:`persistence file <fileformats>`.
@@ -95,14 +75,19 @@ def plot_persistence_barcode(
:type inf_delta: float.
:param legend: Display the dimension color legend (default is False).
:type legend: boolean.
- :returns: A matplotlib object containing horizontal bar plot of persistence
- (launch `show()` method on it to display it).
+    :param colormap: A matplotlib-like qualitative colormap. Default is None,
+        which means :code:`matplotlib.cm.Set1.colors`.
+    :type colormap: tuple of colors (each color a 3-tuple of floats between 0. and 1.).
+ :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
+ a new set of axes.
+ :type axes: `matplotlib.axes.Axes`
+ :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
- if persistence_file is not "":
+ if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
@@ -116,7 +101,7 @@ def plot_persistence_barcode(
print("file " + persistence_file + " not found.")
return None
- if max_barcodes is not 1000:
+ if max_barcodes != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_barcodes
@@ -127,6 +112,11 @@ def plot_persistence_barcode(
key=lambda life_time: life_time[1][1] - life_time[1][0],
reverse=True,
)[:max_intervals]
+
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
+ if axes == None:
+ fig, axes = plt.subplots(1, 1)
persistence = sorted(persistence, key=lambda birth: birth[1][0])
@@ -141,41 +131,43 @@ def plot_persistence_barcode(
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
- plt.barh(
+ axes.barh(
ind,
(interval[1][1] - interval[1][0]),
height=0.8,
left=interval[1][0],
alpha=alpha,
- color=palette[interval[0]],
+ color=colormap[interval[0]],
linewidth=0,
)
else:
# Infinite death case for diagram to be nicer
- plt.barh(
+ axes.barh(
ind,
(infinity - interval[1][0]),
height=0.8,
left=interval[1][0],
alpha=alpha,
- color=palette[interval[0]],
+ color=colormap[interval[0]],
linewidth=0,
)
ind = ind + 1
if legend:
dimensions = list(set(item[0] for item in persistence))
- plt.legend(
+ axes.legend(
handles=[
- mpatches.Patch(color=palette[dim], label=str(dim))
+ mpatches.Patch(color=colormap[dim], label=str(dim))
for dim in dimensions
],
loc="lower right",
)
- plt.title("Persistence barcode")
+
+ axes.set_title("Persistence barcode")
+
# Ends plot on infinity value and starts a little bit before min_birth
- plt.axis([axis_start, infinity, 0, ind])
- return plt
+ axes.axis([axis_start, infinity, 0, ind])
+ return axes
except ImportError:
print("This function is not available, you may be missing matplotlib.")
@@ -190,6 +182,8 @@ def plot_persistence_diagram(
max_plots=1000,
inf_delta=0.1,
legend=False,
+ colormap=None,
+ axes=None
):
"""This function plots the persistence diagram from persistence values
list or from a :doc:`persistence file <fileformats>`.
@@ -214,14 +208,19 @@ def plot_persistence_diagram(
:type inf_delta: float.
:param legend: Display the dimension color legend (default is False).
:type legend: boolean.
- :returns: A matplotlib object containing diagram plot of persistence
- (launch `show()` method on it to display it).
+    :param colormap: A matplotlib-like qualitative colormap. Default is None,
+        which means :code:`matplotlib.cm.Set1.colors`.
+    :type colormap: tuple of colors (each color a 3-tuple of floats between 0. and 1.).
+ :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
+ a new set of axes.
+ :type axes: `matplotlib.axes.Axes`
+ :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
- if persistence_file is not "":
+ if persistence_file != "":
if path.isfile(persistence_file):
# Reset persistence
persistence = []
@@ -235,7 +234,7 @@ def plot_persistence_diagram(
print("file " + persistence_file + " not found.")
return None
- if max_plots is not 1000:
+ if max_plots != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_plots
@@ -247,6 +246,11 @@ def plot_persistence_diagram(
reverse=True,
)[:max_intervals]
+ if colormap == None:
+ colormap = plt.cm.Set1.colors
+ if axes == None:
+ fig, axes = plt.subplots(1, 1)
+
(min_birth, max_death) = __min_birth_max_death(persistence, band)
delta = (max_death - min_birth) * inf_delta
# Replace infinity values with max_death + delta for diagram to be more
@@ -257,44 +261,44 @@ def plot_persistence_diagram(
# line display of equation : birth = death
x = np.linspace(axis_start, infinity, 1000)
# infinity line and text
- plt.plot(x, x, color="k", linewidth=1.0)
- plt.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha)
- plt.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha)
+ axes.plot(x, x, color="k", linewidth=1.0)
+ axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha)
+ axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha)
# bootstrap band
if band > 0.0:
- plt.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
+ axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
# Draw points in loop
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
- plt.scatter(
+ axes.scatter(
interval[1][0],
interval[1][1],
alpha=alpha,
- color=palette[interval[0]],
+ color=colormap[interval[0]],
)
else:
# Infinite death case for diagram to be nicer
- plt.scatter(
- interval[1][0], infinity, alpha=alpha, color=palette[interval[0]]
+ axes.scatter(
+ interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
)
if legend:
dimensions = list(set(item[0] for item in persistence))
- plt.legend(
+ axes.legend(
handles=[
- mpatches.Patch(color=palette[dim], label=str(dim))
+ mpatches.Patch(color=colormap[dim], label=str(dim))
for dim in dimensions
]
)
- plt.title("Persistence diagram")
- plt.xlabel("Birth")
- plt.ylabel("Death")
+ axes.set_xlabel("Birth")
+ axes.set_ylabel("Death")
# Ends plot on infinity value and starts a little bit before min_birth
- plt.axis([axis_start, infinity, axis_start, infinity + delta])
- return plt
+ axes.axis([axis_start, infinity, axis_start, infinity + delta])
+ axes.set_title("Persistence diagram")
+ return axes
except ImportError:
print("This function is not available, you may be missing matplotlib.")
@@ -309,6 +313,7 @@ def plot_persistence_density(
dimension=None,
cmap=None,
legend=False,
+ axes=None
):
"""This function plots the persistence density from persistence
values list or from a :doc:`persistence file <fileformats>`. Be
@@ -347,14 +352,16 @@ def plot_persistence_density(
:type cmap: cf. matplotlib colormap.
:param legend: Display the color bar values (default is False).
:type legend: boolean.
- :returns: A matplotlib object containing diagram plot of persistence
- (launch `show()` method on it to display it).
+ :param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
+ a new set of axes.
+ :type axes: `matplotlib.axes.Axes`
+ :returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
from scipy.stats import kde
- if persistence_file is not "":
+ if persistence_file != "":
if dimension is None:
# All dimension case
dimension = -1
@@ -362,7 +369,6 @@ def plot_persistence_density(
persistence_dim = read_persistence_intervals_in_dimension(
persistence_file=persistence_file, only_this_dim=dimension
)
- print(persistence_dim)
else:
print("file " + persistence_file + " not found.")
return None
@@ -391,9 +397,15 @@ def plot_persistence_density(
birth = persistence_dim[:, 0]
death = persistence_dim[:, 1]
+ # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
+ if cmap is None:
+ cmap = plt.cm.hot_r
+ if axes == None:
+ fig, axes = plt.subplots(1, 1)
+
# line display of equation : birth = death
x = np.linspace(death.min(), birth.max(), 1000)
- plt.plot(x, x, color="k", linewidth=1.0)
+ axes.plot(x, x, color="k", linewidth=1.0)
# Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents
k = kde.gaussian_kde([birth, death], bw_method=bw_method)
@@ -403,19 +415,16 @@ def plot_persistence_density(
]
zi = k(np.vstack([xi.flatten(), yi.flatten()]))
- # default cmap value cannot be done at argument definition level as matplotlib is not yet defined.
- if cmap is None:
- cmap = plt.cm.hot_r
# Make the plot
- plt.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
+ img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
if legend:
- plt.colorbar()
+ plt.colorbar(img, ax=axes)
- plt.title("Persistence density")
- plt.xlabel("Birth")
- plt.ylabel("Death")
- return plt
+ axes.set_xlabel("Birth")
+ axes.set_ylabel("Death")
+ axes.set_title("Persistence density")
+ return axes
except ImportError:
print(
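
A short sketch of the axes-based API introduced above, which lets several plots share one figure; the diagram values are toys:

    import matplotlib.pyplot as plt
    import gudhi

    diag = [(0, (0.0, 1.0)), (1, (0.2, 0.8))]
    fig, axes = plt.subplots(1, 2)
    gudhi.plot_persistence_barcode(diag, axes=axes[0], legend=True)
    gudhi.plot_persistence_diagram(diag, axes=axes[1], legend=True)
    plt.show()
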
diff --git a/src/python/gudhi/reader_utils.pyx b/src/python/gudhi/reader_utils.pyx
index 147fae71..d6033b86 100644
--- a/src/python/gudhi/reader_utils.pyx
+++ b/src/python/gudhi/reader_utils.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2017 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.string cimport string
@@ -7,16 +16,6 @@ from libcpp.pair cimport pair
from os import path
from numpy import array as np_array
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2017 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2017 Inria"
__license__ = "MIT"
@@ -37,9 +36,9 @@ def read_lower_triangular_matrix_from_csv_file(csv_file='', separator=';'):
:returns: The lower triangular matrix.
:rtype: vector[vector[double]]
"""
- if csv_file is not '':
+ if csv_file:
if path.isfile(csv_file):
- return read_matrix_from_csv_file(str.encode(csv_file), ord(separator[0]))
+ return read_matrix_from_csv_file(csv_file.encode('utf-8'), ord(separator[0]))
print("file " + csv_file + " not set or not found.")
return []
@@ -56,9 +55,9 @@ def read_persistence_intervals_grouped_by_dimension(persistence_file=''):
:returns: The persistence pairs grouped by dimension.
:rtype: map[int, vector[pair[double, double]]]
"""
- if persistence_file is not '':
+ if persistence_file:
if path.isfile(persistence_file):
- return read_pers_intervals_grouped_by_dimension(str.encode(persistence_file))
+ return read_pers_intervals_grouped_by_dimension(persistence_file.encode('utf-8'))
print("file " + persistence_file + " not set or not found.")
return []
@@ -79,9 +78,9 @@ def read_persistence_intervals_in_dimension(persistence_file='', only_this_dim=-
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
"""
- if persistence_file is not '':
+ if persistence_file:
if path.isfile(persistence_file):
- return np_array(read_pers_intervals_in_dimension(str.encode(
- persistence_file), only_this_dim))
+ return np_array(read_pers_intervals_in_dimension(persistence_file.encode(
+ 'utf-8'), only_this_dim))
print("file " + persistence_file + " not set or not found.")
return []
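
A brief sketch of the readers after the encoding change; the file name is hypothetical:

    import gudhi

    # Returns a 2-column numpy array of (birth, death) pairs in dimension 1,
    # or [] when the file is not found.
    intervals = gudhi.read_persistence_intervals_in_dimension(
        persistence_file="persistence.pers", only_this_dim=1)
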
diff --git a/src/python/gudhi/representations/__init__.py b/src/python/gudhi/representations/__init__.py
new file mode 100644
index 00000000..f020248d
--- /dev/null
+++ b/src/python/gudhi/representations/__init__.py
@@ -0,0 +1,6 @@
+from .kernel_methods import *
+from .metrics import *
+from .preprocessing import *
+from .vector_methods import *
+
+__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods"]
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
new file mode 100644
index 00000000..bfc83aff
--- /dev/null
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -0,0 +1,206 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.metrics import pairwise_distances
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+
+#############################################
+# Kernel methods ############################
+#############################################
+
+class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+ """
+ def __init__(self, num_directions=10, bandwidth=1.0):
+ """
+ Constructor for the SlicedWassersteinKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel applied to the sliced Wasserstein distance (default 1.).
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
+ """
+ self.bandwidth = bandwidth
+ self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+
+ def fit(self, X, y=None):
+ """
+ Fit the SlicedWassersteinKernel class on a list of persistence diagrams: an instance of the SlicedWassersteinDistance class is fitted on the diagrams and then stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.sw_.fit(X, y)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all sliced Wasserstein kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
+ """
+ return np.exp(-self.sw_.transform(X)/self.bandwidth)
+
+class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence weighted Gaussian kernel matrix from a list of persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+ """
+ def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None):
+ """
+ Constructor for the PersistenceWeightedGaussianKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
+            weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth, self.weight = bandwidth, weight
+ self.kernel_approx = kernel_approx
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceWeightedGaussianKernel class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams** and the kernel approximation class (if not None) is applied on them.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = list(X)
+ self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
+ if self.kernel_approx is not None:
+ self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence weighted Gaussian kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
+ """
+ Xp = list(X)
+ Xfit = np.zeros((len(Xp), len(self.diagrams_)))
+ if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
+ if self.kernel_approx is not None:
+ Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
+ else:
+ for i in range(len(self.diagrams_)):
+ for j in range(i+1, len(self.diagrams_)):
+ W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
+ Xfit[i,j] = np.sum(np.multiply(W, E))
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
+ if self.kernel_approx is not None:
+ approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
+ Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
+ else:
+ for i in range(len(Xp)):
+ for j in range(len(self.diagrams_)):
+ W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
+ Xfit[i,j] = np.sum(np.multiply(W, E))
+
+ return Xfit
+
+class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
+ """
+    This is a class for computing the persistence scale space kernel matrix from a list of persistence diagrams. The persistence scale space kernel is computed by adding to each diagram the symmetric of each of its points with respect to the diagonal, with negative weight, and then convolving all points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+ """
+ def __init__(self, bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceScaleSpaceKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+
+ def fit(self, X, y=None):
+ """
+        Fit the PersistenceScaleSpaceKernel class on a list of persistence diagrams: the symmetric of each point with respect to the diagonal is appended to each diagram, and an instance of the PersistenceWeightedGaussianKernel class is fitted on the diagrams and then stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = list(X)
+ for i in range(len(self.diagrams_)):
+ op_D = self.diagrams_[i][:,[1,0]]
+ self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
+        # Fit on the symmetrized diagrams so that transform(), which also
+        # symmetrizes its input, compares like with like.
+        self.pwg_.fit(self.diagrams_)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence scale space kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
+ """
+ Xp = list(X)
+ for i in range(len(Xp)):
+ op_X = Xp[i][:,[1,0]]
+ Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
+ return self.pwg_.transform(Xp)
+
+class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
+ """
+    This is a class for computing the persistence Fisher kernel matrix from a list of persistence diagrams. The persistence Fisher kernel is computed by exponentiating the corresponding persistence Fisher distance with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+ """
+ def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceFisherKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel applied to the persistence Fisher distance (default 1.).
+ bandwidth_fisher (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions by PersistenceFisherDistance class (default 1.).
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth = bandwidth
+ self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceFisherKernel class on a list of persistence diagrams: an instance of the PersistenceFisherDistance class is fitted on the diagrams and then stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.pf_.fit(X, y)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence Fisher kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
+ """
+ return np.exp(-self.pf_.transform(X)/self.bandwidth)
+
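
A minimal usage sketch for the kernel classes above, with two toy diagrams:

    import numpy as np
    from gudhi.representations import SlicedWassersteinKernel

    D1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    D2 = np.array([[0.0, 0.5]])
    swk = SlicedWassersteinKernel(num_directions=10, bandwidth=1.0)
    K = swk.fit([D1, D2]).transform([D1, D2])
    print(K)  # 2 x 2 matrix of pairwise kernel values
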
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
new file mode 100644
index 00000000..5f9ec6ab
--- /dev/null
+++ b/src/python/gudhi/representations/metrics.py
@@ -0,0 +1,244 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.metrics import pairwise_distances
+try:
+ from .. import bottleneck_distance
+ USE_GUDHI = True
+except ImportError:
+ USE_GUDHI = False
+ print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+
+#############################################
+# Metrics ###################################
+#############################################
+
+class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+ """
+ def __init__(self, num_directions=10):
+ """
+ Constructor for the SlicedWassersteinDistance class.
+
+ Parameters:
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
+ """
+ self.num_directions = num_directions
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
+ self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+
+ def fit(self, X, y=None):
+ """
+ Fit the SlicedWassersteinDistance class on a list of persistence diagrams: persistence diagrams are projected onto the different lines. The diagrams themselves and their projections are then stored in numpy arrays, called **diagrams_** and **approx_diag_**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
+ diag_proj = (1./2) * np.ones((2,2))
+ self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
+ return self
+
+ def transform(self, X):
+ """
+ Compute all sliced Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
+ """
+ Xfit = np.zeros((len(X), len(self.approx_)))
+ if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ for i in range(len(self.approx_)):
+ for j in range(i+1, len(self.approx_)):
+ A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
+ B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ Xfit[i,j] = np.mean(L1)
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ diag_proj = (1./2) * np.ones((2,2))
+ approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
+ approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
+ for i in range(len(approx)):
+ for j in range(len(self.approx_)):
+ A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
+ B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ Xfit[i,j] = np.mean(L1)
+
+ return Xfit
+
+class BottleneckDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ """
+ def __init__(self, epsilon=None):
+ """
+ Constructor for the BottleneckDistance class.
+
+ Parameters:
+ epsilon (double): absolute (additive) error tolerated on the distance (default is the smallest positive float), see :func:`gudhi.bottleneck_distance`.
+ """
+ self.epsilon = epsilon
+
+ def fit(self, X, y=None):
+ """
+ Fit the BottleneckDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all bottleneck distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
+ """
+ num_diag1 = len(X)
+
+ #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ if X is self.diagrams_:
+ matrix = np.zeros((num_diag1, num_diag1))
+
+ if USE_GUDHI:
+ for i in range(num_diag1):
+ for j in range(i+1, num_diag1):
+ matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
+ matrix[j,i] = matrix[i,j]
+ else:
+ print("Gudhi built without CGAL: returning a null matrix")
+
+ else:
+ num_diag2 = len(self.diagrams_)
+ matrix = np.zeros((num_diag1, num_diag2))
+
+ if USE_GUDHI:
+ for i in range(num_diag1):
+ for j in range(num_diag2):
+ matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
+ else:
+ print("Gudhi built without CGAL: returning a null matrix")
+
+ Xfit = matrix
+
+ return Xfit
+
+class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+ """
+ def __init__(self, bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceFisherDistance class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions (default 1.).
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceFisherDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams** and the kernel approximation class (if not None) is applied on them.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ projection = (1./2) * np.ones((2,2))
+ self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
+ if self.kernel_approx is not None:
+ self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
+ self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence Fisher distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
+ """
+ Xfit = np.zeros((len(X), len(self.diagrams_)))
+ if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ for i in range(len(self.diagrams_)):
+ for j in range(i+1, len(self.diagrams_)):
+ if self.kernel_approx is not None:
+ Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
+ U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
+ U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
+ if self.kernel_approx is not None:
+ approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
+ approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
+ for i in range(len(X)):
+ for j in range(len(self.diagrams_)):
+ if self.kernel_approx is not None:
+ Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
+ U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
+ U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ return Xfit
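
A minimal usage sketch for the distance classes above; as the module itself warns, BottleneckDistance needs GUDHI built with CGAL:

    import numpy as np
    from gudhi.representations import BottleneckDistance

    D1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    D2 = np.array([[0.0, 0.5]])
    bd = BottleneckDistance(epsilon=0.001)
    M = bd.fit([D1]).transform([D2])  # shape (1, 1): distances to the fitted diagram
    print(M)
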
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
new file mode 100644
index 00000000..a39b00e4
--- /dev/null
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -0,0 +1,305 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.preprocessing import StandardScaler
+
+#############################################
+# Preprocessing #############################
+#############################################
+
+class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
+ """
+    This is a class for the affine transformation (x,y) -> (x,y-x), applied to persistence diagrams.
+ """
+ def __init__(self):
+ """
+ Constructor for BirthPersistenceTransform class.
+ """
+ return None
+
+ def fit(self, X, y=None):
+ """
+ Fit the BirthPersistenceTransform class on a list of persistence diagrams (this function actually does nothing but is useful when BirthPersistenceTransform is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 numpy array): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Apply the BirthPersistenceTransform function on the persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy array): input persistence diagrams.
+
+ Returns:
+ list of n x 2 numpy array: transformed persistence diagrams.
+ """
+ Xfit = []
+ for diag in X:
+ #new_diag = np.empty(diag.shape)
+ #np.copyto(new_diag, diag)
+ new_diag = np.copy(diag)
+ new_diag[:,1] = new_diag[:,1] - new_diag[:,0]
+ Xfit.append(new_diag)
+ return Xfit
+
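For illustration, a minimal usage sketch of BirthPersistenceTransform on a toy diagram (the input points below are made up; the import path follows the file added by this patch):

    import numpy as np
    from gudhi.representations.preprocessing import BirthPersistenceTransform

    diag = np.array([[0.0, 1.0], [2.0, 5.0]])    # (birth, death) pairs, made up for the example
    bpt = BirthPersistenceTransform()
    print(bpt.fit_transform([diag])[0])          # [[0. 1.] [2. 3.]] -- (birth, persistence) pairs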
+class Clamping(BaseEstimator, TransformerMixin):
+ """
+ This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
+ """
+ def __init__(self, minimum=-np.inf, maximum=np.inf):
+ """
+ Constructor for the Clamping class.
+
+ Parameters:
+            minimum (double): minimum value of the clamping range (default -np.inf).
+            maximum (double): maximum value of the clamping range (default np.inf).
+ """
+ self.minimum = minimum
+ self.maximum = maximum
+
+ def fit(self, X, y=None):
+ """
+ Fit the Clamping class on a list of values (this function actually does nothing but is useful when Clamping is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (numpy array of size n): input values.
+ y (n x 1 array): value labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Clamp list of values.
+
+ Parameters:
+ X (numpy array of size n): input list of values.
+
+ Returns:
+ numpy array of size n: output list of values.
+ """
+ Xfit = np.clip(X, self.minimum, self.maximum)
+ return Xfit
+
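A minimal sketch of Clamping on a toy list of values (the values are made up):

    import numpy as np
    from gudhi.representations.preprocessing import Clamping

    values = np.array([-3.0, 0.5, 7.0])
    # clamp everything into [0, 1]
    print(Clamping(minimum=0., maximum=1.).fit_transform(values))   # [0.  0.5 1. ]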
+class DiagramScaler(BaseEstimator, TransformerMixin):
+ """
+ This is a class for preprocessing persistence diagrams with a given list of scalers, such as those included in scikit-learn.
+ """
+ def __init__(self, use=False, scalers=[]):
+ """
+ Constructor for the DiagramScaler class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ scalers (list of classes): list of scalers to be fit on the persistence diagrams (default []). Each element of the list is a tuple with two elements: the first one is a list of coordinates, and the second one is a scaler (i.e. a class with fit() and transform() methods) that is going to be applied to these coordinates. Common scalers can be found in the scikit-learn library (such as MinMaxScaler for instance).
+ """
+ self.scalers = scalers
+ self.use = use
+
+ def fit(self, X, y=None):
+ """
+ Fit the DiagramScaler class on a list of persistence diagrams: persistence diagrams are concatenated in a big numpy array, and scalers are fit (by calling their fit() method) on their corresponding coordinates in this big array.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if self.use:
+ if len(X) == 1:
+ P = X[0]
+ else:
+ P = np.concatenate(X,0)
+ for (indices, scaler) in self.scalers:
+ scaler.fit(np.reshape(P[:,indices], [-1, 1]))
+ return self
+
+ def transform(self, X):
+ """
+ Apply the DiagramScaler function on the persistence diagrams. The fitted scalers are applied (by calling their transform() method) to their corresponding coordinates in each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: transformed persistence diagrams.
+ """
+ Xfit = [np.copy(d) for d in X]
+ if self.use:
+ for i in range(len(Xfit)):
+ if Xfit[i].shape[0] > 0:
+ for (indices, scaler) in self.scalers:
+ for I in indices:
+ Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
+ return Xfit
+
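As a sketch, rescaling both coordinates of two toy diagrams to [0, 1] (the diagrams are made up):

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler
    from gudhi.representations.preprocessing import DiagramScaler

    diags = [np.array([[0., 2.], [1., 4.]]), np.array([[0.5, 3.]])]
    # one MinMaxScaler per coordinate, fit on the concatenation of all diagrams
    scaler = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())])
    print(scaler.fit_transform(diags))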
+class Padding(BaseEstimator, TransformerMixin):
+ """
+ This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
+ """
+ def __init__(self, use=False):
+ """
+ Constructor for the Padding class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ """
+ self.use = use
+
+ def fit(self, X, y=None):
+ """
+        Fit the Padding class on a list of persistence diagrams: compute the maximal number of points over all the diagrams, to be used as the common cardinality after padding.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.max_pts = max([len(diag) for diag in X])
+ return self
+
+ def transform(self, X):
+ """
+ Add dummy points to each persistence diagram so that they all have the same cardinality. All points are given an additional coordinate indicating if the point was added after padding (0) or already present before (1).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 3 or n x 2 numpy arrays: padded persistence diagrams.
+ """
+ if self.use:
+            Xfit = []
+            for diag in X:
+                diag_pad = np.pad(diag, ((0,max(0, self.max_pts - diag.shape[0])), (0,1)), "constant", constant_values=((0,0),(0,0)))
+                # mark the points that were already present with 1 in the appended indicator column (last column, so this also works for n x 1 diagrams)
+                diag_pad[:diag.shape[0],-1] = np.ones(diag.shape[0])
+ Xfit.append(diag_pad)
+ else:
+ Xfit = X
+ return Xfit
+
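A minimal sketch of Padding on two toy diagrams of different sizes:

    import numpy as np
    from gudhi.representations.preprocessing import Padding

    diags = [np.array([[0., 1.]]), np.array([[0., 2.], [1., 3.]])]
    padded = Padding(use=True).fit_transform(diags)
    print([d.shape for d in padded])   # [(2, 3), (2, 3)]: same cardinality, plus an indicator column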
+class ProminentPoints(BaseEstimator, TransformerMixin):
+ """
+    This is a class for removing points that are close to or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), the points are ordered and thresholded by their distance to the diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), the points are not ordered and are thresholded by their first coordinate.
+ """
+ def __init__(self, use=False, num_pts=10, threshold=-1, location="upper"):
+ """
+ Constructor for the ProminentPoints class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
+ num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
+ threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ """
+ self.num_pts = num_pts
+ self.threshold = threshold
+ self.use = use
+ self.location = location
+
+ def fit(self, X, y=None):
+ """
+ Fit the ProminentPoints class on a list of persistence diagrams (this function actually does nothing but is useful when ProminentPoints is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ If location == "upper", first select the top **num_pts** points that are the farthest away from the diagonal, then select and return from these points the ones that are at least at distance **threshold** from the diagonal for each persistence diagram individually. If location == "lower", first select the top **num_pts** points that are the closest to the diagonal, then select and return from these points the ones that are at most at distance **threshold** from the diagonal for each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: thresholded persistence diagrams.
+ """
+ if self.use:
+ Xfit, num_diag = [], len(X)
+ for i in range(num_diag):
+ diag = X[i]
+ if diag.shape[1] >= 2:
+ if diag.shape[0] > 0:
+ pers = np.abs(diag[:,1] - diag[:,0])
+ idx_thresh = pers >= self.threshold
+ thresh_diag, thresh_pers = diag[idx_thresh], pers[idx_thresh]
+ sort_index = np.flip(np.argsort(thresh_pers, axis=None), 0)
+ if self.location == "upper":
+ new_diag = thresh_diag[sort_index[:min(self.num_pts, thresh_diag.shape[0])],:]
+ if self.location == "lower":
+ new_diag = np.concatenate( [ thresh_diag[sort_index[min(self.num_pts, thresh_diag.shape[0]):],:], diag[~idx_thresh] ], axis=0)
+ else:
+ new_diag = diag
+
+ else:
+ if diag.shape[0] > 0:
+                    # use a 1-D mask so that boolean indexing keeps the diagram 2-dimensional
+                    birth = diag[:,0]
+                    idx_thresh = birth >= self.threshold
+                    thresh_diag = diag[idx_thresh]
+ if self.location == "upper":
+ new_diag = thresh_diag[:min(self.num_pts, thresh_diag.shape[0]),:]
+ if self.location == "lower":
+ new_diag = np.concatenate( [ thresh_diag[min(self.num_pts, thresh_diag.shape[0]):,:], diag[~idx_thresh] ], axis=0)
+ else:
+ new_diag = diag
+
+ Xfit.append(new_diag)
+ else:
+ Xfit = X
+ return Xfit
+
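For instance, keeping only the two most persistent points of a toy diagram (the input is made up; threshold keeps its default of -1, so only the cardinality criterion applies):

    import numpy as np
    from gudhi.representations.preprocessing import ProminentPoints

    diag = np.array([[0., 10.], [0., 1.], [0., 5.]])
    pp = ProminentPoints(use=True, num_pts=2, location="upper")
    print(pp.fit_transform([diag])[0])   # the points with persistence 10 and 5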
+class DiagramSelector(BaseEstimator, TransformerMixin):
+ """
+ This is a class for extracting finite or essential points in persistence diagrams.
+ """
+ def __init__(self, use=False, limit=np.inf, point_type="finite"):
+ """
+ Constructor for the DiagramSelector class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
+ """
+ self.use, self.limit, self.point_type = use, limit, point_type
+
+ def fit(self, X, y=None):
+ """
+ Fit the DiagramSelector class on a list of persistence diagrams (this function actually does nothing but is useful when DiagramSelector is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Extract and return the finite or essential points of each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: extracted persistence diagrams.
+ """
+ if self.use:
+ if self.point_type == "finite":
+ Xfit = [ diag[diag[:,1] < self.limit] if diag.shape[0] != 0 else diag for diag in X]
+ else:
+ Xfit = [ diag[diag[:,1] >= self.limit, 0:1] if diag.shape[0] != 0 else diag for diag in X]
+ else:
+ Xfit = X
+ return Xfit
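A sketch of both extraction modes on one toy diagram containing an essential point:

    import numpy as np
    from gudhi.representations.preprocessing import DiagramSelector

    diag = np.array([[0., 1.], [0.5, np.inf]])
    print(DiagramSelector(use=True, point_type="finite").fit_transform([diag])[0])      # [[0. 1.]]
    print(DiagramSelector(use=True, point_type="essential").fit_transform([diag])[0])   # [[0.5]], birth only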
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
new file mode 100644
index 00000000..fe26dbe2
--- /dev/null
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -0,0 +1,492 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
+from sklearn.neighbors import DistanceMetric
+
+from .preprocessing import DiagramScaler, BirthPersistenceTransform
+
+#############################################
+# Finite Vectorization methods ##############
+#############################################
+
+class PersistenceImage(BaseEstimator, TransformerMixin):
+ """
+    This is a class for computing persistence images from a list of persistence diagrams. A persistence image is a 2D function computed from a persistence diagram by convolving the diagram points with a weighted Gaussian kernel. The plane is then discretized into an image, which is flattened and returned as a vector. See http://jmlr.org/papers/v18/16-337.html for more details.
+ """
+ def __init__(self, bandwidth=1., weight=lambda x: 1, resolution=[20,20], im_range=[np.nan, np.nan, np.nan, np.nan]):
+ """
+ Constructor for the PersistenceImage class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel (default 1.).
+            weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+            resolution ([int,int]): size (in pixels) of the persistence image (default [20,20]).
+            im_range ([double,double,double,double]): minimum and maximum of each axis of the persistence image, of the form [x_min, x_max, y_min, y_max] (default [numpy.nan, numpy.nan, numpy.nan, numpy.nan]). If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.bandwidth, self.weight = bandwidth, weight
+ self.resolution, self.im_range = resolution, im_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceImage class on a list of persistence diagrams: if any of the values in **im_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.im_range)).any():
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence image for each persistence diagram individually and store the results in a single numpy array.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (number of pixels = **resolution[0]** x **resolution[1]**): output persistence images.
+ """
+ num_diag, Xfit = len(X), []
+ new_X = BirthPersistenceTransform().fit_transform(X)
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = new_X[i], X[i].shape[0]
+
+ w = np.empty(num_pts_in_diag)
+ for j in range(num_pts_in_diag):
+ w[j] = self.weight(diagram[j,:])
+
+ x_values, y_values = np.linspace(self.im_range[0], self.im_range[1], self.resolution[0]), np.linspace(self.im_range[2], self.im_range[3], self.resolution[1])
+ Xs, Ys = np.tile((diagram[:,0][:,np.newaxis,np.newaxis]-x_values[np.newaxis,np.newaxis,:]),[1,self.resolution[1],1]), np.tile(diagram[:,1][:,np.newaxis,np.newaxis]-y_values[np.newaxis,:,np.newaxis],[1,1,self.resolution[0]])
+ image = np.tensordot(w, np.exp((-np.square(Xs)-np.square(Ys))/(2*np.square(self.bandwidth)))/(np.square(self.bandwidth)*2*np.pi), 1)
+
+ Xfit.append(image.flatten()[np.newaxis,:])
+
+ Xfit = np.concatenate(Xfit,0)
+
+ return Xfit
+
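A minimal sketch on a toy diagram (the explicit im_range avoids having fit() compute it from the data):

    import numpy as np
    from gudhi.representations.vector_methods import PersistenceImage

    diags = [np.array([[0., 1.], [1., 3.]])]
    pi = PersistenceImage(bandwidth=0.5, resolution=[10, 10], im_range=[0., 3., 0., 3.])
    print(pi.fit_transform(diags).shape)   # (1, 100): one flattened 10 x 10 image per diagram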
+class Landscape(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+ """
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Landscape class.
+
+ Parameters:
+ num_landscapes (int): number of piecewise-linear functions to output (default 5).
+            resolution (int): number of samples for all piecewise-linear functions (default 100).
+ sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
+ self.nan_in_range = np.isnan(np.array(self.sample_range))
+ self.new_resolution = self.resolution + self.nan_in_range.sum()
+
+ def fit(self, X, y=None):
+ """
+ Fit the Landscape class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if self.nan_in_range.any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(self.nan_in_range, np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence landscape for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (number of samples = **num_landscapes** x **resolution**): output persistence landscapes.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ ls = np.zeros([self.num_landscapes, self.new_resolution])
+
+ events = []
+ for j in range(self.new_resolution):
+ events.append([])
+
+ for j in range(num_pts_in_diag):
+ [px,py] = diagram[j,:2]
+ min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
+ mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
+
+ if min_idx < self.new_resolution and max_idx > 0:
+
+ landscape_value = self.sample_range[0] + min_idx * step_x - px
+ for k in range(min_idx, mid_idx):
+ events[k].append(landscape_value)
+ landscape_value += step_x
+
+ landscape_value = py - self.sample_range[0] - mid_idx * step_x
+ for k in range(mid_idx, max_idx):
+ events[k].append(landscape_value)
+ landscape_value -= step_x
+
+ for j in range(self.new_resolution):
+ events[j].sort(reverse=True)
+ for k in range( min(self.num_landscapes, len(events[j])) ):
+ ls[k,j] = events[j][k]
+
+ if self.nan_in_range[0]:
+ ls = ls[:,1:]
+ if self.nan_in_range[1]:
+ ls = ls[:,:-1]
+ ls = np.sqrt(2)*np.reshape(ls,[1,-1])
+ Xfit.append(ls)
+
+ Xfit = np.concatenate(Xfit,0)
+
+ return Xfit
+
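A minimal sketch on a toy diagram with a fixed sample range:

    import numpy as np
    from gudhi.representations.vector_methods import Landscape

    diags = [np.array([[0., 2.], [1., 4.]])]
    ls = Landscape(num_landscapes=2, resolution=50, sample_range=[0., 4.])
    print(ls.fit_transform(diags).shape)   # (1, 100) = (1, num_landscapes * resolution)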
+class Silhouette(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+ """
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Silhouette class.
+
+ Parameters:
+            weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. on lists or numpy arrays of the form [p_x,p_y].
+ resolution (int): number of samples for the weighted average (default 100).
+ sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the Silhouette class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence silhouette for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ sh, weights = np.zeros(self.resolution), np.zeros(num_pts_in_diag)
+ for j in range(num_pts_in_diag):
+ weights[j] = self.weight(diagram[j,:])
+ total_weight = np.sum(weights)
+
+ for j in range(num_pts_in_diag):
+
+ [px,py] = diagram[j,:2]
+ weight = weights[j] / total_weight
+ min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+
+ if min_idx < self.resolution and max_idx > 0:
+
+ silhouette_value = self.sample_range[0] + min_idx * step_x - px
+ for k in range(min_idx, mid_idx):
+ sh[k] += weight * silhouette_value
+ silhouette_value += step_x
+
+ silhouette_value = py - self.sample_range[0] - mid_idx * step_x
+ for k in range(mid_idx, max_idx):
+ sh[k] += weight * silhouette_value
+ silhouette_value -= step_x
+
+ Xfit.append(np.reshape(np.sqrt(2) * sh, [1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
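A minimal sketch on a toy diagram, weighting each point by its persistence:

    import numpy as np
    from gudhi.representations.vector_methods import Silhouette

    diags = [np.array([[0., 2.], [1., 4.]])]
    sh = Silhouette(weight=lambda p: p[1] - p[0], resolution=100, sample_range=[0., 4.])
    print(sh.fit_transform(diags).shape)   # (1, 100)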
+class BettiCurve(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
+ """
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the BettiCurve class.
+
+ Parameters:
+            resolution (int): number of samples for the piecewise-constant function (default 100).
+ sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.resolution, self.sample_range = resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the Betti curve for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**resolution**): output Betti curves.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ bc = np.zeros(self.resolution)
+ for j in range(num_pts_in_diag):
+ [px,py] = diagram[j,:2]
+ min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ for k in range(min_idx, max_idx):
+ bc[k] += 1
+
+ Xfit.append(np.reshape(bc,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
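A minimal sketch on a toy diagram:

    import numpy as np
    from gudhi.representations.vector_methods import BettiCurve

    diags = [np.array([[0., 2.], [1., 3.]])]
    bc = BettiCurve(resolution=100, sample_range=[0., 3.])
    print(bc.fit_transform(diags).shape)   # (1, 100); each sample counts the intervals alive there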
+class Entropy(BaseEstimator, TransformerMixin):
+ """
+    This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired by Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+ """
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Entropy class.
+
+ Parameters:
+            mode (string): what entropy to compute: either "scalar" for the entropy statistic, or "vector" for the entropy summary function (default "scalar").
+            normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
+            resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
+            sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ """
+ self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the Entropy class on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the entropy for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+ new_X = BirthPersistenceTransform().fit_transform(X)
+
+ for i in range(num_diag):
+
+ orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
+ new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+
+ if self.mode == "scalar":
+ ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
+ Xfit.append(np.array([[ent]]))
+
+ else:
+ ent = np.zeros(self.resolution)
+ for j in range(num_pts_in_diag):
+ [px,py] = orig_diagram[j,:2]
+ min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
+ for k in range(min_idx, max_idx):
+ ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
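A minimal sketch of both modes on a toy diagram:

    import numpy as np
    from gudhi.representations.vector_methods import Entropy

    diags = [np.array([[0., 1.], [0., 2.]])]
    print(Entropy(mode="scalar").fit_transform(diags))                                  # one scalar per diagram
    print(Entropy(mode="vector", sample_range=[0., 2.]).fit_transform(diags).shape)    # (1, 100)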
+class TopologicalVector(BaseEstimator, TransformerMixin):
+ """
+    This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated with a persistence diagram is the vector of (slightly modified) pairwise distances between its points, sorted in decreasing order. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details.
+ """
+ def __init__(self, threshold=10):
+ """
+ Constructor for the TopologicalVector class.
+
+ Parameters:
+ threshold (int): number of distances to keep (default 10). This is the dimension of the topological vector. If -1, this threshold is computed from the list of persistence diagrams by considering the one with the largest number of points and using the dimension of its corresponding topological vector as threshold.
+ """
+ self.threshold = threshold
+
+ def fit(self, X, y=None):
+ """
+ Fit the TopologicalVector class on a list of persistence diagrams (this function actually does nothing but is useful when TopologicalVector is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Compute the topological vector for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**threshold**): output topological vectors.
+ """
+ if self.threshold == -1:
+ thresh = np.array([X[i].shape[0] for i in range(len(X))]).max()
+ else:
+ thresh = self.threshold
+
+ num_diag = len(X)
+ Xfit = np.zeros([num_diag, thresh])
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ pers = 0.5 * (diagram[:,1]-diagram[:,0])
+            # entrywise minimum min(pers[i], pers[j]) of the half-persistences, as an n x n matrix
+            min_pers = np.minimum(pers[:,None], pers[None,:])
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
+ dim = min(len(vect), thresh)
+ Xfit[i, :dim] = vect[:dim]
+
+ return Xfit
+
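A minimal sketch on a toy diagram; vectors shorter than **threshold** are zero-padded:

    import numpy as np
    from gudhi.representations.vector_methods import TopologicalVector

    diags = [np.array([[0., 4.], [1., 2.], [3., 8.]])]
    print(TopologicalVector(threshold=5).fit_transform(diags))   # shape (1, 5)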
+class ComplexPolynomial(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
+ """
+ def __init__(self, polynomial_type="R", threshold=10):
+ """
+ Constructor for the ComplexPolynomial class.
+
+ Parameters:
+ polynomial_type (char): either "R", "S" or "T" (default "R"). Type of complex polynomial that is going to be computed (explained in https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27).
+ threshold (int): number of coefficients (default 10). This is the dimension of the complex vector of coefficients, i.e. the number of coefficients corresponding to the largest degree terms of the polynomial. If -1, this threshold is computed from the list of persistence diagrams by considering the one with the largest number of points and using the dimension of its corresponding complex vector of coefficients as threshold.
+ """
+ self.threshold, self.polynomial_type = threshold, polynomial_type
+
+ def fit(self, X, y=None):
+ """
+ Fit the ComplexPolynomial class on a list of persistence diagrams (this function actually does nothing but is useful when ComplexPolynomial is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Compute the complex vector of coefficients for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**threshold**): output complex vectors of coefficients.
+ """
+ if self.threshold == -1:
+ thresh = np.array([X[i].shape[0] for i in range(len(X))]).max()
+ else:
+ thresh = self.threshold
+
+ Xfit = np.zeros([len(X), thresh]) + 1j * np.zeros([len(X), thresh])
+ for d in range(len(X)):
+ D, N = X[d], X[d].shape[0]
+ if self.polynomial_type == "R":
+ roots = D[:,0] + 1j * D[:,1]
+ elif self.polynomial_type == "S":
+ alpha = np.linalg.norm(D, axis=1)
+ alpha = np.where(alpha==0, np.ones(N), alpha)
+ roots = np.multiply( np.multiply( (D[:,0]+1j*D[:,1]), (D[:,1]-D[:,0]) ), 1./(np.sqrt(2)*alpha) )
+ elif self.polynomial_type == "T":
+ alpha = np.linalg.norm(D, axis=1)
+ roots = np.multiply( (D[:,1]-D[:,0])/2, np.cos(alpha) - np.sin(alpha) + 1j * (np.cos(alpha) + np.sin(alpha)) )
+            # compute the coefficients of prod_k (x - roots[k]) by multiplying in one root at a time
+            coeff = [0] * (N+1)
+            coeff[N] = 1
+            for i in range(1, N+1):
+                for j in range(N-i-1, N):
+                    coeff[j] += ((-1) * roots[i-1] * coeff[j+1])
+ coeff = np.array(coeff[::-1])[1:]
+ Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
+ return Xfit
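A minimal sketch on a toy diagram:

    import numpy as np
    from gudhi.representations.vector_methods import ComplexPolynomial

    diags = [np.array([[0., 1.], [1., 2.]])]
    cp = ComplexPolynomial(polynomial_type="R", threshold=3)
    print(cp.fit_transform(diags))   # 3 complex coefficients per diagram, zero-padded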
diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx
index f2cd6a8d..722cdcdc 100644
--- a/src/python/gudhi/rips_complex.pyx
+++ b/src/python/gudhi/rips_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -8,16 +17,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 5f86cfe2..1066d44b 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -1,19 +1,18 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp cimport bool
from libcpp.string cimport string
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 9f490271..b18627c4 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -1,17 +1,16 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from libc.stdint cimport intptr_t
from numpy import array as np_array
cimport simplex_tree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -75,13 +74,22 @@ cdef class SimplexTree:
return self.get_ptr().simplex_filtration(simplex)
def assign_filtration(self, simplex, filtration):
- """This function assigns the simplicial complex filtration value for a
+ """This function assigns a new filtration value to a
given N-simplex.
:param simplex: The N-simplex, represented by a list of vertex.
:type simplex: list of int.
- :param filtration: The simplicial complex filtration value.
+ :param filtration: The new filtration value.
:type filtration: float
+
+ .. note::
+ Beware that after this operation, the structure may not be a valid
+ filtration anymore, a simplex could have a lower filtration value
+ than one of its faces. Callers are responsible for fixing this
+ (with more :meth:`assign_filtration` or
+ :meth:`make_filtration_non_decreasing` for instance) before calling
+ any function that relies on the filtration property, like
+ :meth:`initialize_filtration`.
"""
self.get_ptr().assign_simplex_filtration(simplex, filtration)
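A minimal sketch of the repair pattern the new note describes (the simplices and values are made up):

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1], filtration=1.0)
    st.assign_filtration([0, 1], 0.5)      # the edge now has a lower value than its vertices
    st.make_filtration_non_decreasing()    # restore a valid filtration before relying on it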
@@ -362,7 +370,7 @@ cdef class SimplexTree:
value than its faces by increasing the filtration values.
:returns: True if any filtration value was modified,
- False if the filtration was already non-decreasing.
+ False if the filtration was already non-decreasing.
:rtype: bool
@@ -500,7 +508,7 @@ cdef class SimplexTree:
"""
if self.pcohptr != NULL:
if persistence_file != '':
- self.pcohptr.write_output_diagram(str.encode(persistence_file))
+ self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
else:
print("persistence_file must be specified")
else:
diff --git a/src/python/gudhi/strong_witness_complex.pyx b/src/python/gudhi/strong_witness_complex.pyx
index e757abea..2c33c3f2 100644
--- a/src/python/gudhi/strong_witness_complex.pyx
+++ b/src/python/gudhi/strong_witness_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -6,16 +15,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -69,7 +68,7 @@ cdef class StrongWitnessComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- if limit_dimension is not -1:
+ if limit_dimension != -1:
self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
max_alpha_square, limit_dimension)
else:
diff --git a/src/python/gudhi/subsampling.pyx b/src/python/gudhi/subsampling.pyx
index 1135c1fb..c501d16b 100644
--- a/src/python/gudhi/subsampling.pyx
+++ b/src/python/gudhi/subsampling.pyx
@@ -1,19 +1,18 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.string cimport string
from libcpp cimport bool
import os
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
@@ -44,19 +43,19 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
:param nb_points: Number of points of the subsample.
:type nb_points: unsigned.
:param starting_point: The iteration starts with the landmark `starting \
- point`,which is the index of the poit to start with. If not set, this \
- index is choosen randomly.
+ point`,which is the index of the point to start with. If not set, this \
+ index is chosen randomly.
:type starting_point: unsigned.
:returns: The subsample point set.
:rtype: vector[vector[double]]
"""
- if off_file is not '':
+ if off_file:
if os.path.isfile(off_file):
- if starting_point is '':
- return subsampling_n_farthest_points_from_file(str.encode(off_file),
+ if starting_point == '':
+ return subsampling_n_farthest_points_from_file(off_file.encode('utf-8'),
nb_points)
else:
- return subsampling_n_farthest_points_from_file(str.encode(off_file),
+ return subsampling_n_farthest_points_from_file(off_file.encode('utf-8'),
nb_points,
starting_point)
else:
@@ -65,7 +64,7 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
if points is None:
# Empty points
points=[]
- if starting_point is '':
+ if starting_point == '':
return subsampling_n_farthest_points(points, nb_points)
else:
return subsampling_n_farthest_points(points, nb_points,
@@ -87,9 +86,9 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
:returns: The subsample point set.
:rtype: vector[vector[double]]
"""
- if off_file is not '':
+ if off_file:
if os.path.isfile(off_file):
- return subsampling_n_random_points_from_file(str.encode(off_file),
+ return subsampling_n_random_points_from_file(off_file.encode('utf-8'),
nb_points)
else:
print("file " + off_file + " not found.")
@@ -117,9 +116,9 @@ def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
:returns: The subsample point set.
:rtype: vector[vector[double]]
"""
- if off_file is not '':
+ if off_file:
if os.path.isfile(off_file):
- return subsampling_sparsify_points_from_file(str.encode(off_file),
+ return subsampling_sparsify_points_from_file(off_file.encode('utf-8'),
min_squared_dist)
else:
print("file " + off_file + " not found.")
diff --git a/src/python/gudhi/tangential_complex.pyx b/src/python/gudhi/tangential_complex.pyx
index 3a945fe2..6391488c 100644
--- a/src/python/gudhi/tangential_complex.pyx
+++ b/src/python/gudhi/tangential_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -9,16 +18,6 @@ import os
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "GPL v3"
@@ -65,9 +64,9 @@ cdef class TangentialComplex:
# The real cython constructor
def __cinit__(self, intrisic_dim, points=None, off_file=''):
- if off_file is not '':
+ if off_file:
if os.path.isfile(off_file):
- self.thisptr = new Tangential_complex_interface(intrisic_dim, str.encode(off_file), True)
+ self.thisptr = new Tangential_complex_interface(intrisic_dim, off_file.encode('utf-8'), True)
else:
print("file " + off_file + " not found.")
else:
@@ -92,7 +91,7 @@ cdef class TangentialComplex:
Raises:
ValueError: In debug mode, if the computed star dimension is too
low. Try to set a bigger maximal edge length value with
- :func:`~gudhi.Tangential_complex.set_max_squared_edge_length`
+ :meth:`set_max_squared_edge_length`
if this happens.
"""
self.thisptr.compute_tangential_complex()
@@ -167,7 +166,7 @@ cdef class TangentialComplex:
:type max_squared_edge_length: double
If the maximal edge length value is too low
- :func:`~gudhi.Tangential_complex.compute_tangential_complex`
+ :meth:`compute_tangential_complex`
will throw an exception in debug mode.
"""
self.thisptr.set_max_squared_edge_length(max_squared_edge_length)
diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py
new file mode 100644
index 00000000..db5ddff2
--- /dev/null
+++ b/src/python/gudhi/wasserstein.py
@@ -0,0 +1,97 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+import scipy.spatial.distance as sc
+try:
+ import ot
+except ImportError:
+ print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
+
+def _proj_on_diag(X):
+ '''
+    :param X: (n x 2) array encoding the points of a persistence diagram.
+ :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
+ '''
+ Z = (X[:,0] + X[:,1]) / 2.
+ return np.array([Z , Z]).T
+
+
+def _build_dist_matrix(X, Y, order=2., internal_p=2.):
+ '''
+ :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
+ :param Y: (m x 2) numpy.array encoding the second diagram.
+ :param internal_p: Ground metric (i.e. norm l_p).
+ :param order: exponent for the Wasserstein metric.
+ :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
+ For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal.
+ note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal).
+ '''
+ Xdiag = _proj_on_diag(X)
+ Ydiag = _proj_on_diag(Y)
+ if np.isinf(internal_p):
+ C = sc.cdist(X,Y, metric='chebyshev')**order
+ Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
+ Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
+ else:
+ C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
+ Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
+ Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
+ Cf = np.hstack((C, Cxd[:,None]))
+ Cdy = np.append(Cdy, 0)
+
+ Cf = np.vstack((Cf, Cdy[None,:]))
+
+ return Cf
+
+
+def _perstot(X, order, internal_p):
+ '''
+ :param X: (n x 2) numpy.array (points of a given diagram).
+    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2).
+    :param order: exponent for Wasserstein.
+ :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+ '''
+ Xdiag = _proj_on_diag(X)
+ return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
+
+
+def wasserstein_distance(X, Y, order=2., internal_p=2.):
+ '''
+ :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ :param Y: (m x 2) numpy.array encoding the second diagram.
+    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (Euclidean norm).
+ :param order: exponent for Wasserstein; Default value is 2.
+    :returns: the Wasserstein distance of order **order** (1 <= order < infinity) between persistence diagrams with respect to the **internal_p**-norm as ground metric.
+ :rtype: float
+ '''
+ n = len(X)
+ m = len(Y)
+
+ # handle empty diagrams
+ if X.size == 0:
+ if Y.size == 0:
+ return 0.
+ else:
+ return _perstot(Y, order, internal_p)
+ elif Y.size == 0:
+ return _perstot(X, order, internal_p)
+
+ M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
+ a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
+ a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT
+ b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
+ b[-1] = b[-1] * n # so that we have a probability measure, required by POT
+
+    # Computation of the OT cost using ot.emd2.
+    # Note: ot.emd2 returns the Wasserstein distance raised to the power **order**.
+    # The default numItermax=100000 is not sufficient for some examples with 5000 points; what a good value would be in general is still an open question.
+ ot_cost = (n+m) * ot.emd2(a, b, M, numItermax=2000000)
+
+ return ot_cost ** (1./order)
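A minimal sketch on two toy diagrams (requires the POT package):

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    X = np.array([[0., 1.], [0., 2.]])
    Y = np.array([[0., 1.5]])
    # 2-Wasserstein distance with the Euclidean ground metric
    print(wasserstein_distance(X, Y, order=2., internal_p=2.))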
diff --git a/src/python/gudhi/witness_complex.pyx b/src/python/gudhi/witness_complex.pyx
index baa70b7a..b032a5a1 100644
--- a/src/python/gudhi/witness_complex.pyx
+++ b/src/python/gudhi/witness_complex.pyx
@@ -1,3 +1,12 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2016 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -6,16 +15,6 @@ from libc.stdint cimport intptr_t
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
-""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
- See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- Author(s): Vincent Rouvreau
-
- Copyright (C) 2016 Inria
-
- Modification(s):
- - YYYY/MM Author: Description of the modification
-"""
-
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -69,7 +68,7 @@ cdef class WitnessComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- if limit_dimension is not -1:
+ if limit_dimension != -1:
self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
max_alpha_square, limit_dimension)
else:
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index b3553d32..e9bbadb0 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -13,8 +13,11 @@
#include <gudhi/Simplex_tree.h>
#include <gudhi/Alpha_complex.h>
+#include <CGAL/Epeck_d.h>
#include <CGAL/Epick_d.h>
+#include <boost/range/adaptor/transformed.hpp>
+
#include "Simplex_tree_interface.h"
#include <iostream>
@@ -26,12 +29,15 @@ namespace Gudhi {
namespace alpha_complex {
class Alpha_complex_interface {
- using Dynamic_kernel = CGAL::Epick_d< CGAL::Dynamic_dimension_tag >;
+ using Dynamic_kernel = CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >;
using Point_d = Dynamic_kernel::Point_d;
public:
Alpha_complex_interface(const std::vector<std::vector<double>>& points) {
- alpha_complex_ = new Alpha_complex<Dynamic_kernel>(points);
+ auto mkpt = [](std::vector<double> const& vec){
+ return Point_d(vec.size(), vec.begin(), vec.end());
+ };
+ alpha_complex_ = new Alpha_complex<Dynamic_kernel>(boost::adaptors::transform(points, mkpt));
}
Alpha_complex_interface(const std::string& off_file_name, bool from_file = true) {
@@ -45,9 +51,9 @@ class Alpha_complex_interface {
std::vector<double> get_point(int vh) {
std::vector<double> vd;
try {
- Point_d ph = alpha_complex_->get_point(vh);
- for (auto coord = ph.cartesian_begin(); coord < ph.cartesian_end(); coord++)
- vd.push_back(*coord);
+ Point_d const& ph = alpha_complex_->get_point(vh);
+ for (auto coord = ph.cartesian_begin(); coord != ph.cartesian_end(); coord++)
+ vd.push_back(CGAL::to_double(*coord));
} catch (std::out_of_range const&) {
// std::out_of_range is thrown in case not found. Other exceptions must be re-thrown
}
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index 3f1d4424..9c2124f4 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -1,7 +1,3 @@
-from setuptools import setup, Extension
-from Cython.Build import cythonize
-from numpy import get_include as numpy_get_include
-
"""This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -12,6 +8,11 @@ from numpy import get_include as numpy_get_include
- YYYY/MM Author: Description of the modification
"""
+from setuptools import setup, Extension
+from Cython.Build import cythonize
+from numpy import get_include as numpy_get_include
+import sys
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -38,7 +39,8 @@ for module in modules:
libraries=libraries,
library_dirs=library_dirs,
include_dirs=include_dirs,
- runtime_library_dirs=runtime_library_dirs,))
+ runtime_library_dirs=runtime_library_dirs,
+ cython_directives = {'language_level': str(sys.version_info[0])},))
setup(
name = 'gudhi',
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index 24f8bf53..0d9e9e45 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -1,5 +1,3 @@
-from gudhi import AlphaComplex, SimplexTree
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,17 @@ from gudhi import AlphaComplex, SimplexTree
- YYYY/MM Author: Description of the modification
"""
+from gudhi import AlphaComplex, SimplexTree
+import math
+import numpy as np
+import pytest
+try:
+ # python3
+ from itertools import zip_longest
+except ImportError:
+ # python2
+ from itertools import izip_longest as zip_longest
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -88,3 +97,28 @@ def test_filtered_alpha():
]
assert simplex_tree.get_star([0]) == [([0], 0.0), ([0, 1], 0.25), ([0, 2], 0.25)]
assert simplex_tree.get_cofaces([0], 1) == [([0, 1], 0.25), ([0, 2], 0.25)]
+
+def test_safe_alpha_persistence_comparison():
+ #generate periodic signal
+ time = np.arange(0, 10, 1)
+ signal = [math.sin(x) for x in time]
+ delta = math.pi
+ delayed = [math.sin(x + delta) for x in time]
+
+ #construct embedding
+ embedding1 = [[signal[i], -signal[i]] for i in range(len(time))]
+ embedding2 = [[signal[i], delayed[i]] for i in range(len(time))]
+
+ #build alpha complex and simplex tree
+ alpha_complex1 = AlphaComplex(points=embedding1)
+ simplex_tree1 = alpha_complex1.create_simplex_tree()
+
+ alpha_complex2 = AlphaComplex(points=embedding2)
+ simplex_tree2 = alpha_complex2.create_simplex_tree()
+
+ diag1 = simplex_tree1.persistence()
+ diag2 = simplex_tree2.persistence()
+
+ for (first_p, second_p) in zip_longest(diag1, diag2):
+ assert first_p[0] == pytest.approx(second_p[0])
+ assert first_p[1] == pytest.approx(second_p[1])
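Note: with delta = math.pi the identity sin(x + pi) = -sin(x) makes embedding2 coincide point-for-point with embedding1 (up to rounding), so the two alpha complexes should produce matching persistence diagrams; hence the pytest.approx comparison rather than strict equality. A quick check of the identity:

    import math
    x = 1.234  # arbitrary sample value
    assert math.isclose(math.sin(x + math.pi), -math.sin(x))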
diff --git a/src/python/test/test_bottleneck_distance.py b/src/python/test/test_bottleneck_distance.py
index f5f019b9..70b2abad 100755
--- a/src/python/test/test_bottleneck_distance.py
+++ b/src/python/test/test_bottleneck_distance.py
@@ -1,5 +1,3 @@
-import gudhi
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ import gudhi
- YYYY/MM Author: Description of the modification
"""
+import gudhi
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
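Note: this import-below-the-header reshuffle recurs across every test file in the patch for a concrete reason: a string literal is the module docstring only when it is the first statement in the file, so with the imports first the license header was a no-op expression and module.__doc__ stayed None. Sketch of the rule (hypothetical module):

    """Module docstring -- recognized only because this string is the first statement."""
    import math  # anything placed above the string demotes it to a plain expression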
diff --git a/src/python/test/test_cover_complex.py b/src/python/test/test_cover_complex.py
index 8cd12272..32bc5a26 100755
--- a/src/python/test/test_cover_complex.py
+++ b/src/python/test/test_cover_complex.py
@@ -1,5 +1,3 @@
-from gudhi import CoverComplex
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ from gudhi import CoverComplex
- YYYY/MM Author: Description of the modification
"""
+from gudhi import CoverComplex
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2018 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_cubical_complex.py b/src/python/test/test_cubical_complex.py
index 68f54fbe..8c1b2600 100755
--- a/src/python/test/test_cubical_complex.py
+++ b/src/python/test/test_cubical_complex.py
@@ -1,5 +1,3 @@
-from gudhi import CubicalComplex
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,9 @@ from gudhi import CubicalComplex
- YYYY/MM Author: Description of the modification
"""
+from gudhi import CubicalComplex, PeriodicCubicalComplex
+import numpy as np
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -56,7 +57,7 @@ def test_dimension_or_perseus_file_constructor():
assert cub.__is_persistence_defined() == False
-def test_dimension_simple_constructor():
+def simple_constructor(cub):
cub = CubicalComplex(
dimensions=[3, 3], top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9]
)
@@ -67,12 +68,22 @@ def test_dimension_simple_constructor():
assert cub.betti_numbers() == [1, 0, 0]
assert cub.persistent_betti_numbers(0, 1000) == [0, 0, 0]
-
-def test_user_case_simple_constructor():
+def test_simple_constructor_from_top_cells():
cub = CubicalComplex(
dimensions=[3, 3],
- top_dimensional_cells=[float("inf"), 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ top_dimensional_cells=[1, 2, 3, 4, 5, 6, 7, 8, 9],
)
+ simple_constructor(cub)
+
+def test_simple_constructor_from_numpy_array():
+ cub = CubicalComplex(
+ top_dimensional_cells=np.array([[1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9]])
+ )
+ simple_constructor(cub)
+
+def user_case_simple_constructor(cub):
assert cub.__is_defined() == True
assert cub.__is_persistence_defined() == False
assert cub.persistence() == [(1, (0.0, 1.0)), (0, (0.0, float("inf")))]
@@ -83,6 +94,20 @@ def test_user_case_simple_constructor():
)
assert other_cub.persistence() == [(1, (0.0, 1.0)), (0, (0.0, float("inf")))]
+def test_user_case_simple_constructor_from_top_cells():
+ cub = CubicalComplex(
+ dimensions=[3, 3],
+ top_dimensional_cells=[float("inf"), 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ )
+ user_case_simple_constructor(cub)
+
+def test_user_case_simple_constructor_from_numpy_array():
+ cub = CubicalComplex(
+ top_dimensional_cells=np.array([[float("inf"), 0.0, 0.0],
+ [0.0, 1.0, 0.0],
+ [0.0, 0.0, 0.0]])
+ )
+ user_case_simple_constructor(cub)
def test_dimension_file_constructor():
# Create test file
@@ -96,3 +121,29 @@ def test_dimension_file_constructor():
assert cub.__is_persistence_defined() == True
assert cub.betti_numbers() == [1, 0, 0]
assert cub.persistent_betti_numbers(0, 1000) == [1, 0, 0]
+
+def test_connected_sublevel_sets():
+ array_cells = np.array([[3, 3], [2, 2], [4, 4]])
+ linear_cells = [3, 3, 2, 2, 4, 4]
+ dimensions = [2, 3]
+ periodic_dimensions = [False, False]
+ # with a numpy array version
+ cub = CubicalComplex(top_dimensional_cells = array_cells)
+ assert cub.persistence() == [(0, (2.0, float("inf")))]
+ assert cub.betti_numbers() == [1, 0, 0]
+ # with vector of dimensions
+ cub = CubicalComplex(dimensions = dimensions,
+ top_dimensional_cells = linear_cells)
+ assert cub.persistence() == [(0, (2.0, float("inf")))]
+ assert cub.betti_numbers() == [1, 0, 0]
+ # periodic with a numpy array version
+ cub = PeriodicCubicalComplex(top_dimensional_cells = array_cells,
+ periodic_dimensions = periodic_dimensions)
+ assert cub.persistence() == [(0, (2.0, float("inf")))]
+ assert cub.betti_numbers() == [1, 0, 0]
+ # periodic with vector of dimensions
+ cub = PeriodicCubicalComplex(dimensions = dimensions,
+ top_dimensional_cells = linear_cells,
+ periodic_dimensions = periodic_dimensions)
+ assert cub.persistence() == [(0, (2.0, float("inf")))]
+ assert cub.betti_numbers() == [1, 0, 0]
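Note: the equivalences asserted in test_connected_sublevel_sets pin down the layout convention: array_cells has numpy shape (3, 2) while dimensions = [2, 3], so the dimensions list names the fastest-varying axis first and the C-order (row-major) flattening of the array is the expected linear layout. A consistency check (inferred from the test data rather than from gudhi's documentation):

    import numpy as np
    array_cells = np.array([[3, 3], [2, 2], [4, 4]])
    assert list(array_cells.flatten()) == [3, 3, 2, 2, 4, 4]  # the linear_cells layout
    assert list(array_cells.shape) == [3, 2]                  # dimensions [2, 3], reversed

(As refactored above, simple_constructor appears to rebuild cub from the flat top-cells list, so this test is the one that exercises the numpy-array constructor path end to end.)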
diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py
index f5eae5fa..c18d2484 100755
--- a/src/python/test/test_euclidean_witness_complex.py
+++ b/src/python/test/test_euclidean_witness_complex.py
@@ -1,5 +1,3 @@
-import gudhi
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ import gudhi
- YYYY/MM Author: Description of the modification
"""
+import gudhi
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_reader_utils.py b/src/python/test/test_reader_utils.py
index 4c7b32c2..90da6651 100755
--- a/src/python/test/test_reader_utils.py
+++ b/src/python/test/test_reader_utils.py
@@ -1,6 +1,3 @@
-import gudhi
-import numpy as np
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -11,6 +8,9 @@ import numpy as np
- YYYY/MM Author: Description of the modification
"""
+import gudhi
+import numpy as np
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2017 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
new file mode 100755
index 00000000..dba7f952
--- /dev/null
+++ b/src/python/test/test_representations.py
@@ -0,0 +1,12 @@
+import os
+import sys
+import matplotlib.pyplot as plt
+
+def test_representations_examples():
+ # Disable graphics for testing purposes
+ plt.show = lambda:None
+ here = os.path.dirname(os.path.realpath(__file__))
+ sys.path.append(here + "/../example")
+ import diagram_vectorizations_distances_kernels
+
+ return None
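Note: overriding plt.show with a no-op lambda lets the example script run to completion on headless CI without blocking on GUI windows. An alternative (not what this test does) is to select matplotlib's non-interactive backend before pyplot is imported:

    import matplotlib
    matplotlib.use("Agg")  # off-screen rendering; figures are never displayed
    import matplotlib.pyplot as plt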
diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py
index d55ae22f..b02a68e1 100755
--- a/src/python/test/test_rips_complex.py
+++ b/src/python/test/test_rips_complex.py
@@ -1,6 +1,3 @@
-from gudhi import RipsComplex
-from math import sqrt
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -11,6 +8,9 @@ from math import sqrt
- YYYY/MM Author: Description of the modification
"""
+from gudhi import RipsComplex
+from math import sqrt
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 8d8971c1..1822c43b 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -1,5 +1,3 @@
-from gudhi import SimplexTree
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ from gudhi import SimplexTree
- YYYY/MM Author: Description of the modification
"""
+from gudhi import SimplexTree
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_subsampling.py b/src/python/test/test_subsampling.py
index c816e203..fe0985fa 100755
--- a/src/python/test/test_subsampling.py
+++ b/src/python/test/test_subsampling.py
@@ -1,5 +1,3 @@
-import gudhi
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ import gudhi
- YYYY/MM Author: Description of the modification
"""
+import gudhi
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py
index 0f828d8e..e650e99c 100755
--- a/src/python/test/test_tangential_complex.py
+++ b/src/python/test/test_tangential_complex.py
@@ -1,5 +1,3 @@
-from gudhi import TangentialComplex, SimplexTree
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ from gudhi import TangentialComplex, SimplexTree
- YYYY/MM Author: Description of the modification
"""
+from gudhi import TangentialComplex, SimplexTree
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
new file mode 100755
index 00000000..43dda77e
--- /dev/null
+++ b/src/python/test/test_wasserstein_distance.py
@@ -0,0 +1,48 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Theo Lacombe
+
+ Copyright (C) 2019 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.wasserstein import wasserstein_distance
+import numpy as np
+
+__author__ = "Theo Lacombe"
+__copyright__ = "Copyright (C) 2019 Inria"
+__license__ = "MIT"
+
+
+def test_basic_wasserstein():
+ diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
+ diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
+ diag3 = np.array([[0, 2], [4, 6]])
+ diag4 = np.array([[0, 3], [4, 8]])
+ emptydiag = np.array([[]])
+
+ assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=1.) == 0.
+ assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=1.) == 0.
+ assert wasserstein_distance(emptydiag, emptydiag, internal_p=np.inf, order=2.) == 0.
+ assert wasserstein_distance(emptydiag, emptydiag, internal_p=2., order=2.) == 0.
+
+ assert wasserstein_distance(diag3, emptydiag, internal_p=np.inf, order=1.) == 2.
+ assert wasserstein_distance(diag3, emptydiag, internal_p=1., order=1.) == 4.
+
+ assert wasserstein_distance(diag4, emptydiag, internal_p=1., order=2.) == 5. # thank you Pythagorean triples
+ assert wasserstein_distance(diag4, emptydiag, internal_p=np.inf, order=2.) == 2.5
+ assert wasserstein_distance(diag4, emptydiag, internal_p=2., order=2.) == 3.5355339059327378
+
+ assert wasserstein_distance(diag1, diag2, internal_p=2., order=1.) == 1.4453593023967701
+ assert wasserstein_distance(diag1, diag2, internal_p=2.35, order=1.74) == 0.9772734057168739
+
+ assert wasserstein_distance(diag1, emptydiag, internal_p=2.35, order=1.7863) == 3.141592214572228
+
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=1.) == 3.
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=1.) == 3. # no diag matching here
+ assert wasserstein_distance(diag3, diag4, internal_p=np.inf, order=2.) == np.sqrt(5)
+ assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == np.sqrt(5)
+ assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == np.sqrt(5)
+
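Note: the asserted constants can be checked by hand. Matching a point (a, b) to its nearest diagonal point costs (b - a) under internal_p=1., (b - a)/2 under internal_p=np.inf and (b - a)/sqrt(2) under internal_p=2., and the order-q distance to the empty diagram is the l_q norm of those per-point costs. For diag4 the gaps b - a are 3 and 4, hence the 3-4-5 triple behind the "Pythagorean triples" comment. A numpy verification sketch:

    import numpy as np
    gaps = np.array([3.0, 4.0])  # b - a for the two points of diag4
    assert np.linalg.norm(gaps) == 5.0                                        # internal_p=1., order=2.
    assert np.isclose(np.linalg.norm(gaps / 2), 2.5)                          # internal_p=np.inf, order=2.
    assert np.isclose(np.linalg.norm(gaps / np.sqrt(2)), 3.5355339059327378)  # internal_p=2., order=2.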
diff --git a/src/python/test/test_witness_complex.py b/src/python/test/test_witness_complex.py
index 36ced635..7baf18c9 100755
--- a/src/python/test/test_witness_complex.py
+++ b/src/python/test/test_witness_complex.py
@@ -1,5 +1,3 @@
-from gudhi import WitnessComplex, StrongWitnessComplex, SimplexTree
-
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
Author(s): Vincent Rouvreau
@@ -10,6 +8,8 @@ from gudhi import WitnessComplex, StrongWitnessComplex, SimplexTree
- YYYY/MM Author: Description of the modification
"""
+from gudhi import WitnessComplex, StrongWitnessComplex, SimplexTree
+
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"