author     ROUVREAU Vincent <vincent.rouvreau@inria.fr>  2020-03-22 10:25:27 +0100
committer  ROUVREAU Vincent <vincent.rouvreau@inria.fr>  2020-03-22 10:25:27 +0100
commit     bb6f10aff11e05baec2d2d10c544a2ea1c302bc6 (patch)
tree       665b6fcbdf6c951d31206852a2d356ef103ada18
parent     f78d65f0bd6aaf5f92639e2b809e1711acf929f7 (diff)
parent     2ec0ac1f006577d520accbe605a61fc10ede3352 (diff)
Merge master and fix conflicts
-rw-r--r--  .github/CONTRIBUTING.md | 2
-rw-r--r--  .github/build-requirements.txt | 5
-rw-r--r--  .github/how_to_use_github_to_contribute_to_gudhi.md | 7
-rw-r--r--  .github/test-requirements.txt | 9
-rw-r--r--  .gitmodules | 2
-rw-r--r--  .travis.yml | 78
-rw-r--r--  Dockerfile_for_circleci_image | 24
-rw-r--r--  README.md | 6
-rw-r--r--  azure-pipelines.yml | 38
m---------  ext/hera | 0
-rw-r--r--  src/Alpha_complex/example/CMakeLists.txt | 6
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex_3d.h | 5
-rw-r--r--  src/Alpha_complex/utilities/CMakeLists.txt | 12
-rw-r--r--  src/Doxyfile.in | 1
-rw-r--r--  src/Nerve_GIC/include/gudhi/GIC.h | 14
-rw-r--r--  src/Persistence_representations/utilities/CMakeLists.txt | 10
-rw-r--r--  src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt | 19
-rw-r--r--  src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt | 7
-rw-r--r--  src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt | 8
-rw-r--r--  src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt | 8
-rw-r--r--  src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt | 8
-rw-r--r--  src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h | 29
-rw-r--r--  src/Rips_complex/example/CMakeLists.txt | 8
-rw-r--r--  src/Simplex_tree/example/simple_simplex_tree.cpp | 1
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree.h | 94
-rw-r--r--  src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h | 12
-rw-r--r--  src/Simplex_tree/test/CMakeLists.txt | 6
-rw-r--r--  src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp | 148
-rw-r--r--  src/Simplex_tree/test/simplex_tree_unit_test.cpp | 120
-rw-r--r--  src/cmake/modules/GUDHI_compilation_flags.cmake | 37
-rw-r--r--  src/cmake/modules/GUDHI_third_party_libraries.cmake | 2
-rw-r--r--  src/cmake/modules/GUDHI_user_version_target.cmake | 2
-rw-r--r--  src/common/example/CMakeLists.txt | 1
-rw-r--r--  src/python/CMakeLists.txt | 17
-rw-r--r--  src/python/doc/persistence_graphical_tools_sum.inc | 8
-rw-r--r--  src/python/doc/persistence_graphical_tools_user.rst | 32
-rw-r--r--  src/python/doc/point_cloud.rst | 8
-rw-r--r--  src/python/doc/wasserstein_distance_user.rst | 43
-rwxr-xr-x  src/python/example/alpha_complex_from_points_example.py | 8
-rwxr-xr-x  src/python/example/rips_complex_from_points_example.py | 5
-rwxr-xr-x  src/python/example/simplex_tree_example.py | 9
-rw-r--r--  src/python/gudhi/persistence_graphical_tools.py | 124
-rw-r--r--  src/python/gudhi/point_cloud/__init__.py | 0
-rw-r--r--  src/python/gudhi/point_cloud/timedelay.py | 95
-rw-r--r--  src/python/gudhi/simplex_tree.pxd | 26
-rw-r--r--  src/python/gudhi/simplex_tree.pyx | 52
-rw-r--r--  src/python/gudhi/wasserstein.py | 57
-rw-r--r--  src/python/include/Simplex_tree_interface.h | 64
-rwxr-xr-x  src/python/test/test_alpha_complex.py | 5
-rwxr-xr-x  src/python/test/test_euclidean_witness_complex.py | 6
-rwxr-xr-x  src/python/test/test_rips_complex.py | 6
-rwxr-xr-x  src/python/test/test_simplex_tree.py | 25
-rwxr-xr-x  src/python/test/test_tangential_complex.py | 3
-rwxr-xr-x  src/python/test/test_time_delay.py | 43
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py | 33
55 files changed, 945 insertions, 453 deletions
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 13d6cad7..a18ff8bd 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -19,7 +19,7 @@ There is a label **enhancement** in the [new issue](https://github.com/GUDHI/gud
## You are not familiar with GitHub ?
-Please take some time to read our [how to use GitHub to contribute to GUDHI](/home/vincent/workspace/gudhi/gudhi-devel/for_dev/how_to_use_github_to_contribute_to_gudhi.md).
+Please take some time to read our [how to use GitHub to contribute to GUDHI](how_to_use_github_to_contribute_to_gudhi.md).
## Something you want to improve in the documentation
diff --git a/.github/build-requirements.txt b/.github/build-requirements.txt
new file mode 100644
index 00000000..7de60d23
--- /dev/null
+++ b/.github/build-requirements.txt
@@ -0,0 +1,5 @@
+setuptools
+wheel
+numpy
+Cython
+pybind11
\ No newline at end of file
diff --git a/.github/how_to_use_github_to_contribute_to_gudhi.md b/.github/how_to_use_github_to_contribute_to_gudhi.md
index 6ab05e36..747ca39b 100644
--- a/.github/how_to_use_github_to_contribute_to_gudhi.md
+++ b/.github/how_to_use_github_to_contribute_to_gudhi.md
@@ -25,10 +25,10 @@ This creates a directory gudhi-devel, which you are free to move around or renam
cd gudhi-devel
```
-Everytime you clone the repository, you will have to download the *submodules*.
+When you clone the repository, you also need to download the *submodules*.
## Submodules
-An interface to Hera for Wasserstein distance is available on an external git repository. To download it:
+Hera, used for Wasserstein distance, is available on an external git repository. To download it:
```bash
git submodule update --init
```
@@ -60,8 +60,9 @@ This is a command you can run quite regularly.
It tells git to check all that happened on github.
It is safe, it will not mess with your files.
-**Reminder:** Everytime you checkout master or merge from master, afterwards, if the version of one the submodule has changed, or if a submodule was added, you will have to:
+**Reminder:** If the version of a submodule has changed, or if a submodule was added, you may need to:
```bash
+git submodule sync
git submodule update --init
```
diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt
new file mode 100644
index 00000000..18882792
--- /dev/null
+++ b/.github/test-requirements.txt
@@ -0,0 +1,9 @@
+pytest
+sphinx
+sphinxcontrib-bibtex
+sphinx-paramlinks
+matplotlib
+scipy
+scikit-learn
+POT
+tensorflow
\ No newline at end of file
diff --git a/.gitmodules b/.gitmodules
index 6e8b3ab1..f70c570d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,3 @@
[submodule "ext/hera"]
path = ext/hera
- url = https://bitbucket.org/grey_narn/hera.git
+ url = https://github.com/grey-narn/hera.git
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 8980be10..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,78 +0,0 @@
-language: cpp
-
-sudo: required
-
-git:
- depth: 3
-
-os: osx
-osx_image: xcode10.2
-compiler: clang
-
-matrix:
- include:
- - env:
- # 1. Only examples and associated tests
- - CMAKE_EXAMPLE='ON' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- - env:
- # 2. Only unitary tests
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='ON' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- - env:
- # 3. Only utilities and associated tests
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='ON' CMAKE_PYTHON='OFF' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure'
- - env:
- # 4. Only doxygen documentation
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='OFF' MAKE_TARGET='doxygen' CTEST_COMMAND='echo No tests for doxygen target'
- - env:
- # 5. Only Python, associated tests and sphinx documentation
- # $ which python3 => /usr/local/bin/python3
- # cmake => -- Found PythonInterp: /usr/local/bin/python3 (found version "3.7.5")
- # In python3-sphinx-build.py, print(sys.executable) => /usr/local/opt/python/bin/python3.7 ???
- # should be : MAKE_TARGET='all sphinx' CTEST_COMMAND='ctest --output-on-failure'
- - CMAKE_EXAMPLE='OFF' CMAKE_TEST='OFF' CMAKE_UTILITIES='OFF' CMAKE_PYTHON='ON' MAKE_TARGET='all' CTEST_COMMAND='ctest --output-on-failure -E sphinx'
-
-cache:
- directories:
- - $HOME/.cache/pip
- - $HOME/Library/Caches/Homebrew
-
-before_install:
- - brew update && brew unlink python@2 && brew upgrade python
-
-addons:
- homebrew:
- packages:
- - python3
- - cmake
- - graphviz
- - doxygen
- - boost
- - eigen
- - gmp
- - mpfr
- - tbb
- - cgal
-
-before_cache:
- - rm -f $HOME/.cache/pip/log/debug.log
- - brew cleanup
-
-# When installing through libcgal-dev apt, CMake Error at CGAL Exports.cmake The imported target "CGAL::CGAL Qt5" references the file
-install:
- - python3 -m pip install --upgrade pip setuptools wheel
- - python3 -m pip install --user pytest Cython sphinx sphinxcontrib-bibtex sphinx-paramlinks matplotlib numpy scipy scikit-learn
- - python3 -m pip install --user POT pybind11
-
-script:
- - rm -rf build
- - mkdir -p build
- - cd build
- - cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=${CMAKE_EXAMPLE} -DWITH_GUDHI_TEST=${CMAKE_TEST} -DWITH_GUDHI_UTILITIES=${CMAKE_UTILITIES} -DWITH_GUDHI_PYTHON=${CMAKE_PYTHON} -DUSER_VERSION_DIR=version -DPython_ADDITIONAL_VERSIONS=3 ..
- - make ${MAKE_TARGET}
- - ${CTEST_COMMAND}
- - cd ..
-
-notifications:
- email:
- on_success: change # default: always
- on_failure: always # default: always
diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
index ebd2f366..1eededb5 100644
--- a/Dockerfile_for_circleci_image
+++ b/Dockerfile_for_circleci_image
@@ -30,7 +30,7 @@ RUN apt-get install -y make \
cmake \
graphviz \
perl \
- texlive-bibtex-extra \
+ texlive-full \
biber \
doxygen \
libboost-all-dev \
@@ -42,24 +42,16 @@ RUN apt-get install -y make \
locales \
python3 \
python3-pip \
- python3-pytest \
python3-tk \
- python3-pybind11 \
libfreetype6-dev \
- pkg-config
+ pkg-config \
+ curl
-RUN pip3 install \
- setuptools \
- numpy \
- matplotlib \
- scipy \
- Cython \
- POT \
- scikit-learn \
- sphinx \
- sphinx-paramlinks \
- sphinxcontrib-bibtex \
- tensorflow
+ADD .github/build-requirements.txt /
+ADD .github/test-requirements.txt /
+
+RUN pip3 install -r build-requirements.txt
+RUN pip3 install -r test-requirements.txt
# apt clean up
RUN apt autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/README.md b/README.md
index f7e3d70c..279953e1 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-[![Build Status](https://travis-ci.org/GUDHI/gudhi-devel.svg?branch=master)](https://travis-ci.org/GUDHI/gudhi-devel)
-[![CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
-[![Build status](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
+[![OSx on Azure](https://dev.azure.com/GUDHI/gudhi-devel/_apis/build/status/GUDHI.gudhi-devel?branchName=master)](https://dev.azure.com/GUDHI/gudhi-devel/_build/latest?definitionId=1&branchName=master)
+[![Linux on CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
+[![Win on Appveyor](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
[![Anaconda Cloud](https://anaconda.org/conda-forge/gudhi/badges/version.svg)](https://anaconda.org/conda-forge/gudhi)
[![Anaconda downloads](https://anaconda.org/conda-forge/gudhi/badges/downloads.svg)](https://anaconda.org/conda-forge/gudhi)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
new file mode 100644
index 00000000..95b15db2
--- /dev/null
+++ b/azure-pipelines.yml
@@ -0,0 +1,38 @@
+jobs:
+
+ - job: 'Test'
+ displayName: "Build and test"
+ timeoutInMinutes: 0
+ cancelTimeoutInMinutes: 60
+
+ strategy:
+ matrix:
+ macOSrelease:
+ imageName: 'macos-10.14'
+ CMakeBuildType: Release
+ customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
+
+ pool:
+ vmImage: $(imageName)
+
+ steps:
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.7'
+ architecture: 'x64'
+
+ - script: |
+ $(customInstallation)
+ git submodule update --init
+ python -m pip install --upgrade pip
+ python -m pip install --user -r .github/build-requirements.txt
+ python -m pip install --user -r .github/test-requirements.txt
+ displayName: 'Install build dependencies'
+ - script: |
+ mkdir build
+ cd build
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+ make
+ make doxygen
+ ctest -j 8 --output-on-failure -E sphinx # remove sphinx build as it fails
+ displayName: 'Build, test and documentation generation'
diff --git a/ext/hera b/ext/hera
-Subproject cb1838e682ec07f80720241cf9098400caeb83c
+Subproject 0019cae9dc1e9d11aa03bc59681435ba7f21eea
diff --git a/src/Alpha_complex/example/CMakeLists.txt b/src/Alpha_complex/example/CMakeLists.txt
index b0337934..2eecd50c 100644
--- a/src/Alpha_complex/example/CMakeLists.txt
+++ b/src/Alpha_complex/example/CMakeLists.txt
@@ -32,14 +32,18 @@ if (DIFF_PATH)
add_test(Alpha_complex_example_from_off_60_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
+ set_tests_properties(Alpha_complex_example_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_60)
add_test(Alpha_complex_example_from_off_32_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
+ set_tests_properties(Alpha_complex_example_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_from_off_32)
add_test(Alpha_complex_example_fast_from_off_60_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_60.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_60.txt)
+ set_tests_properties(Alpha_complex_example_fast_from_off_60_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_60)
add_test(Alpha_complex_example_fast_from_off_32_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/fastalphaoffreader_result_32.txt ${CMAKE_CURRENT_BINARY_DIR}/alphaoffreader_for_doc_32.txt)
- endif()
+ set_tests_properties(Alpha_complex_example_fast_from_off_32_diff_files PROPERTIES DEPENDS Alpha_complex_example_fast_from_off_32)
+endif()
add_executable ( Alpha_complex_example_weighted_3d_from_points Weighted_alpha_complex_3d_from_points.cpp )
target_link_libraries(Alpha_complex_example_weighted_3d_from_points ${CGAL_LIBRARY})
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
index c40beba0..c19ebb79 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex_3d.h
@@ -61,10 +61,7 @@ namespace Gudhi {
namespace alpha_complex {
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
-thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001;
+thread_local double RELATIVE_PRECISION_OF_TO_DOUBLE = 0.00001;
// Value_from_iterator returns the filtration value from an iterator on alpha shapes values
//
diff --git a/src/Alpha_complex/utilities/CMakeLists.txt b/src/Alpha_complex/utilities/CMakeLists.txt
index a3b0cc24..2ffbdde0 100644
--- a/src/Alpha_complex/utilities/CMakeLists.txt
+++ b/src/Alpha_complex/utilities/CMakeLists.txt
@@ -16,8 +16,14 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
if (DIFF_PATH)
add_test(Alpha_complex_utilities_diff_exact_alpha_complex ${DIFF_PATH}
"exact.pers" "safe.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex PROPERTIES DEPENDS
+ "Alpha_complex_utilities_exact_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence")
+
add_test(Alpha_complex_utilities_diff_fast_alpha_complex ${DIFF_PATH}
"fast.pers" "safe.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex PROPERTIES DEPENDS
+ "Alpha_complex_utilities_fast_alpha_complex_persistence;Alpha_complex_utilities_safe_alpha_complex_persistence")
+
endif()
install(TARGETS alpha_complex_persistence DESTINATION bin)
@@ -36,15 +42,19 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
"${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
"-p" "2" "-m" "0.45" "-o" "exact_3d.pers" "-e")
- add_test(NAME Alpha_complex_utilities_safe_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
+ add_test(NAME Alpha_complex_utilities_fast_alpha_complex_3d COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
"${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off"
"-p" "2" "-m" "0.45" "-o" "fast_3d.pers" "-f")
if (DIFF_PATH)
add_test(Alpha_complex_utilities_diff_exact_alpha_complex_3d ${DIFF_PATH}
"exact_3d.pers" "safe_3d.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_exact_alpha_complex_3d PROPERTIES DEPENDS
+ "Alpha_complex_utilities_exact_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d")
add_test(Alpha_complex_utilities_diff_fast_alpha_complex_3d ${DIFF_PATH}
"fast_3d.pers" "safe_3d.pers")
+ set_tests_properties(Alpha_complex_utilities_diff_fast_alpha_complex_3d PROPERTIES DEPENDS
+ "Alpha_complex_utilities_fast_alpha_complex_3d;Alpha_complex_utilities_alpha_complex_3d")
endif()
add_test(NAME Alpha_complex_utilities_periodic_alpha_complex_3d_persistence COMMAND $<TARGET_FILE:alpha_complex_3d_persistence>
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index ec551882..49e781bd 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -785,6 +785,7 @@ EXCLUDE = data/ \
GudhUI/ \
cmake/ \
python/ \
+ ext/ \
README.md
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
diff --git a/src/Nerve_GIC/include/gudhi/GIC.h b/src/Nerve_GIC/include/gudhi/GIC.h
index 348dcc85..1b1f9323 100644
--- a/src/Nerve_GIC/include/gudhi/GIC.h
+++ b/src/Nerve_GIC/include/gudhi/GIC.h
@@ -139,19 +139,9 @@ class Cover_complex {
for (boost::tie(ei, ei_end) = boost::edges(G); ei != ei_end; ++ei) boost::remove_edge(*ei, G);
}
- // Thread local is not available on XCode version < V.8
- // If not available, random engine is a class member.
-#ifndef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::default_random_engine re;
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
-
// Find random number in [0,1].
double GetUniform() {
- // Thread local is not available on XCode version < V.8
- // If available, random engine is defined for each thread.
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
thread_local std::default_random_engine re;
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
std::uniform_real_distribution<double> Dist(0, 1);
return Dist(re);
}
@@ -456,9 +446,7 @@ class Cover_complex {
if (distances.size() == 0) compute_pairwise_distances(distance);
- // This cannot be parallelized if thread_local is not defined
- // thread_local is not defined for XCode < v.8
- #if defined(GUDHI_USE_TBB) && defined(GUDHI_CAN_USE_CXX11_THREAD_LOCAL)
+ #ifdef GUDHI_USE_TBB
std::mutex deltamutex;
tbb::parallel_for(0, N, [&](int i){
std::vector<int> samples(m);
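Illustration (not part of the patch): the GIC.h hunks above drop the `GUDHI_CAN_USE_CXX11_THREAD_LOCAL` guard and use C++11 `thread_local` unconditionally, so each thread owns its own random engine and the TBB loop no longer needs the extra condition. A minimal standalone sketch of that pattern, with a hypothetical function name:

```cpp
#include <iostream>
#include <random>

// Each thread that calls get_uniform() lazily constructs its own engine,
// so concurrent calls neither share state nor race on it.
double get_uniform() {
  thread_local std::default_random_engine re;
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  return dist(re);
}

int main() {
  std::cout << get_uniform() << '\n';
}
```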
diff --git a/src/Persistence_representations/utilities/CMakeLists.txt b/src/Persistence_representations/utilities/CMakeLists.txt
index fc51b1d6..85633b7b 100644
--- a/src/Persistence_representations/utilities/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/CMakeLists.txt
@@ -14,7 +14,7 @@ function(add_persistence_representation_creation_utility creation_utility)
install(TARGETS ${creation_utility} DESTINATION bin)
endfunction(add_persistence_representation_creation_utility)
-function(add_persistence_representation_plot_utility plot_utility tool_extension)
+function(add_persistence_representation_plot_utility creation_utility plot_utility tool_extension)
add_executable ( ${plot_utility} ${plot_utility}.cpp )
# as the function is called in a subdirectory level, need to '../' to find persistence heat maps files
@@ -22,17 +22,21 @@ function(add_persistence_representation_plot_utility plot_utility tool_extension
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}")
#add_test(NAME Persistence_representation_utilities_${plot_utility}_second COMMAND $<TARGET_FILE:${plot_utility}>
# "${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}")
+ set_tests_properties(Persistence_representation_utilities_${plot_utility}_first PROPERTIES DEPENDS
+ Persistence_representation_utilities_${creation_utility})
if(GNUPLOT_PATH)
add_test(NAME Persistence_representation_utilities_${plot_utility}_first_gnuplot COMMAND ${GNUPLOT_PATH}
"-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}_GnuplotScript'")
#add_test(NAME Persistence_representation_utilities_${plot_utility}_second_gnuplot COMMAND ${GNUPLOT_PATH}
# "-e" "load '${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}_GnuplotScript'")
+ set_tests_properties(Persistence_representation_utilities_${plot_utility}_first_gnuplot PROPERTIES DEPENDS
+ Persistence_representation_utilities_${plot_utility}_first)
endif()
install(TARGETS ${plot_utility} DESTINATION bin)
endfunction(add_persistence_representation_plot_utility)
-function(add_persistence_representation_function_utility function_utility tool_extension)
+function(add_persistence_representation_function_utility creation_utility function_utility tool_extension)
add_executable ( ${function_utility} ${function_utility}.cpp )
# ARGV2 is an optional argument
@@ -48,6 +52,8 @@ function(add_persistence_representation_function_utility function_utility tool_e
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers${tool_extension}"
"${CMAKE_CURRENT_BINARY_DIR}/../second.pers${tool_extension}")
endif()
+ set_tests_properties(Persistence_representation_utilities_${function_utility} PROPERTIES DEPENDS
+ Persistence_representation_utilities_${creation_utility})
install(TARGETS ${function_utility} DESTINATION bin)
endfunction(add_persistence_representation_function_utility)
diff --git a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
index 89ef232f..e4c471c2 100644
--- a/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_heat_maps/CMakeLists.txt
@@ -2,13 +2,24 @@ project(Persistence_representations_heat_maps_utilities)
add_persistence_representation_creation_utility(create_pssk "10" "-1" "-1" "4" "-1")
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_arctan_of_their_persistence "10" "-1" "-1" "4" "-1")
+
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_distance_from_diagonal "10" "-1" "-1" "4" "-1")
+# Tests output the same file
+set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_arctan_of_their_persistence)
+
add_persistence_representation_creation_utility(create_p_h_m_weighted_by_squared_diag_distance "10" "-1" "-1" "4" "-1")
+# Tests output the same file
+set_tests_properties(Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_distance_from_diagonal)
+
# Need to set grid min and max for further average, distance and scalar_product
add_persistence_representation_creation_utility(create_persistence_heat_maps "10" "0" "35" "10" "-1")
+set_tests_properties(Persistence_representation_utilities_create_persistence_heat_maps PROPERTIES DEPENDS
+ Persistence_representation_utilities_create_p_h_m_weighted_by_squared_diag_distance)
-add_persistence_representation_plot_utility(plot_persistence_heat_map ".mps")
+add_persistence_representation_plot_utility(create_persistence_heat_maps plot_persistence_heat_map ".mps")
-add_persistence_representation_function_utility(average_persistence_heat_maps ".mps")
-add_persistence_representation_function_utility(compute_distance_of_persistence_heat_maps ".mps" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_persistence_heat_maps ".mps")
+add_persistence_representation_function_utility(create_persistence_heat_maps average_persistence_heat_maps ".mps")
+add_persistence_representation_function_utility(create_persistence_heat_maps compute_distance_of_persistence_heat_maps ".mps" "1")
+add_persistence_representation_function_utility(create_persistence_heat_maps compute_scalar_product_of_persistence_heat_maps ".mps")
diff --git a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
index a025183e..118c1e9b 100644
--- a/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_intervals/CMakeLists.txt
@@ -3,17 +3,16 @@ project(Persistence_representations_intervals_utilities)
add_executable ( plot_histogram_of_intervals_lengths plot_histogram_of_intervals_lengths.cpp )
-add_test(NAME plot_histogram_of_intervals_lengths COMMAND $<TARGET_FILE:plot_histogram_of_intervals_lengths>
+add_test(NAME Persistence_representation_utilities_plot_histogram_of_intervals_lengths COMMAND $<TARGET_FILE:plot_histogram_of_intervals_lengths>
"${CMAKE_CURRENT_BINARY_DIR}/../first.pers" "-1")
install(TARGETS plot_histogram_of_intervals_lengths DESTINATION bin)
-add_persistence_representation_plot_utility(plot_persistence_intervals "")
-add_persistence_representation_plot_utility(plot_persistence_Betti_numbers "")
+add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_intervals "")
+add_persistence_representation_plot_utility(plot_histogram_of_intervals_lengths plot_persistence_Betti_numbers "")
add_persistence_representation_creation_utility(compute_birth_death_range_in_persistence_diagram "-1")
-
add_executable ( compute_number_of_dominant_intervals compute_number_of_dominant_intervals.cpp )
add_test(NAME Persistence_representation_utilities_compute_number_of_dominant_intervals
COMMAND $<TARGET_FILE:compute_number_of_dominant_intervals>
diff --git a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
index 6b24d032..4df84d36 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_landscapes/CMakeLists.txt
@@ -2,8 +2,8 @@ project(Persistence_representations_landscapes_utilities)
add_persistence_representation_creation_utility(create_landscapes "-1")
-add_persistence_representation_plot_utility(plot_landscapes ".land")
+add_persistence_representation_plot_utility(create_landscapes plot_landscapes ".land")
-add_persistence_representation_function_utility(average_landscapes ".land")
-add_persistence_representation_function_utility(compute_distance_of_landscapes ".land" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_landscapes ".land")
+add_persistence_representation_function_utility(create_landscapes average_landscapes ".land")
+add_persistence_representation_function_utility(create_landscapes compute_distance_of_landscapes ".land" "1")
+add_persistence_representation_function_utility(create_landscapes compute_scalar_product_of_landscapes ".land")
diff --git a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
index 36f3196b..8cd965f1 100644
--- a/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_landscapes_on_grid/CMakeLists.txt
@@ -3,8 +3,8 @@ project(Persistence_representations_lanscapes_on_grid_utilities)
# Need to set grid min and max for further average, distance and scalar_product
add_persistence_representation_creation_utility(create_landscapes_on_grid "100" "0" "35" "-1")
-add_persistence_representation_plot_utility(plot_landscapes_on_grid ".g_land")
+add_persistence_representation_plot_utility(create_landscapes_on_grid plot_landscapes_on_grid ".g_land")
-add_persistence_representation_function_utility(average_landscapes_on_grid ".g_land")
-add_persistence_representation_function_utility(compute_distance_of_landscapes_on_grid ".g_land" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_landscapes_on_grid ".g_land")
+add_persistence_representation_function_utility(create_landscapes_on_grid average_landscapes_on_grid ".g_land")
+add_persistence_representation_function_utility(create_landscapes_on_grid compute_distance_of_landscapes_on_grid ".g_land" "1")
+add_persistence_representation_function_utility(create_landscapes_on_grid compute_scalar_product_of_landscapes_on_grid ".g_land")
diff --git a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
index bc982094..5b22ca84 100644
--- a/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
+++ b/src/Persistence_representations/utilities/persistence_vectors/CMakeLists.txt
@@ -2,8 +2,8 @@ project(Persistence_vectors_utilities)
add_persistence_representation_creation_utility(create_persistence_vectors "-1")
-add_persistence_representation_plot_utility(plot_persistence_vectors ".vect")
+add_persistence_representation_plot_utility(create_persistence_vectors plot_persistence_vectors ".vect")
-add_persistence_representation_function_utility(average_persistence_vectors ".vect")
-add_persistence_representation_function_utility(compute_distance_of_persistence_vectors ".vect" "1")
-add_persistence_representation_function_utility(compute_scalar_product_of_persistence_vectors ".vect")
+add_persistence_representation_function_utility(create_persistence_vectors average_persistence_vectors ".vect")
+add_persistence_representation_function_utility(create_persistence_vectors compute_distance_of_persistence_vectors ".vect" "1")
+add_persistence_representation_function_utility(create_persistence_vectors compute_scalar_product_of_persistence_vectors ".vect")
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index 0f1876d0..ca4bc10d 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -288,10 +288,7 @@ class Persistent_cohomology {
// with multiplicity. We used to sum the coefficients directly in
// annotations_in_boundary by using a map, we now do it later.
typedef std::pair<Column *, int> annotation_t;
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<annotation_t> annotations_in_boundary;
+ thread_local std::vector<annotation_t> annotations_in_boundary;
annotations_in_boundary.clear();
int sign = 1 - 2 * (dim_sigma % 2); // \in {-1,1} provides the sign in the
// alternate sum in the boundary.
@@ -566,15 +563,9 @@ class Persistent_cohomology {
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
- // Special case on windows, inf is "1.#INF" (cf. unitary tests and R package TDA)
- if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits<Filtration_value>::infinity()) {
- ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " inf " << std::endl;
- } else {
- ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " "
- << cpx_->filtration(get<1>(pair)) << " " << std::endl;
- }
+ ostream << get<2>(pair) << " " << cpx_->dimension(get<0>(pair)) << " "
+ << cpx_->filtration(get<0>(pair)) << " "
+ << cpx_->filtration(get<1>(pair)) << " " << std::endl;
}
}
@@ -584,15 +575,9 @@ class Persistent_cohomology {
std::sort(std::begin(persistent_pairs_), std::end(persistent_pairs_), cmp);
bool has_infinity = std::numeric_limits<Filtration_value>::has_infinity;
for (auto pair : persistent_pairs_) {
- // Special case on windows, inf is "1.#INF"
- if (has_infinity && cpx_->filtration(get<1>(pair)) == std::numeric_limits<Filtration_value>::infinity()) {
- diagram_out << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " inf" << std::endl;
- } else {
- diagram_out << cpx_->dimension(get<0>(pair)) << " "
- << cpx_->filtration(get<0>(pair)) << " "
- << cpx_->filtration(get<1>(pair)) << std::endl;
- }
+ diagram_out << cpx_->dimension(get<0>(pair)) << " "
+ << cpx_->filtration(get<0>(pair)) << " "
+ << cpx_->filtration(get<1>(pair)) << std::endl;
}
}
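Illustration (not part of the patch): the two hunks above delete the Windows-only branch that hand-printed `inf`, a workaround for old MSVC runtimes rendering infinity as `1.#INF`. A minimal sketch, assuming a conforming standard library, of the behaviour the simplified output path relies on:

```cpp
#include <iostream>
#include <limits>

int main() {
  double death = std::numeric_limits<double>::infinity();
  // A conforming iostream prints infinity as "inf", so no special case
  // is needed when a persistence pair never dies.
  std::cout << 0 << " " << 0.0 << " " << death << std::endl;
}
```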
diff --git a/src/Rips_complex/example/CMakeLists.txt b/src/Rips_complex/example/CMakeLists.txt
index e7772bdb..244a93ec 100644
--- a/src/Rips_complex/example/CMakeLists.txt
+++ b/src/Rips_complex/example/CMakeLists.txt
@@ -53,15 +53,23 @@ if (DIFF_PATH)
add_test(Rips_complex_example_from_off_doc_12_1_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_1.txt
${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_off_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_1)
+
add_test(Rips_complex_example_from_off_doc_12_3_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripsoffreader_result_12_3.txt
${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_off_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_off_doc_12_3)
+
add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_1.txt
${CMAKE_CURRENT_BINARY_DIR}/one_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_1_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_1)
+
add_test(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/ripscsvreader_result_12_3.txt
${CMAKE_CURRENT_BINARY_DIR}/full_skeleton_rips_for_doc.txt)
+ set_tests_properties(Rips_complex_example_from_csv_distance_matrix_doc_12_3_diff_files PROPERTIES DEPENDS Rips_complex_example_from_csv_distance_matrix_doc_12_3)
+
endif()
install(TARGETS Rips_complex_example_from_off DESTINATION bin)
diff --git a/src/Simplex_tree/example/simple_simplex_tree.cpp b/src/Simplex_tree/example/simple_simplex_tree.cpp
index ca39df5b..e8bec596 100644
--- a/src/Simplex_tree/example/simple_simplex_tree.cpp
+++ b/src/Simplex_tree/example/simple_simplex_tree.cpp
@@ -166,7 +166,6 @@ int main(int argc, char* const argv[]) {
// ++ GENERAL VARIABLE SET
std::clog << "********************************************************************\n";
- // Display the Simplex_tree - Can not be done in the middle of 2 inserts
std::clog << "* The complex contains " << simplexTree.num_simplices() << " simplices\n";
std::clog << " - dimension " << simplexTree.dimension() << "\n";
std::clog << "* Iterator on Simplices in the filtration, with [filtration value]:\n";
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h
index 6e80b77f..430d1ac4 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h
@@ -24,6 +24,7 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/range/adaptor/reversed.hpp>
+#include <boost/container/static_vector.hpp>
#ifdef GUDHI_USE_TBB
#include <tbb/parallel_sort.h>
@@ -246,8 +247,8 @@ class Simplex_tree {
* which is consequenlty
* equal to \f$(-1)^{\text{dim} \sigma}\f$ the canonical orientation on the simplex.
*/
- Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) {
- assert(sh != null_simplex()); // Empty simplex
+ Simplex_vertex_range simplex_vertex_range(Simplex_handle sh) const {
+ GUDHI_CHECK(sh != null_simplex(), "empty simplex");
return Simplex_vertex_range(Simplex_vertex_iterator(this, sh),
Simplex_vertex_iterator(this));
}
@@ -450,6 +451,15 @@ class Simplex_tree {
return true;
}
+ /** \brief Returns the filtration value of a simplex.
+ *
+ * Same as `filtration()`, but does not handle `null_simplex()`.
+ */
+ static Filtration_value filtration_(Simplex_handle sh) {
+ GUDHI_CHECK (sh != null_simplex(), "null simplex");
+ return sh->second.filtration();
+ }
+
public:
/** \brief Returns the key associated to a simplex.
*
@@ -755,12 +765,7 @@ class Simplex_tree {
if (first == last)
return { null_simplex(), true }; // FIXME: false would make more sense to me.
- // Copy before sorting
- // Thread local is not available on XCode version < V.8 - It will slow down computation
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<Vertex_handle> copy;
+ thread_local std::vector<Vertex_handle> copy;
copy.clear();
copy.insert(copy.end(), first, last);
std::sort(copy.begin(), copy.end());
@@ -827,7 +832,7 @@ class Simplex_tree {
/** Returns the Siblings containing a simplex.*/
template<class SimplexHandle>
- Siblings* self_siblings(SimplexHandle sh) {
+ static Siblings* self_siblings(SimplexHandle sh) {
if (sh->second.children()->parent() == sh->first)
return sh->second.children()->oncles();
else
@@ -1123,10 +1128,7 @@ class Simplex_tree {
Dictionary_it next = siblings->members().begin();
++next;
-#ifdef GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- thread_local
-#endif // GUDHI_CAN_USE_CXX11_THREAD_LOCAL
- std::vector<std::pair<Vertex_handle, Node> > inter;
+ thread_local std::vector<std::pair<Vertex_handle, Node> > inter;
for (Dictionary_it s_h = siblings->members().begin();
s_h != siblings->members().end(); ++s_h, ++next) {
Simplex_handle root_sh = find_vertex(s_h->first);
@@ -1317,6 +1319,8 @@ class Simplex_tree {
* \post Some simplex tree functions require the filtration to be valid. `make_filtration_non_decreasing()`
* function is not launching `initialize_filtration()` but returns the filtration modification information. If the
* complex has changed , please call `initialize_filtration()` to recompute it.
+ *
+ * If a simplex has a `NaN` filtration value, it is considered lower than any other defined filtration value.
*/
bool make_filtration_non_decreasing() {
bool modified = false;
@@ -1347,7 +1351,9 @@ class Simplex_tree {
});
Filtration_value max_filt_border_value = filtration(*max_border);
- if (simplex.second.filtration() < max_filt_border_value) {
+ // Replacing if(f<max) with if(!(f>=max)) would mean that if f is NaN, we replace it with the max of the children.
+ // That seems more useful than keeping NaN.
+ if (!(simplex.second.filtration() >= max_filt_border_value)) {
// Store the filtration modification information
modified = true;
simplex.second.assign_filtration(max_filt_border_value);
@@ -1465,6 +1471,66 @@ class Simplex_tree {
}
}
+ /** \brief Returns a vertex of `sh` that has the same filtration value as `sh` if it exists, and `null_vertex()` otherwise.
+ *
+ * For a lower-star filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which vertex had its filtration value propagated to `sh`.
+ * If several vertices have the same filtration value, the one it returns is arbitrary. */
+ Vertex_handle vertex_with_same_filtration(Simplex_handle sh) {
+ auto filt = filtration_(sh);
+ for(auto v : simplex_vertex_range(sh))
+ if(filtration_(find_vertex(v)) == filt)
+ return v;
+ return null_vertex();
+ }
+
+ /** \brief Returns an edge of `sh` that has the same filtration value as `sh` if it exists, and `null_simplex()` otherwise.
+ *
+ * For a flag-complex built with `expansion()`, this is a way to invert the process and find out which edge had its filtration value propagated to `sh`.
+ * If several edges have the same filtration value, the one it returns is arbitrary.
+ *
+ * \pre `sh` must have dimension at least 1. */
+ Simplex_handle edge_with_same_filtration(Simplex_handle sh) {
+ // See issue #251 for potential speed improvements.
+ auto&& vertices = simplex_vertex_range(sh); // vertices in decreasing order
+ auto end = std::end(vertices);
+ auto vi = std::begin(vertices);
+ GUDHI_CHECK(vi != end, "empty simplex");
+ auto v0 = *vi;
+ ++vi;
+ GUDHI_CHECK(vi != end, "simplex of dimension 0");
+ if(std::next(vi) == end) return sh; // shortcut for dimension 1
+ boost::container::static_vector<Vertex_handle, 40> suffix;
+ suffix.push_back(v0);
+ auto filt = filtration_(sh);
+ do
+ {
+ Vertex_handle v = *vi;
+ auto&& children1 = find_vertex(v)->second.children()->members_;
+ for(auto w : suffix){
+ // Can we take advantage of the fact that suffix is ordered?
+ Simplex_handle s = children1.find(w);
+ if(filtration_(s) == filt)
+ return s;
+ }
+ suffix.push_back(v);
+ }
+ while(++vi != end);
+ return null_simplex();
+ }
+
+ /** \brief Returns a minimal face of `sh` that has the same filtration value as `sh`.
+ *
+ * For a filtration built with `make_filtration_non_decreasing()`, this is a way to invert the process and find out which simplex had its filtration value propagated to `sh`.
+ * If several minimal (for inclusion) simplices have the same filtration value, the one it returns is arbitrary, and it is not guaranteed to be the one with smallest dimension. */
+ Simplex_handle minimal_simplex_with_same_filtration(Simplex_handle sh) {
+ auto filt = filtration_(sh);
+ // Naive implementation, it can be sped up.
+ for(auto b : boundary_simplex_range(sh))
+ if(filtration_(b) == filt)
+ return minimal_simplex_with_same_filtration(b);
+ return sh; // None of its faces has the same filtration.
+ }
+
private:
Vertex_handle null_vertex_;
/** \brief Total number of simplices in the complex, without the empty simplex.*/
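Illustration (not part of the patch): in `make_filtration_non_decreasing()` above, `if (f < max)` becomes `if (!(f >= max))` so that a `NaN` filtration value compares as lower than any defined value and is overwritten by the maximum over the boundary. A minimal standalone sketch of the comparison semantics this relies on (values are illustrative):

```cpp
#include <iostream>
#include <limits>

int main() {
  double f = std::numeric_limits<double>::quiet_NaN();
  double max_border = 2.0;  // maximal filtration value over the boundary faces
  // Every ordered comparison involving NaN is false, so the two tests differ:
  std::cout << (f < max_border) << '\n';    // 0: the old test would keep NaN
  std::cout << !(f >= max_border) << '\n';  // 1: the new test assigns max_border
}
```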
diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
index efccf2f2..9007b6bd 100644
--- a/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
+++ b/src/Simplex_tree/include/gudhi/Simplex_tree/Simplex_tree_iterators.h
@@ -15,9 +15,7 @@
#include <boost/iterator/iterator_facade.hpp>
#include <boost/version.hpp>
-#if BOOST_VERSION >= 105600
-# include <boost/container/static_vector.hpp>
-#endif
+#include <boost/container/static_vector.hpp>
#include <vector>
@@ -42,13 +40,13 @@ class Simplex_tree_simplex_vertex_iterator : public boost::iterator_facade<
typedef typename SimplexTree::Siblings Siblings;
typedef typename SimplexTree::Vertex_handle Vertex_handle;
- explicit Simplex_tree_simplex_vertex_iterator(SimplexTree * st)
+ explicit Simplex_tree_simplex_vertex_iterator(SimplexTree const* st)
: // any end() iterator
sib_(nullptr),
v_(st->null_vertex()) {
}
- Simplex_tree_simplex_vertex_iterator(SimplexTree * st, Simplex_handle sh)
+ Simplex_tree_simplex_vertex_iterator(SimplexTree const* st, Simplex_handle sh)
: sib_(st->self_siblings(sh)),
v_(sh->first) {
}
@@ -166,15 +164,11 @@ class Simplex_tree_boundary_simplex_iterator : public boost::iterator_facade<
// Most of the storage should be moved to the range, iterators should be light.
Vertex_handle last_; // last vertex of the simplex
Vertex_handle next_; // next vertex to push in suffix_
-#if BOOST_VERSION >= 105600
// 40 seems a conservative bound on the dimension of a Simplex_tree for now,
// as it would not fit on the biggest hard-drive.
boost::container::static_vector<Vertex_handle, 40> suffix_;
// static_vector still has some overhead compared to a trivial hand-made
// version using std::aligned_storage, or compared to making suffix_ static.
-#else
- std::vector<Vertex_handle> suffix_;
-#endif
Siblings * sib_; // where the next search will start from
Simplex_handle sh_; // current Simplex_handle in the boundary
SimplexTree * st_; // simplex containing the simplicial complex
diff --git a/src/Simplex_tree/test/CMakeLists.txt b/src/Simplex_tree/test/CMakeLists.txt
index 8b9163f5..cf2b0153 100644
--- a/src/Simplex_tree/test/CMakeLists.txt
+++ b/src/Simplex_tree/test/CMakeLists.txt
@@ -28,3 +28,9 @@ if (TBB_FOUND)
target_link_libraries(Simplex_tree_ctor_and_move_test_unit ${TBB_LIBRARIES})
endif()
gudhi_add_boost_test(Simplex_tree_ctor_and_move_test_unit)
+
+add_executable ( Simplex_tree_make_filtration_non_decreasing_test_unit simplex_tree_make_filtration_non_decreasing_unit_test.cpp )
+if (TBB_FOUND)
+ target_link_libraries(Simplex_tree_make_filtration_non_decreasing_test_unit ${TBB_LIBRARIES})
+endif()
+gudhi_add_boost_test(Simplex_tree_make_filtration_non_decreasing_test_unit)
diff --git a/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp
new file mode 100644
index 00000000..e0e7cadf
--- /dev/null
+++ b/src/Simplex_tree/test/simplex_tree_make_filtration_non_decreasing_unit_test.cpp
@@ -0,0 +1,148 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <iostream>
+#include <limits> // for NaN
+#include <cmath> // for isNaN
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "simplex_tree_make_filtration_non_decreasing"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+// ^
+// /!\ Nothing else from Simplex_tree shall be included to test includes are well defined.
+#include "gudhi/Simplex_tree.h"
+
+using namespace Gudhi;
+
+typedef boost::mpl::list<Simplex_tree<>, Simplex_tree<Simplex_tree_options_fast_persistence>> list_of_tested_variants;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing, typeST, list_of_tested_variants) {
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
+ st.insert_simplex_and_subfaces({3, 0}, 2.0);
+ st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+
+ /* Inserted simplex: */
+ /* 1 */
+ /* o */
+ /* /X\ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+
+ std::clog << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+ // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0
+ // to 1.0
+ st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+ // Inserted simplex:
+ // 1 6
+ // o---o
+ // /X\7/
+ // o---o---o---o
+ // 2 0 3\X/4
+ // o
+ // 5
+
+ std::clog << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+
+ // Copy original simplex tree
+ typeST st_copy = st;
+
+ // Modify specific values for st to become like st_copy thanks to make_filtration_non_decreasing
+ st.assign_filtration(st.find({0,1,6,7}), 0.8);
+ st.assign_filtration(st.find({0,1,6}), 0.9);
+ st.assign_filtration(st.find({0,6}), 0.6);
+ st.assign_filtration(st.find({3,4,5}), 1.2);
+ st.assign_filtration(st.find({3,4}), 1.1);
+ st.assign_filtration(st.find({4,5}), 1.99);
+
+ std::clog << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_copy);
+
+ // Other simplex tree
+ typeST st_other;
+ st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st
+ st_other.insert_simplex_and_subfaces({3, 0}, 2.0);
+ st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
+ st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
+
+ // Modify specific values for st to become like st_other thanks to make_filtration_non_decreasing
+ st.assign_filtration(st.find({2}), 3.0);
+ // By modifying just the simplex {2}
+ // {0,1,2}, {1,2} and {0,2} will be modified
+
+ std::clog << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_other);
+
+ // Modify specific values for st still to be non-decreasing
+ st.assign_filtration(st.find({0,1,2}), 10.0);
+ st.assign_filtration(st.find({0,2}), 9.0);
+ st.assign_filtration(st.find({0,1,6,7}), 50.0);
+ st.assign_filtration(st.find({0,1,6}), 49.0);
+ st.assign_filtration(st.find({0,1,7}), 48.0);
+ // Other copy simplex tree
+ typeST st_other_copy = st;
+
+ std::clog << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl;
+ BOOST_CHECK(!st.make_filtration_non_decreasing());
+ BOOST_CHECK(st == st_other_copy);
+
+}
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(make_filtration_non_decreasing_on_nan_values, typeST, list_of_tested_variants) {
+ typeST st;
+
+ st.insert_simplex_and_subfaces({2, 1, 0}, std::numeric_limits<double>::quiet_NaN());
+ st.insert_simplex_and_subfaces({3, 0}, std::numeric_limits<double>::quiet_NaN());
+ st.insert_simplex_and_subfaces({3, 4, 5}, std::numeric_limits<double>::quiet_NaN());
+
+ /* Inserted simplex: */
+ /* 1 */
+ /* o */
+ /* /X\ */
+ /* o---o---o---o */
+ /* 2 0 3\X/4 */
+ /* o */
+ /* 5 */
+
+ std::clog << "SPECIFIC CASE:" << std::endl;
+ std::clog << "Insertion with NaN values does not ensure the filtration values are non decreasing" << std::endl;
+ st.make_filtration_non_decreasing();
+
+ std::clog << "Check all filtration values are NaN" << std::endl;
+ for (auto f_simplex : st.complex_simplex_range()) {
+ BOOST_CHECK(std::isnan(st.filtration(f_simplex)));
+ }
+
+ st.assign_filtration(st.find({0}), 0.);
+ st.assign_filtration(st.find({1}), 0.);
+ st.assign_filtration(st.find({2}), 0.);
+ st.assign_filtration(st.find({3}), 0.);
+ st.assign_filtration(st.find({4}), 0.);
+ st.assign_filtration(st.find({5}), 0.);
+
+ std::clog << "Check make_filtration_non_decreasing is modifying the simplicial complex" << std::endl;
+ BOOST_CHECK(st.make_filtration_non_decreasing());
+
+ std::clog << "Check all filtration values are now defined" << std::endl;
+ for (auto f_simplex : st.complex_simplex_range()) {
+ BOOST_CHECK(!std::isnan(st.filtration(f_simplex)));
+ }
+}
diff --git a/src/Simplex_tree/test/simplex_tree_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
index 7746fa2a..9b5fa8fe 100644
--- a/src/Simplex_tree/test/simplex_tree_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_unit_test.cpp
@@ -791,90 +791,6 @@ BOOST_AUTO_TEST_CASE(non_contiguous) {
BOOST_CHECK(++i == std::end(b));
}
-BOOST_AUTO_TEST_CASE(make_filtration_non_decreasing) {
- std::clog << "********************************************************************" << std::endl;
- std::clog << "MAKE FILTRATION NON DECREASING" << std::endl;
- typedef Simplex_tree<> typeST;
- typeST st;
-
- st.insert_simplex_and_subfaces({2, 1, 0}, 2.0);
- st.insert_simplex_and_subfaces({3, 0}, 2.0);
- st.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
-
- /* Inserted simplex: */
- /* 1 */
- /* o */
- /* /X\ */
- /* o---o---o---o */
- /* 2 0 3\X/4 */
- /* o */
- /* 5 */
-
- std::clog << "Check default insertion ensures the filtration values are non decreasing" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
-
- // Because of non decreasing property of simplex tree, { 0 } , { 1 } and { 0, 1 } are going to be set from value 2.0
- // to 1.0
- st.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
-
- // Inserted simplex:
- // 1 6
- // o---o
- // /X\7/
- // o---o---o---o
- // 2 0 3\X/4
- // o
- // 5
-
- std::clog << "Check default second insertion ensures the filtration values are non decreasing" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
-
- // Copy original simplex tree
- typeST st_copy = st;
-
- // Modify specific values for st to become like st_copy thanks to make_filtration_non_decreasing
- st.assign_filtration(st.find({0,1,6,7}), 0.8);
- st.assign_filtration(st.find({0,1,6}), 0.9);
- st.assign_filtration(st.find({0,6}), 0.6);
- st.assign_filtration(st.find({3,4,5}), 1.2);
- st.assign_filtration(st.find({3,4}), 1.1);
- st.assign_filtration(st.find({4,5}), 1.99);
-
- std::clog << "Check the simplex_tree is rolled back in case of decreasing filtration values" << std::endl;
- BOOST_CHECK(st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_copy);
-
- // Other simplex tree
- typeST st_other;
- st_other.insert_simplex_and_subfaces({2, 1, 0}, 3.0); // This one is different from st
- st_other.insert_simplex_and_subfaces({3, 0}, 2.0);
- st_other.insert_simplex_and_subfaces({3, 4, 5}, 2.0);
- st_other.insert_simplex_and_subfaces({0, 1, 6, 7}, 1.0);
-
- // Modify specific values for st to become like st_other thanks to make_filtration_non_decreasing
- st.assign_filtration(st.find({2}), 3.0);
- // By modifying just the simplex {2}
- // {0,1,2}, {1,2} and {0,2} will be modified
-
- std::clog << "Check the simplex_tree is repaired in case of decreasing filtration values" << std::endl;
- BOOST_CHECK(st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_other);
-
- // Modify specific values for st still to be non-decreasing
- st.assign_filtration(st.find({0,1,2}), 10.0);
- st.assign_filtration(st.find({0,2}), 9.0);
- st.assign_filtration(st.find({0,1,6,7}), 50.0);
- st.assign_filtration(st.find({0,1,6}), 49.0);
- st.assign_filtration(st.find({0,1,7}), 48.0);
- // Other copy simplex tree
- typeST st_other_copy = st;
-
- std::clog << "Check the simplex_tree is not modified in case of non-decreasing filtration values" << std::endl;
- BOOST_CHECK(!st.make_filtration_non_decreasing());
- BOOST_CHECK(st == st_other_copy);
-
-}
-
typedef boost::mpl::list<boost::adjacency_list<boost::setS, boost::vecS, boost::directedS,
boost::property<vertex_filtration_t, double>,
@@ -986,5 +902,41 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(insert_duplicated_vertices, typeST, list_of_tested
<< " - num_simplices = " << st.num_simplices() << std::endl;
BOOST_CHECK(st.dimension() == 1);
BOOST_CHECK(st.num_simplices() == st.num_vertices() + 1);
+}
+BOOST_AUTO_TEST_CASE_TEMPLATE(generators, typeST, list_of_tested_variants) {
+ std::cout << "********************************************************************" << std::endl;
+ std::cout << "TEST FIND GENERATORS" << std::endl;
+ {
+ typeST st;
+ st.insert_simplex_and_subfaces({0,1,2,3,4,5,6},0);
+ st.assign_filtration(st.find({0,2,4}), 10);
+ st.assign_filtration(st.find({1,5}), 20);
+ st.assign_filtration(st.find({1,2,4}), 30);
+ st.assign_filtration(st.find({3}), 5);
+ st.make_filtration_non_decreasing();
+ BOOST_CHECK(st.filtration(st.find({1,2}))==0);
+ BOOST_CHECK(st.filtration(st.find({0,1,2,3,4}))==30);
+ BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,1,2,3,4,5}))==st.find({1,2,4}));
+ BOOST_CHECK(st.minimal_simplex_with_same_filtration(st.find({0,2,3}))==st.find({3}));
+ auto s=st.minimal_simplex_with_same_filtration(st.find({0,2,6}));
+ BOOST_CHECK(s==st.find({0})||s==st.find({2})||s==st.find({6}));
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({2}))==2);
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({1,5}))==st.null_vertex());
+ BOOST_CHECK(st.vertex_with_same_filtration(st.find({5,6}))>=5);
+ }
+ {
+ typeST st;
+ st.insert_simplex_and_subfaces({0,1}, 8);
+ st.insert_simplex_and_subfaces({0,2}, 10);
+ st.insert_simplex_and_subfaces({3,4}, 6);
+ st.insert_simplex_and_subfaces({1,2}, 5);
+ st.insert_simplex_and_subfaces({1,5}, 4);
+ st.insert_simplex_and_subfaces({0,5}, 3);
+ st.insert_simplex_and_subfaces({2,5}, 2);
+ st.insert_simplex_and_subfaces({1,3}, 9);
+ st.expansion(50);
+ BOOST_CHECK(st.edge_with_same_filtration(st.find({0,1,2,5}))==st.find({0,2}));
+ BOOST_CHECK(st.edge_with_same_filtration(st.find({1,5}))==st.find({1,5}));
+ }
}
diff --git a/src/cmake/modules/GUDHI_compilation_flags.cmake b/src/cmake/modules/GUDHI_compilation_flags.cmake
index 34c2e065..567fbc40 100644
--- a/src/cmake/modules/GUDHI_compilation_flags.cmake
+++ b/src/cmake/modules/GUDHI_compilation_flags.cmake
@@ -1,7 +1,6 @@
# This files manage compilation flags required by GUDHI
include(TestCXXAcceptsFlag)
-include(CheckCXXSourceCompiles)
# add a compiler flag only if it is accepted
macro(add_cxx_compiler_flag _flag)
@@ -12,32 +11,6 @@ macro(add_cxx_compiler_flag _flag)
endif()
endmacro()
-function(can_cgal_use_cxx11_thread_local)
- # This is because of https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/tss.h
- # CGAL is using boost thread if thread_local is not ready (requires XCode 8 for Mac).
- # The test in https://github.com/CGAL/cgal/blob/master/Installation/include/CGAL/config.h
- # #if __has_feature(cxx_thread_local) || \
- # ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \
- # ( _MSC_VER >= 1900 )
- # #define CGAL_CAN_USE_CXX11_THREAD_LOCAL
- # #endif
- set(CGAL_CAN_USE_CXX11_THREAD_LOCAL "
- int main() {
- #ifndef __has_feature
- #define __has_feature(x) 0 // Compatibility with non-clang compilers.
- #endif
- #if __has_feature(cxx_thread_local) || \
- ( (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L ) || \
- ( _MSC_VER >= 1900 )
- bool has_feature_thread_local = true;
- #else
- // Explicit error of compilation for CMake test purpose - has_feature_thread_local is not defined
- #endif
- bool result = has_feature_thread_local;
- } ")
- check_cxx_source_compiles("${CGAL_CAN_USE_CXX11_THREAD_LOCAL}" CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
-endfunction()
-
set (CMAKE_CXX_STANDARD 14)
enable_testing()
@@ -58,16 +31,6 @@ if (DEBUG_TRACES)
add_definitions(-DDEBUG_TRACES)
endif()
-set(GUDHI_CAN_USE_CXX11_THREAD_LOCAL "
- int main() {
- thread_local int result = 0;
- return result;
- } ")
-check_cxx_source_compiles("${GUDHI_CAN_USE_CXX11_THREAD_LOCAL}" GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
-if (GUDHI_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
- add_definitions(-DGUDHI_CAN_USE_CXX11_THREAD_LOCAL)
-endif()
-
if(CMAKE_BUILD_TYPE MATCHES Debug)
message("++ Debug compilation flags are: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG}")
else()
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 6db2c76b..2d010483 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -68,7 +68,7 @@ if(CGAL_FOUND)
endif()
# For those who dislike bundled dependencies, this indicates where to find a preinstalled Hera.
-set(HERA_WASSERSTEIN_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include CACHE PATH "Directory where one can find Hera's wasserstein.h")
+set(HERA_WASSERSTEIN_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include CACHE PATH "Directory where one can find Hera's wasserstein.h")
option(WITH_GUDHI_USE_TBB "Build with Intel TBB parallelization" ON)
diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake
index 5047252f..257d1939 100644
--- a/src/cmake/modules/GUDHI_user_version_target.cmake
+++ b/src/cmake/modules/GUDHI_user_version_target.cmake
@@ -55,7 +55,7 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
copy_directory ${CMAKE_SOURCE_DIR}/src/GudhUI ${GUDHI_USER_VERSION_DIR}/GudhUI)
add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E
- copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/geom_matching/wasserstein/include ${GUDHI_USER_VERSION_DIR}/ext/hera/geom_matching/wasserstein/include)
+ copy_directory ${CMAKE_SOURCE_DIR}/ext/hera/wasserstein/include ${GUDHI_USER_VERSION_DIR}/ext/hera/wasserstein/include)
set(GUDHI_DIRECTORIES "doc;example;concept;utilities")
diff --git a/src/common/example/CMakeLists.txt b/src/common/example/CMakeLists.txt
index 583a0027..fa8eb98c 100644
--- a/src/common/example/CMakeLists.txt
+++ b/src/common/example/CMakeLists.txt
@@ -12,6 +12,7 @@ if (DIFF_PATH)
add_test(Common_example_vector_double_off_reader_diff_files ${DIFF_PATH}
${CMAKE_CURRENT_BINARY_DIR}/vectordoubleoffreader_result.txt ${CMAKE_CURRENT_BINARY_DIR}/alphacomplexdoc.off.txt)
+ set_tests_properties(Common_example_vector_double_off_reader_diff_files PROPERTIES DEPENDS Common_example_vector_double_off_reader)
endif()
if(NOT CGAL_VERSION VERSION_LESS 4.11.0)
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 090a7446..f00966a5 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -56,6 +56,7 @@ if(PYTHONINTERP_FOUND)
# Modules that should not be auto-imported in __init__.py
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
add_gudhi_debug_info("Python version ${PYTHON_VERSION_STRING}")
add_gudhi_debug_info("Cython version ${CYTHON_VERSION}")
@@ -127,16 +128,6 @@ if(PYTHONINTERP_FOUND)
endif ()
if(CGAL_FOUND)
- can_cgal_use_cxx11_thread_local()
- if (NOT CGAL_CAN_USE_CXX11_THREAD_LOCAL_RESULT)
- if(CMAKE_BUILD_TYPE MATCHES Debug)
- add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_DEBUG}")
- else()
- add_GUDHI_PYTHON_lib("${Boost_THREAD_LIBRARY_RELEASE}")
- endif()
- message("** Add Boost ${Boost_LIBRARY_DIRS}")
- set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${Boost_LIBRARY_DIRS}', ")
- endif()
# Add CGAL compilation args
if(CGAL_HEADER_ONLY)
add_gudhi_debug_info("CGAL header only version ${CGAL_VERSION}")
@@ -181,7 +172,7 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_LIBRARY_DIRS "${GUDHI_PYTHON_LIBRARY_DIRS}'${MPFR_LIBRARIES_DIR}', ")
message("** Add mpfr ${MPFR_LIBRARIES}")
endif(MPFR_FOUND)
-endif(CGAL_FOUND)
+ endif(CGAL_FOUND)
# Specific for Mac
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
@@ -226,6 +217,7 @@ endif(CGAL_FOUND)
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
file(COPY "gudhi/wasserstein.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
add_custom_command(
OUTPUT gudhi.so
@@ -404,6 +396,9 @@ endif(CGAL_FOUND)
add_gudhi_py_test(test_representations)
endif()
+ # Time Delay
+ add_gudhi_py_test(test_time_delay)
+
# Documentation generation is available through sphinx - requires all modules
if(SPHINX_PATH)
if(MATPLOTLIB_FOUND)
diff --git a/src/python/doc/persistence_graphical_tools_sum.inc b/src/python/doc/persistence_graphical_tools_sum.inc
index 0cdf8072..ef376802 100644
--- a/src/python/doc/persistence_graphical_tools_sum.inc
+++ b/src/python/doc/persistence_graphical_tools_sum.inc
@@ -2,11 +2,11 @@
:widths: 30 50 20
+-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
- | .. figure:: | These graphical tools comes on top of persistence results and allows | :Author: Vincent Rouvreau |
- | img/graphical_tools_representation.png | the user to build easily persistence barcode, diagram or density. | |
+ | .. figure:: | These graphical tools come on top of persistence results and allow | :Author: Vincent Rouvreau, Theo Lacombe |
+ | img/graphical_tools_representation.png | the user to easily display persistence barcodes, diagrams or densities. | |
| | | :Introduced in: GUDHI 2.0.0 |
- | | | |
- | | | :Copyright: MIT |
+ | | Note that these functions return the matplotlib axis, allowing | |
+ | | for further modifications (title, aspect, etc.) | :Copyright: MIT |
| | | |
| | | :Requires: matplotlib, numpy and scipy |
+-----------------------------------------------------------------+-----------------------------------------------------------------------+-----------------------------------------------+
diff --git a/src/python/doc/persistence_graphical_tools_user.rst b/src/python/doc/persistence_graphical_tools_user.rst
index 80002db6..91e52703 100644
--- a/src/python/doc/persistence_graphical_tools_user.rst
+++ b/src/python/doc/persistence_graphical_tools_user.rst
@@ -20,7 +20,7 @@ This function can display the persistence result as a barcode:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
off_file = gudhi.__root_source_dir__ + '/data/points/tore3D_300.off'
@@ -31,7 +31,7 @@ This function can display the persistence result as a barcode:
diag = simplex_tree.persistence(min_persistence=0.4)
gudhi.plot_persistence_barcode(diag)
- plot.show()
+ plt.show()
Show persistence as a diagram
-----------------------------
@@ -44,15 +44,31 @@ This function can display the persistence result as a diagram:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
'/data/persistence_diagram/rips_on_tore3D_1307.pers'
- gudhi.plot_persistence_diagram(persistence_file=persistence_file,
+ ax = gudhi.plot_persistence_diagram(persistence_file=persistence_file,
legend=True)
- plot.show()
+ # We can modify the title, aspect, etc.
+ ax.set_title("Persistence diagram of a torus")
+ ax.set_aspect("equal") # forces to be square shaped
+ plt.show()
+
+Note that, as for barcodes and densities, it can also take a simple `np.array`
+of shape (N x 2) encoding a persistence diagram (in a given dimension).
+
+.. plot::
+ :include-source:
+
+ import matplotlib.pyplot as plt
+ import gudhi
+ import numpy as np
+ d = np.array([[0, 1], [1, 2], [1, np.inf]])
+ gudhi.plot_persistence_diagram(d)
+ plt.show()
Persistence density
-------------------
@@ -65,7 +81,7 @@ If you want more information on a specific dimension, for instance:
.. plot::
:include-source:
- import matplotlib.pyplot as plot
+ import matplotlib.pyplot as plt
import gudhi
# rips_on_tore3D_1307.pers obtained from write_persistence_diagram method
persistence_file=gudhi.__root_source_dir__ + \
@@ -75,9 +91,9 @@ If you want more information on a specific dimension, for instance:
only_this_dim=1)
pers_diag = [(1, elt) for elt in birth_death]
# Use subplots to display diagram and density side by side
- fig, axes = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
+ fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
gudhi.plot_persistence_diagram(persistence=pers_diag,
axes=axes[0])
gudhi.plot_persistence_density(persistence=pers_diag,
dimension=1, legend=True, axes=axes[1])
- plot.show()
+ plt.show()
diff --git a/src/python/doc/point_cloud.rst b/src/python/doc/point_cloud.rst
index d668428a..c0d4b303 100644
--- a/src/python/doc/point_cloud.rst
+++ b/src/python/doc/point_cloud.rst
@@ -20,3 +20,11 @@ Subsampling
:members:
:special-members:
:show-inheritance:
+
+TimeDelayEmbedding
+------------------
+
+.. autoclass:: gudhi.point_cloud.timedelay.TimeDelayEmbedding
+ :members:
+ :special-members: __call__
+
diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index 94b454e2..a9b21fa5 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -36,10 +36,10 @@ Note that persistence diagrams must be submitted as (n x 2) numpy arrays and mus
import gudhi.wasserstein
import numpy as np
- diag1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
- diag2 = np.array([[2.8, 4.45],[9.5, 14.1]])
+ dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
+ dgm2 = np.array([[2.8, 4.45],[9.5, 14.1]])
- message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(diag1, diag2, order=1., internal_p=2.)
+ message = "Wasserstein distance value = " + '%.2f' % gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, order=1., internal_p=2.)
print(message)
The output is:
@@ -47,3 +47,40 @@ The output is:
.. testoutput::
Wasserstein distance value = 1.45
+
+We can also access the optimal matching by setting `matching=True`.
+It is encoded as a list of indices (i,j), meaning that the i-th point in X
+is mapped to the j-th point in Y.
+An index of -1 represents the diagonal.
+
+.. testcode::
+
+ import gudhi.wasserstein
+ import numpy as np
+
+ dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
+ dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]])
+ cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, matching=True, order=1, internal_p=2)
+
+ message_cost = "Wasserstein distance value = %.2f" %cost
+ print(message_cost)
+ dgm1_to_diagonal = matchings[matchings[:,1] == -1, 0]
+ dgm2_to_diagonal = matchings[matchings[:,0] == -1, 1]
+ off_diagonal_match = np.delete(matchings, np.where(matchings == -1)[0], axis=0)
+
+ for i,j in off_diagonal_match:
+ print("point %s in dgm1 is matched to point %s in dgm2" %(i,j))
+ for i in dgm1_to_diagonal:
+ print("point %s in dgm1 is matched to the diagonal" %i)
+ for j in dgm2_to_diagonal:
+ print("point %s in dgm2 is matched to the diagonal" %j)
+
+The output is:
+
+.. testoutput::
+
+ Wasserstein distance value = 2.15
+ point 0 in dgm1 is matched to point 0 in dgm2
+ point 1 in dgm1 is matched to point 2 in dgm2
+ point 2 in dgm1 is matched to the diagonal
+ point 1 in dgm2 is matched to the diagonal
diff --git a/src/python/example/alpha_complex_from_points_example.py b/src/python/example/alpha_complex_from_points_example.py
index 844d7a82..73faf17c 100755
--- a/src/python/example/alpha_complex_from_points_example.py
+++ b/src/python/example/alpha_complex_from_points_example.py
@@ -46,8 +46,14 @@ if simplex_tree.find([4]):
else:
print("[4] Not found...")
+# After the insertions above, the filtration needs to be (re)initialized before iterating it
+simplex_tree.initialize_filtration()
+
print("dimension=", simplex_tree.dimension())
-print("filtrations=", simplex_tree.get_filtration())
+print("filtrations=")
+for simplex_with_filtration in simplex_tree.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("star([0])=", simplex_tree.get_star([0]))
print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1))
diff --git a/src/python/example/rips_complex_from_points_example.py b/src/python/example/rips_complex_from_points_example.py
index 59d8a261..c05703c6 100755
--- a/src/python/example/rips_complex_from_points_example.py
+++ b/src/python/example/rips_complex_from_points_example.py
@@ -22,6 +22,9 @@ rips = gudhi.RipsComplex(points=[[0, 0], [1, 0], [0, 1], [1, 1]], max_edge_lengt
simplex_tree = rips.create_simplex_tree(max_dimension=1)
-print("filtrations=", simplex_tree.get_filtration())
+print("filtrations=")
+for simplex_with_filtration in simplex_tree.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("star([0])=", simplex_tree.get_star([0]))
print("coface([0], 1)=", simplex_tree.get_cofaces([0], 1))
diff --git a/src/python/example/simplex_tree_example.py b/src/python/example/simplex_tree_example.py
index 30de00da..34833899 100755
--- a/src/python/example/simplex_tree_example.py
+++ b/src/python/example/simplex_tree_example.py
@@ -38,8 +38,15 @@ else:
print("dimension=", st.dimension())
+print("simplices=")
+for simplex_with_filtration in st.get_simplices():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
st.initialize_filtration()
-print("filtration=", st.get_filtration())
+print("filtration=")
+for simplex_with_filtration in st.get_filtration():
+ print("(%s, %.2f)" % tuple(simplex_with_filtration))
+
print("filtration[1, 2]=", st.filtration([1, 2]))
print("filtration[4, 2]=", st.filtration([4, 2]))
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 246280de..cc3db467 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -5,6 +5,7 @@
# Copyright (C) 2016 Inria
#
# Modification(s):
+# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility.
# - YYYY/MM Author: Description of the modification
from os import path
@@ -14,7 +15,7 @@ import numpy as np
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
-__author__ = "Vincent Rouvreau, Bertrand Michel"
+__author__ = "Vincent Rouvreau, Bertrand Michel, Theo Lacombe"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -43,6 +44,19 @@ def __min_birth_max_death(persistence, band=0.0):
max_death += band
return (min_birth, max_death)
+
+def _array_handler(a):
+ '''
+    :param a: if array, assumes it is a (n x 2) np.array and returns a
+ persistence-compatible list (padding with 0), so that the
+ plot can be performed seamlessly.
+ '''
+ if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float):
+ return [[0, x] for x in a]
+ else:
+ return a
+
+
def plot_persistence_barcode(
persistence=[],
persistence_file="",
@@ -52,13 +66,16 @@ def plot_persistence_barcode(
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
):
"""This function plots the persistence bar code from persistence values list
+ , a np.array of shape (N x 2) (representing a diagram
+ in a single homology dimension),
or from a :doc:`persistence file <fileformats>`.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
:param persistence_file: A :doc:`persistence file <fileformats>` style name
(reset persistence if both are set).
:type persistence_file: string
@@ -81,11 +98,19 @@ def plot_persistence_barcode(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+    :param fontsize: Font size to use for the title and axis labels.
+ :type fontsize: int
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
+
+
+ persistence = _array_handler(persistence)
if persistence_file != "":
if path.isfile(persistence_file):
@@ -163,7 +188,7 @@ def plot_persistence_barcode(
loc="lower right",
)
- axes.set_title("Persistence barcode")
+ axes.set_title("Persistence barcode", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
axes.axis([axis_start, infinity, 0, ind])
@@ -183,13 +208,16 @@ def plot_persistence_diagram(
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=True
):
"""This function plots the persistence diagram from persistence values
- list or from a :doc:`persistence file <fileformats>`.
+ list, a np.array of shape (N x 2) representing a diagram in a single
+ homology dimension, or from a :doc:`persistence file <fileformats>`.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
:param persistence_file: A :doc:`persistence file <fileformats>` style name
(reset persistence if both are set).
:type persistence_file: string
@@ -214,11 +242,20 @@ def plot_persistence_diagram(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+    :param fontsize: Font size to use for the title and axis labels.
+ :type fontsize: int
+    :param greyblock: whether to plot a grey patch on the lower half plane for nicer rendering. Default True.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
+
+ persistence = _array_handler(persistence)
if persistence_file != "":
if path.isfile(persistence_file):
@@ -256,19 +293,18 @@ def plot_persistence_diagram(
# Replace infinity values with max_death + delta for diagram to be more
# readable
infinity = max_death + delta
+ axis_end = max_death + delta / 2
axis_start = min_birth - delta
- # line display of equation : birth = death
- x = np.linspace(axis_start, infinity, 1000)
- # infinity line and text
- axes.plot(x, x, color="k", linewidth=1.0)
- axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha)
- axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha)
# bootstrap band
if band > 0.0:
+ x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
-
+ # lower diag patch
+ if greyblock:
+ axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey'))
# Draw points in loop
+ pts_at_infty = False # Records presence of pts at infty
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
@@ -279,10 +315,23 @@ def plot_persistence_diagram(
color=colormap[interval[0]],
)
else:
+ pts_at_infty = True
# Infinite death case for diagram to be nicer
axes.scatter(
interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
)
+ if pts_at_infty:
+ # infinity line and text
+ axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
+ axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha)
+ # Infinity label
+ yt = axes.get_yticks()
+        yt = yt[np.where(yt < axis_end)] # to avoid plotting tick labels higher than infinity
+ yt = np.append(yt, infinity)
+ ytl = ["%.3f" % e for e in yt] # to avoid float precision error
+ ytl[-1] = r'$+\infty$'
+ axes.set_yticks(yt)
+ axes.set_yticklabels(ytl)
if legend:
dimensions = list(set(item[0] for item in persistence))
@@ -293,11 +342,11 @@ def plot_persistence_diagram(
]
)
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, axis_start, infinity + delta])
- axes.set_title("Persistence diagram")
+ axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
return axes
except ImportError:
@@ -313,16 +362,22 @@ def plot_persistence_density(
dimension=None,
cmap=None,
legend=False,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=False
):
"""This function plots the persistence density from persistence
- values list or from a :doc:`persistence file <fileformats>`. Be
+ values list, np.array of shape (N x 2) representing a diagram
+ in a single homology dimension,
+ or from a :doc:`persistence file <fileformats>`. Be
aware that this function does not distinguish the dimension, it is
up to you to select the required one. This function also does not handle
degenerate data set (scipy correlation matrix inversion can fail).
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
+ :param persistence: Persistence intervals values list.
+ Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death))
+ or an array of (birth, death).
:param persistence_file: A :doc:`persistence file <fileformats>`
style name (reset persistence if both are set).
:type persistence_file: string
@@ -355,11 +410,22 @@ def plot_persistence_density(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+    :param fontsize: Font size to use for the title and axis labels.
+ :type fontsize: int
+    :param greyblock: whether to plot a grey patch on the lower half plane
+ for nicer rendering. Default False.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
+ import matplotlib.patches as mpatches
from scipy.stats import kde
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
+
+ persistence = _array_handler(persistence)
if persistence_file != "":
if dimension is None:
@@ -418,12 +484,16 @@ def plot_persistence_density(
# Make the plot
img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
+ if greyblock:
+ axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey'))
+
if legend:
plt.colorbar(img, ax=axes)
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
- axes.set_title("Persistence density")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence density", fontsize=fontsize)
+
return axes
except ImportError:
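A minimal usage sketch of the reworked plotting helpers above (a sketch, assuming matplotlib is installed; not part of the patch). A diagram can now be passed directly as a (N x 2) np.array, which `_array_handler` pads into the usual (dimension, (birth, death)) form, and the returned axes can be customized further:

    import numpy as np
    import matplotlib.pyplot as plt
    import gudhi

    # A diagram in a single homology dimension, as a plain (N x 2) array.
    dgm = np.array([[0.0, 1.0], [0.5, 2.0], [1.0, np.inf]])

    # fontsize and greyblock are the keyword arguments introduced above.
    ax = gudhi.plot_persistence_diagram(dgm, greyblock=True, fontsize=12)
    ax.set_title("Toy diagram")  # the returned axes allow further tweaks
    plt.show()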
diff --git a/src/python/gudhi/point_cloud/__init__.py b/src/python/gudhi/point_cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/point_cloud/__init__.py
diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py
new file mode 100644
index 00000000..f01df442
--- /dev/null
+++ b/src/python/gudhi/point_cloud/timedelay.py
@@ -0,0 +1,95 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Martin Royer, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+
+class TimeDelayEmbedding:
+ """Point cloud transformation class.
+    Embeds time-series data into R^d according to [Takens' Embedding Theorem]
+ (https://en.wikipedia.org/wiki/Takens%27s_theorem) and obtains the
+ coordinates of each point.
+
+ Parameters
+ ----------
+ dim : int, optional (default=3)
+        Dimension `d` of the embedding space R^d.
+ delay : int, optional (default=1)
+        Time delay between consecutive coordinates of an embedded point.
+ skip : int, optional (default=1)
+        Stride between embedded points: only every `skip`-th point is kept.
+
+ Example
+ -------
+
+ Given delay=3 and skip=2, a point cloud which is obtained by embedding
+ a scalar time-series into R^3 is as follows::
+
+ time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ point cloud = [[1, 4, 7],
+ [3, 6, 9]]
+
+ Given delay=1 and skip=1, a point cloud which is obtained by embedding
+ a 2D vector time-series data into R^4 is as follows::
+
+ time-series = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
+ point cloud = [[0, 1, 2, 3],
+ [2, 3, 4, 5],
+ [4, 5, 6, 7],
+ [6, 7, 8, 9]]
+ """
+
+ def __init__(self, dim=3, delay=1, skip=1):
+ self._dim = dim
+ self._delay = delay
+ self._skip = skip
+
+ def __call__(self, ts):
+ """Transform method for single time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[float] or Iterable[Iterable[float]]
+            A single time series, with scalar or vector values.
+
+ Returns
+ -------
+        point cloud : n x dim numpy array
+            The point cloud built from the single time series.
+ """
+ return self._transform(np.array(ts))
+
+ def fit(self, ts, y=None):
+ return self
+
+ def _transform(self, ts):
+ """Guts of transform method."""
+ if ts.ndim == 1:
+ repeat = self._dim
+ else:
+ assert self._dim % ts.shape[1] == 0
+ repeat = self._dim // ts.shape[1]
+ end = len(ts) - self._delay * (repeat - 1)
+ short = np.arange(0, end, self._skip)
+ vertical = np.arange(0, repeat * self._delay, self._delay)
+ return ts[np.add.outer(short, vertical)].reshape(len(short), -1)
+
+ def transform(self, ts):
+ """Transform method for multiple time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[Iterable[float]] or Iterable[Iterable[Iterable[float]]]
+ Multiple time-series data, with scalar or vector values.
+
+ Returns
+ -------
+        point clouds : list of n x dim numpy arrays
+            One point cloud per input time series.
+ """
+ return [self._transform(np.array(s)) for s in ts]
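A rough standalone illustration of the indexing trick used in `_transform` above (a sketch, not part of the new module): `np.add.outer(short, vertical)` builds, for every starting index, the `repeat` delayed indices to gather, so the whole embedding reduces to a single fancy-indexing call:

    import numpy as np

    ts = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    dim, delay, skip = 3, 3, 2

    end = len(ts) - delay * (dim - 1)            # last admissible start + 1 -> 4
    short = np.arange(0, end, skip)              # starting indices: [0, 2]
    vertical = np.arange(0, dim * delay, delay)  # in-point offsets: [0, 3, 6]
    cloud = ts[np.add.outer(short, vertical)]    # [[1, 4, 7], [3, 6, 9]]
    print(cloud)

This reproduces the delay=3 / skip=2 example from the class docstring.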
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 96d14079..82f155de 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -21,6 +21,22 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_options_full_featured:
pass
+ cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Simplex_handle":
+ pass
+
+ cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Complex_simplex_iterator":
+ Simplex_tree_simplices_iterator()
+ Simplex_tree_simplex_handle& operator*()
+ Simplex_tree_simplices_iterator operator++()
+ bint operator!=(Simplex_tree_simplices_iterator)
+
+ cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Skeleton_simplex_iterator":
+ Simplex_tree_skeleton_iterator()
+ Simplex_tree_simplex_handle& operator*()
+ Simplex_tree_skeleton_iterator operator++()
+ bint operator!=(Simplex_tree_skeleton_iterator)
+
+
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
Simplex_tree()
double simplex_filtration(vector[int] simplex)
@@ -34,8 +50,6 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
bool find_simplex(vector[int] simplex)
bool insert_simplex_and_subfaces(vector[int] simplex,
double filtration)
- vector[pair[vector[int], double]] get_filtration()
- vector[pair[vector[int], double]] get_skeleton(int dimension)
vector[pair[vector[int], double]] get_star(vector[int] simplex)
vector[pair[vector[int], double]] get_cofaces(vector[int] simplex,
int dimension)
@@ -43,6 +57,14 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
void remove_maximal_simplex(vector[int] simplex)
bool prune_above_filtration(double filtration)
bool make_filtration_non_decreasing()
+ # Iterators over Simplex tree
+ pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex)
+ Simplex_tree_simplices_iterator get_simplices_iterator_begin()
+ Simplex_tree_simplices_iterator get_simplices_iterator_end()
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin()
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end()
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension)
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension)
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index b18627c4..c01cc905 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -7,6 +7,7 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
from numpy import array as np_array
cimport simplex_tree
@@ -207,22 +208,34 @@ cdef class SimplexTree:
return self.get_ptr().insert_simplex_and_subfaces(csimplex,
<double>filtration)
- def get_filtration(self):
- """This function returns a list of all simplices with their given
+ def get_simplices(self):
+ """This function returns a generator with simplices and their given
filtration values.
+ :returns: The simplices.
+ :rtype: generator with tuples(simplex, filtration)
+ """
+ cdef Simplex_tree_simplices_iterator it = self.get_ptr().get_simplices_iterator_begin()
+ cdef Simplex_tree_simplices_iterator end = self.get_ptr().get_simplices_iterator_end()
+ cdef Simplex_tree_simplex_handle sh = dereference(it)
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
+
+ def get_filtration(self):
+ """This function returns a generator with simplices and their given
+ filtration values sorted by increasing filtration values.
+
:returns: The simplices sorted by increasing filtration values.
- :rtype: list of tuples(simplex, filtration)
+ :rtype: generator with tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] filtration \
- = self.get_ptr().get_filtration()
- ct = []
- for filtered_complex in filtration:
- v = []
- for vertex in filtered_complex.first:
- v.append(vertex)
- ct.append((v, filtered_complex.second))
- return ct
+ cdef vector[Simplex_tree_simplex_handle].const_iterator it = self.get_ptr().get_filtration_iterator_begin()
+ cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end()
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
def get_skeleton(self, dimension):
"""This function returns the (simplices of the) skeleton of a maximum
@@ -233,15 +246,12 @@ cdef class SimplexTree:
:returns: The (simplices of the) skeleton of a maximum dimension.
:rtype: list of tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] skeleton \
- = self.get_ptr().get_skeleton(<int>dimension)
- ct = []
- for filtered_simplex in skeleton:
- v = []
- for vertex in filtered_simplex.first:
- v.append(vertex)
- ct.append((v, filtered_simplex.second))
- return ct
+ cdef Simplex_tree_skeleton_iterator it = self.get_ptr().get_skeleton_iterator_begin(dimension)
+ cdef Simplex_tree_skeleton_iterator end = self.get_ptr().get_skeleton_iterator_end(dimension)
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
def get_star(self, simplex):
"""This function returns the star of a given N-simplex.
diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py
index 13102094..3dd993f9 100644
--- a/src/python/gudhi/wasserstein.py
+++ b/src/python/gudhi/wasserstein.py
@@ -30,8 +30,10 @@ def _build_dist_matrix(X, Y, order=2., internal_p=2.):
:param order: exponent for the Wasserstein metric.
:param internal_p: Ground metric (i.e. norm L^p).
:returns: (n+1) x (m+1) np.array encoding the cost matrix C.
- For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal.
- note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal).
+ For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j],
+ while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j])
+ and its orthogonal projection onto the diagonal.
+ note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal).
'''
Xdiag = _proj_on_diag(X)
Ydiag = _proj_on_diag(Y)
@@ -62,14 +64,20 @@ def _perstot(X, order, internal_p):
return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
-def wasserstein_distance(X, Y, order=2., internal_p=2.):
+def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
'''
- :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
+ (i.e. with infinite coordinate).
:param Y: (m x 2) numpy.array encoding the second diagram.
+ :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
+ a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
+ the j-th point in Y, with the convention (-1) represents the diagonal.
:param order: exponent for Wasserstein; Default value is 2.
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm).
- :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric.
- :rtype: float
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
+ Default value is 2 (Euclidean norm).
+ :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+ respect to the internal_p-norm as ground metric.
+ If matching is set to True, also returns the optimal matching between X and Y.
'''
n = len(X)
m = len(Y)
@@ -77,21 +85,40 @@ def wasserstein_distance(X, Y, order=2., internal_p=2.):
# handle empty diagrams
if X.size == 0:
if Y.size == 0:
- return 0.
+ if not matching:
+ return 0.
+ else:
+ return 0., np.array([])
else:
- return _perstot(Y, order, internal_p)
+ if not matching:
+ return _perstot(Y, order, internal_p)
+ else:
+ return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)])
elif Y.size == 0:
- return _perstot(X, order, internal_p)
+ if not matching:
+ return _perstot(X, order, internal_p)
+ else:
+ return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)])
M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
- a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT
- b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- b[-1] = b[-1] * n # so that we have a probability measure, required by POT
+ a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
+ a[-1] = m
+ b = np.ones(m+1) # weight vector of the input diagram. Uniform here.
+ b[-1] = n
+
+ if matching:
+ P = ot.emd(a=a,b=b,M=M, numItermax=2000000)
+ ot_cost = np.sum(np.multiply(P,M))
+ P[-1, -1] = 0 # Remove matching corresponding to the diagonal
+ match = np.argwhere(P)
+        # Turn the indices that encode the diagonal into -1
+ match[:,0][match[:,0] >= n] = -1
+ match[:,1][match[:,1] >= m] = -1
+ return ot_cost ** (1./order) , match
# Computation of the OT cost using the ot.emd2 library.
# Note: it is the Wasserstein distance to the power q.
# The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
- ot_cost = (n+m) * ot.emd2(a, b, M, numItermax=2000000)
+ ot_cost = ot.emd2(a, b, M, numItermax=2000000)
return ot_cost ** (1./order)
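A rough standalone illustration of the matching extraction performed above (a sketch with a hypothetical transport plan, not POT output): the plan P on the extended (n+1) x (m+1) problem is turned into index pairs with np.argwhere, the diagonal-to-diagonal entry is dropped, and indices >= n (resp. >= m) are rewritten to -1:

    import numpy as np

    n, m = 2, 1  # two points in X, one in Y, plus one virtual diagonal point each
    # Hypothetical optimal transport plan (made-up numbers for illustration).
    P = np.array([[1., 0.],
                  [0., 1.],
                  [0., 0.]])
    P[-1, -1] = 0                       # ignore mass moved from diagonal to diagonal
    match = np.argwhere(P)              # [[0, 0], [1, 1]]
    match[:, 0][match[:, 0] >= n] = -1  # rows >= n stand for the diagonal of X
    match[:, 1][match[:, 1] >= m] = -1  # columns >= m stand for the diagonal of Y
    print(match)                        # [[0, 0], [1, -1]]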
diff --git a/src/python/include/Simplex_tree_interface.h b/src/python/include/Simplex_tree_interface.h
index 06f31341..4a7062d6 100644
--- a/src/python/include/Simplex_tree_interface.h
+++ b/src/python/include/Simplex_tree_interface.h
@@ -33,7 +33,10 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
using Simplex_handle = typename Base::Simplex_handle;
using Insertion_result = typename std::pair<Simplex_handle, bool>;
using Simplex = std::vector<Vertex_handle>;
- using Filtered_simplices = std::vector<std::pair<Simplex, Filtration_value>>;
+ using Simplex_and_filtration = std::pair<Simplex, Filtration_value>;
+ using Filtered_simplices = std::vector<Simplex_and_filtration>;
+ using Skeleton_simplex_iterator = typename Base::Skeleton_simplex_iterator;
+ using Complex_simplex_iterator = typename Base::Complex_simplex_iterator;
public:
bool find_simplex(const Simplex& vh) {
@@ -82,29 +85,12 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
Base::initialize_filtration();
}
- Filtered_simplices get_filtration() {
- Base::initialize_filtration();
- Filtered_simplices filtrations;
- for (auto f_simplex : Base::filtration_simplex_range()) {
- Simplex simplex;
- for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
- simplex.insert(simplex.begin(), vertex);
- }
- filtrations.push_back(std::make_pair(simplex, Base::filtration(f_simplex)));
- }
- return filtrations;
- }
-
- Filtered_simplices get_skeleton(int dimension) {
- Filtered_simplices skeletons;
- for (auto f_simplex : Base::skeleton_simplex_range(dimension)) {
- Simplex simplex;
- for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
- simplex.insert(simplex.begin(), vertex);
- }
- skeletons.push_back(std::make_pair(simplex, Base::filtration(f_simplex)));
+ Simplex_and_filtration get_simplex_and_filtration(Simplex_handle f_simplex) {
+ Simplex simplex;
+ for (auto vertex : Base::simplex_vertex_range(f_simplex)) {
+ simplex.insert(simplex.begin(), vertex);
}
- return skeletons;
+ return std::make_pair(std::move(simplex), Base::filtration(f_simplex));
}
Filtered_simplices get_star(const Simplex& simplex) {
@@ -135,6 +121,38 @@ class Simplex_tree_interface : public Simplex_tree<SimplexTreeOptions> {
Base::initialize_filtration();
pcoh = new Gudhi::Persistent_cohomology_interface<Base>(*this);
}
+
+ // Iterator over the simplex tree
+ Complex_simplex_iterator get_simplices_iterator_begin() {
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::complex_simplex_range().begin();
+ }
+
+ Complex_simplex_iterator get_simplices_iterator_end() {
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::complex_simplex_range().end();
+ }
+
+ typename std::vector<Simplex_handle>::const_iterator get_filtration_iterator_begin() {
+ // Base::initialize_filtration(); already performed in filtration_simplex_range
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::filtration_simplex_range().begin();
+ }
+
+ typename std::vector<Simplex_handle>::const_iterator get_filtration_iterator_end() {
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::filtration_simplex_range().end();
+ }
+
+ Skeleton_simplex_iterator get_skeleton_iterator_begin(int dimension) {
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::skeleton_simplex_range(dimension).begin();
+ }
+
+ Skeleton_simplex_iterator get_skeleton_iterator_end(int dimension) {
+ // this specific case works because the range is just a pair of iterators - won't work if range was a vector
+ return Base::skeleton_simplex_range(dimension).end();
+ }
};
} // namespace Gudhi
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index 3761fe16..77121302 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -40,7 +40,7 @@ def test_infinite_alpha():
assert simplex_tree.num_simplices() == 11
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -53,6 +53,7 @@ def test_infinite_alpha():
([0, 1, 2], 0.5),
([1, 2, 3], 0.5),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 0.25),
@@ -105,7 +106,7 @@ def test_filtered_alpha():
else:
assert False
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
diff --git a/src/python/test/test_euclidean_witness_complex.py b/src/python/test/test_euclidean_witness_complex.py
index c18d2484..f3664d39 100755
--- a/src/python/test/test_euclidean_witness_complex.py
+++ b/src/python/test/test_euclidean_witness_complex.py
@@ -40,7 +40,7 @@ def test_witness_complex():
assert landmarks[1] == euclidean_witness_complex.get_point(1)
assert landmarks[2] == euclidean_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([0, 1], 0.0),
@@ -78,13 +78,13 @@ def test_strong_witness_complex():
assert landmarks[1] == euclidean_strong_witness_complex.get_point(1)
assert landmarks[2] == euclidean_strong_witness_complex.get_point(2)
- assert simplex_tree.get_filtration() == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
+ assert list(simplex_tree.get_filtration()) == [([0], 0.0), ([1], 0.0), ([2], 0.0)]
simplex_tree = euclidean_strong_witness_complex.create_simplex_tree(
max_alpha_square=100.0
)
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py
index b02a68e1..b86e7498 100755
--- a/src/python/test/test_rips_complex.py
+++ b/src/python/test/test_rips_complex.py
@@ -32,7 +32,7 @@ def test_rips_from_points():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -44,6 +44,7 @@ def test_rips_from_points():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
@@ -95,7 +96,7 @@ def test_rips_from_distance_matrix():
assert simplex_tree.num_simplices() == 10
assert simplex_tree.num_vertices() == 4
- assert simplex_tree.get_filtration() == [
+ assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -107,6 +108,7 @@ def test_rips_from_distance_matrix():
([1, 2], 1.4142135623730951),
([0, 3], 1.4142135623730951),
]
+
assert simplex_tree.get_star([0]) == [
([0], 0.0),
([0, 1], 1.0),
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index 1822c43b..f7848379 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -55,7 +55,7 @@ def test_insertion():
assert st.filtration([1]) == 0.0
# skeleton test
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1, 2], 4.0),
([0, 1], 0.0),
([0, 2], 4.0),
@@ -64,7 +64,7 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(1) == [
+ assert list(st.get_skeleton(1)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -72,12 +72,12 @@ def test_insertion():
([1], 0.0),
([2], 4.0),
]
- assert st.get_skeleton(0) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
+ assert list(st.get_skeleton(0)) == [([0], 0.0), ([1], 0.0), ([2], 4.0)]
# remove_maximal_simplex test
assert st.get_cofaces([0, 1, 2], 1) == []
st.remove_maximal_simplex([0, 1, 2])
- assert st.get_skeleton(2) == [
+ assert list(st.get_skeleton(2)) == [
([0, 1], 0.0),
([0, 2], 4.0),
([0], 0.0),
@@ -126,7 +126,8 @@ def test_expansion():
assert st.num_vertices() == 7
assert st.num_simplices() == 17
- assert st.get_filtration() == [
+
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -151,7 +152,7 @@ def test_expansion():
assert st.num_simplices() == 22
st.initialize_filtration()
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([2], 0.1),
([3], 0.1),
([2, 3], 0.1),
@@ -248,3 +249,15 @@ def test_make_filtration_non_decreasing():
assert st.filtration([3, 4, 5]) == 2.0
assert st.filtration([3, 4]) == 2.0
assert st.filtration([4, 5]) == 2.0
+
+def test_simplices_iterator():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2], filtration=4.0) == True
+ assert st.insert([2, 3, 4], filtration=2.0) == True
+
+ for simplex in st.get_simplices():
+ print("simplex is: ", simplex[0])
+ assert st.find(simplex[0]) == True
+ print("filtration is: ", simplex[1])
+ assert st.filtration(simplex[0]) == simplex[1]
diff --git a/src/python/test/test_tangential_complex.py b/src/python/test/test_tangential_complex.py
index e650e99c..8668a2e0 100755
--- a/src/python/test/test_tangential_complex.py
+++ b/src/python/test/test_tangential_complex.py
@@ -37,7 +37,7 @@ def test_tangential():
assert st.num_simplices() == 6
assert st.num_vertices() == 4
- assert st.get_filtration() == [
+ assert list(st.get_filtration()) == [
([0], 0.0),
([1], 0.0),
([2], 0.0),
@@ -45,6 +45,7 @@ def test_tangential():
([3], 0.0),
([1, 3], 0.0),
]
+
assert st.get_cofaces([0], 1) == [([0, 2], 0.0)]
assert point_list[0] == tc.get_point(0)
diff --git a/src/python/test/test_time_delay.py b/src/python/test/test_time_delay.py
new file mode 100755
index 00000000..1ead9bca
--- /dev/null
+++ b/src/python/test/test_time_delay.py
@@ -0,0 +1,43 @@
+from gudhi.point_cloud.timedelay import TimeDelayEmbedding
+import numpy as np
+
+
+def test_normal():
+ # Sample array
+ ts = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ # Normal case.
+ prep = TimeDelayEmbedding()
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([2, 3, 4])).all()
+ assert (pointclouds[2] == np.array([3, 4, 5])).all()
+ assert (pointclouds[3] == np.array([4, 5, 6])).all()
+ assert (pointclouds[4] == np.array([5, 6, 7])).all()
+ assert (pointclouds[5] == np.array([6, 7, 8])).all()
+ assert (pointclouds[6] == np.array([7, 8, 9])).all()
+ assert (pointclouds[7] == np.array([8, 9, 10])).all()
+ # Delay = 3
+ prep = TimeDelayEmbedding(delay=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 4, 7])).all()
+ assert (pointclouds[1] == np.array([2, 5, 8])).all()
+ assert (pointclouds[2] == np.array([3, 6, 9])).all()
+ assert (pointclouds[3] == np.array([4, 7, 10])).all()
+ # Skip = 3
+ prep = TimeDelayEmbedding(skip=3)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 2, 3])).all()
+ assert (pointclouds[1] == np.array([4, 5, 6])).all()
+ assert (pointclouds[2] == np.array([7, 8, 9])).all()
+ # Delay = 2 / Skip = 2
+ prep = TimeDelayEmbedding(delay=2, skip=2)
+ pointclouds = prep(ts)
+ assert (pointclouds[0] == np.array([1, 3, 5])).all()
+ assert (pointclouds[1] == np.array([3, 5, 7])).all()
+ assert (pointclouds[2] == np.array([5, 7, 9])).all()
+
+ # Vector series
+ ts = np.arange(0, 10).reshape(-1, 2)
+ prep = TimeDelayEmbedding(dim=4)
+ prep.fit([ts])
+ assert (prep.transform([ts])[0] == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]).all()
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 6a6b217b..0d70e11a 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -17,7 +17,7 @@ __author__ = "Theo Lacombe"
__copyright__ = "Copyright (C) 2019 Inria"
__license__ = "MIT"
-def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True):
+def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True):
diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])
diag3 = np.array([[0, 2], [4, 6]])
@@ -51,14 +51,27 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True):
assert wasserstein_distance(diag3, diag4, internal_p=1., order=2.) == approx(np.sqrt(5))
assert wasserstein_distance(diag3, diag4, internal_p=4.5, order=2.) == approx(np.sqrt(5))
- if(not test_infinity):
- return
+ if test_infinity:
+ diag5 = np.array([[0, 3], [4, np.inf]])
+ diag6 = np.array([[7, 8], [4, 6], [3, np.inf]])
- diag5 = np.array([[0, 3], [4, np.inf]])
- diag6 = np.array([[7, 8], [4, 6], [3, np.inf]])
+ assert wasserstein_distance(diag4, diag5) == np.inf
+ assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.)
+
+
+ if test_matching:
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match, [])
+ match = wasserstein_distance(emptydiag, diag2, matching=True, internal_p=np.inf, order=2.)[1]
+ assert np.array_equal(match , [[-1, 0], [-1, 1]])
+ match = wasserstein_distance(diag2, emptydiag, matching=True, internal_p=np.inf, order=2.24)[1]
+ assert np.array_equal(match , [[0, -1], [1, -1]])
+ match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1]
+ assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]])
+
- assert wasserstein_distance(diag4, diag5) == np.inf
- assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.)
def hera_wrap(delta):
def fun(*kargs,**kwargs):
@@ -66,8 +79,8 @@ def hera_wrap(delta):
return fun
def test_wasserstein_distance_pot():
- _basic_wasserstein(pot, 1e-15, test_infinity=False)
+ _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True)
def test_wasserstein_distance_hera():
- _basic_wasserstein(hera_wrap(1e-12), 1e-12)
- _basic_wasserstein(hera_wrap(.1), .1)
+ _basic_wasserstein(hera_wrap(1e-12), 1e-12, test_matching=False)
+ _basic_wasserstein(hera_wrap(.1), .1, test_matching=False)