author     Hind-M <hind.montassif@gmail.com>  2022-01-31 11:15:54 +0100
committer  Hind-M <hind.montassif@gmail.com>  2022-01-31 11:15:54 +0100
commit     1209db091a89ed48527c34fff0cc9ef41c78d11f (patch)
tree       e34a11454579e27ad3c711b50a46c5c9670a3e54
parent     8d1e7aeb3416194d00f45587d1ecea85ba218028 (diff)
parent     7f1b8eb706c72921141b53e607d6e2aa28e2bf19 (diff)
Merge remote-tracking branch 'upstream/master' into fetch_datasets
-rw-r--r--  .appveyor.yml | 80
-rw-r--r--  .github/for_maintainers/tests_strategy.md | 28
-rw-r--r--  .github/next_release.md | 8
-rw-r--r--  CMakeLists.txt | 4
-rw-r--r--  README.md | 3
-rw-r--r--  azure-pipelines.yml | 75
-rw-r--r--  src/Alpha_complex/include/gudhi/Alpha_complex.h | 12
-rw-r--r--  src/Alpha_complex/test/Alpha_complex_unit_test.cpp | 15
-rw-r--r--  src/CMakeLists.txt | 4
-rw-r--r--  src/cmake/modules/GUDHI_modules.cmake | 7
-rw-r--r--  src/cmake/modules/GUDHI_options.cmake | 6
-rw-r--r--  src/cmake/modules/GUDHI_third_party_libraries.cmake | 175
-rw-r--r--  src/python/CMakeLists.txt | 27
-rw-r--r--  src/python/doc/alpha_complex_ref.rst | 1
-rw-r--r--  src/python/doc/alpha_complex_sum.inc | 24
-rw-r--r--  src/python/doc/alpha_complex_user.rst | 104
-rwxr-xr-x  src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py | 55
-rwxr-xr-x  src/python/example/alpha_rips_persistence_bottleneck_distance.py | 110
-rwxr-xr-x  src/python/example/plot_alpha_complex.py | 5
-rw-r--r--  src/python/gudhi/alpha_complex.pyx | 62
-rw-r--r--  src/python/gudhi/representations/vector_methods.py | 159
-rw-r--r--  src/python/include/Alpha_complex_factory.h | 118
-rw-r--r--  src/python/include/Alpha_complex_interface.h | 52
-rwxr-xr-x  src/python/test/test_alpha_complex.py | 152
-rwxr-xr-x  src/python/test/test_betti_curve_representations.py | 59
-rwxr-xr-x  src/python/test/test_reader_utils.py | 33
-rwxr-xr-x  src/python/test/test_representations.py | 3
27 files changed, 806 insertions(+), 575 deletions(-)
diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index af280955..00000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-image:
- - Visual Studio 2019
-
-build:
- parallel: true
- verbosity: detailed
-
-configuration:
- - Release
-
-environment:
- # update the vcpkg cache even if build fails
- # APPVEYOR_SAVE_CACHE_ON_ERROR: true
- PYTHON: "C:\\Python39-x64"
- PYTHONPATH: "C:\\Python39-x64\\lib\\site-packages"
- CMAKE_VCPKG_FLAGS: -DVCPKG_TARGET_TRIPLET=x64-windows -DCMAKE_TOOLCHAIN_FILE=c:\Tools\vcpkg\scripts\buildsystems\vcpkg.cmake
-
- matrix:
- - target: Examples
- CMAKE_FLAGS: -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF
-
- - target: UnitaryTests
- CMAKE_FLAGS: -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF
-
- - target: Utilities
- CMAKE_FLAGS: -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=OFF
-
- - target: Python
- CMAKE_FLAGS: -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON
-
-
-#cache:
-# - c:\Tools\vcpkg\installed
-# - '%LOCALAPPDATA%\pip\Cache'
-
-init:
- - echo %target%
-
-install:
- - git submodule update --init
- - vcpkg update
- - vcpkg remove --outdated
- - vcpkg upgrade --no-dry-run
- - vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
- - dir "C:\Tools\vcpkg\installed\x64-windows\bin\"
- - vcpkg integrate install
- - CALL "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" amd64
- - "set PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
- - python --version
- - pip --version
- - python -m pip install --upgrade pip
- - python -m pip install --upgrade setuptools
- - python -m pip install -r ext\gudhi-deploy\build-requirements.txt
- # No PyKeOps on windows, let's workaround this one.
- - for /F "tokens=*" %%A in (ext\gudhi-deploy\test-requirements.txt) do python -m pip install %%A
- - dir "c:\python39-x64\lib\site-packages"
- - dir "%LOCALAPPDATA%\pip\Cache"
- - python -c "from scipy import spatial; print(spatial.cKDTree)"
-
-build_script:
- - mkdir build
- - cd build
- - cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release %CMAKE_FLAGS% %CMAKE_VCPKG_FLAGS% ..
- - if [%target%]==[Python] (
- cd src\python &
- dir . &
- type setup.py &
- copy "C:\Tools\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\" &
- copy "C:\Tools\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\" &
- copy "C:\Tools\vcpkg\installed\x64-windows\bin\tbb.dll" ".\gudhi\" &
- copy "C:\Tools\vcpkg\installed\x64-windows\bin\tbbmalloc.dll" ".\gudhi\" &
- python setup.py build_ext --inplace &
- SET PYTHONPATH=%CD%;%PYTHONPATH% &
- echo %PYTHONPATH% &
- ctest -j 1 --output-on-failure -C Release
- ) else (
- dir . &
- MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 &
- ctest -j 1 --output-on-failure -C Release -E diff_files
- )
diff --git a/.github/for_maintainers/tests_strategy.md b/.github/for_maintainers/tests_strategy.md
index 2bba3f42..338d4282 100644
--- a/.github/for_maintainers/tests_strategy.md
+++ b/.github/for_maintainers/tests_strategy.md
@@ -8,7 +8,7 @@ The aim is to help maintainers to anticipate third parties modifications, update
### Linux
-As all the third parties are already installed (thanks to docker), the compilations has been separated by categories to be parallelized:
+As all third parties are already installed (thanks to docker), the compilations have been separated into categories so they can be parallelized:
* examples (C++)
* tests (C++)
@@ -25,9 +25,9 @@ Without CGAL, and, with or without Eigen builds are performed inside the docker
#### Update docker images
-C++ third parties installation are done thanks to apt on Ubuntu latest LTS.
+C++ third parties installation is done thanks to apt on Ubuntu latest LTS.
-Docker images need to be rebuild and push each time `.github/build-requirements`, `.github/test-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+Docker images need to be rebuilt and pushed each time `.github/build-requirements` or `.github/test-requirements` is modified, when a new third party is added, when a new CGAL version improves gudhi performance, ...
```bash
docker build -f Dockerfile_for_circleci_image -t gudhi/ci_for_gudhi:latest .
@@ -39,22 +39,24 @@ docker push gudhi/ci_for_gudhi_wo_cgal:latest
### Windows
-The compilations has been separated by categories to be parallelized, but I don't know why builds are not run in parallel:
+The compilations are not parallelized, as the installation time (about 30 minutes) is too long compared to
+the build and test times (about 30 minutes). Builds and tests include:
* examples (C++)
* tests (C++)
* utils (C++)
* python (here the WITH_GUDHI_REMOTE_TEST option is enabled which adds datasets fetching test)
-Doxygen (C++) is not tested.
-(cf. `.appveyor.yml`)
+Doxygen (C++) is not generated.
+(cf. `azure-pipelines.yml`)
-C++ third parties installation are done thanks to [vcpkg](https://github.com/microsoft/vcpkg/).
-In case of installation issue, check in [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
+C++ third parties installation is done thanks to [vcpkg](https://github.com/microsoft/vcpkg/).
+In case of an installation issue, check in [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
### OSx
-The compilations has been separated by categories to be parallelized:
+The compilations are not parallelized, but they should be, as the installation time (about 4 minutes) is
+negligible compared to the build and test times (about 30 minutes). Builds and tests include:
* examples (C++)
* tests (C++)
@@ -64,8 +66,8 @@ The compilations has been separated by categories to be parallelized:
(cf. `azure-pipelines.yml`)
-C++ third parties installation are done thanks to [brew](https://formulae.brew.sh/formula/).
-In case of installation issue, check in formula issues.
+C++ third parties installation is done thanks to [brew](https://formulae.brew.sh/formula/).
+In case of an installation issue, check in formula issues.
## Pip packaging
@@ -78,9 +80,9 @@ Only the Linux pip package is based on a docker image (`gudhi/pip_for_gudhi` bas
### Update docker image
-C++ third parties installation are done thanks to yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
+C++ third parties installation is done thanks to yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
-Docker image need to be rebuild and push each time `.github/build-requirements`, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+The Docker image needs to be rebuilt and pushed each time `.github/build-requirements` is modified, when a new third party is added, when a new CGAL version improves gudhi performance, ...
As `.github/test-requirements` is not installed, no need to rebuild image when this file is modified.
```bash
diff --git a/.github/next_release.md b/.github/next_release.md
index 0a8b2bbb..65a92816 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -6,8 +6,12 @@ We are now using GitHub to develop the GUDHI library, do not hesitate to [fork t
Below is a list of changes made since GUDHI 3.5.0:
-- [Module](link)
- - ...
+- [Alpha complex](https://gudhi.inria.fr/python/latest/alpha_complex_user.html)
+ - The Python weighted version of the alpha complex is now available in any dimension D.
+ - `alpha_complex = gudhi.AlphaComplex(off_file='/data/points/tore3D_300.off')` is deprecated, please use [read_points_from_off_file](https://gudhi.inria.fr/python/latest/point_cloud.html#gudhi.read_points_from_off_file) instead.
+
+- [Representations](https://gudhi.inria.fr/python/latest/representations.html#gudhi.representations.vector_methods.BettiCurve)
+ - A more flexible Betti curve class capable of computing exact curves
- [Module](link)
- ...
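The deprecation listed above amounts to reading the point cloud yourself and passing it to the `AlphaComplex` constructor. A minimal sketch, assuming the GUDHI Python module is installed and that the path to `tore3D_300.off` below is valid for your checkout:

```python
import gudhi as gd

# Deprecated: gd.AlphaComplex(off_file='/data/points/tore3D_300.off')
# Replacement: read the points first, then build the complex from them.
points = gd.read_points_from_off_file(off_file='data/points/tore3D_300.off')  # assumed path
stree = gd.AlphaComplex(points=points).create_simplex_tree()
print(stree.num_vertices(), 'vertices -', stree.num_simplices(), 'simplices')
```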
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d0cf6a25..ac877eea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,9 +2,9 @@ cmake_minimum_required(VERSION 3.5)
project(GUDHIdev)
-include(CMakeGUDHIVersion.txt)
-
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/src/cmake/modules/")
+include(CMakeGUDHIVersion.txt)
+include(GUDHI_options)
# Reset cache
set(GUDHI_MODULES "" CACHE INTERNAL "GUDHI_MODULES")
diff --git a/README.md b/README.md
index 279953e1..664483ee 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,6 @@
-[![OSx on Azure](https://dev.azure.com/GUDHI/gudhi-devel/_apis/build/status/GUDHI.gudhi-devel?branchName=master)](https://dev.azure.com/GUDHI/gudhi-devel/_build/latest?definitionId=1&branchName=master)
+[![OSx and Win on Azure](https://dev.azure.com/GUDHI/gudhi-devel/_apis/build/status/GUDHI.gudhi-devel?branchName=master)](https://dev.azure.com/GUDHI/gudhi-devel/_build/latest?definitionId=1&branchName=master)
[![Linux on CircleCI](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master.svg?style=svg)](https://circleci.com/gh/GUDHI/gudhi-devel/tree/master)
-[![Win on Appveyor](https://ci.appveyor.com/api/projects/status/976j2uut8xgalvx2/branch/master?svg=true)](https://ci.appveyor.com/project/GUDHI/gudhi-devel/branch/master)
[![Anaconda Cloud](https://anaconda.org/conda-forge/gudhi/badges/version.svg)](https://anaconda.org/conda-forge/gudhi)
[![Anaconda downloads](https://anaconda.org/conda-forge/gudhi/badges/downloads.svg)](https://anaconda.org/conda-forge/gudhi)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 25f61ce7..05c88289 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,26 +1,26 @@
jobs:
- - job: 'Test'
- displayName: "Build and test"
+ - job: 'OSx'
+ displayName: "Build and test OSx"
timeoutInMinutes: 0
cancelTimeoutInMinutes: 60
pool:
vmImage: macOS-10.15
variables:
- pythonVersion: '3.6'
+ pythonVersion: '3.7'
cmakeBuildType: Release
steps:
- - bash: echo "##vso[task.prependpath]$CONDA/bin"
- displayName: Add conda to PATH
-
- - bash: sudo conda create --yes --quiet --name gudhi_build_env
- displayName: Create Anaconda environment
-
+ # Use a specific Python version
+ - task: UsePythonVersion@0
+ displayName: Use Python $(pythonVersion)
+ inputs:
+ versionSpec: $(pythonVersion)
+ addToPath: true
+ architecture: 'x64'
+
- bash: |
- source activate gudhi_build_env
git submodule update --init
- sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion)
python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
python -m pip install --user -r ext/gudhi-deploy/test-requirements.txt
python -m pip uninstall -y pykeops
@@ -28,11 +28,56 @@ jobs:
brew install graphviz doxygen boost eigen gmp mpfr tbb cgal || true
displayName: 'Install build dependencies'
- bash: |
- source activate gudhi_build_env
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 -DWITH_GUDHI_REMOTE_TEST=ON ..
- make -j 4
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON ..
+ make
make doxygen
- ctest -j 4 --output-on-failure # -E sphinx remove sphinx build as it fails
+ ctest --output-on-failure
displayName: 'Build, test and documentation generation'
+
+ - job: 'Windows'
+ displayName: "Build and test Windows"
+ timeoutInMinutes: 0
+ cancelTimeoutInMinutes: 60
+ pool:
+ vmImage: windows-latest
+ variables:
+ pythonVersion: '3.7'
+ cmakeVcpkgFlags: -DVCPKG_TARGET_TRIPLET=x64-windows -DCMAKE_TOOLCHAIN_FILE=c:\vcpkg\scripts\buildsystems\vcpkg.cmake
+ cmakeFlags: -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=OFF
+
+ steps:
+ # Use a specific Python version
+ - task: UsePythonVersion@0
+ displayName: Use Python $(pythonVersion)
+ inputs:
+ versionSpec: $(pythonVersion)
+ addToPath: true
+ architecture: 'x64'
+
+ - script: |
+ git submodule update --init
+ python -m pip install --user -r ext/gudhi-deploy/build-requirements.txt
+ # No PyKeOps on windows, let's workaround this one.
+ for /F "tokens=*" %%A in (ext\gudhi-deploy\test-requirements.txt) do python -m pip install %%A
+ vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
+ displayName: 'Install build dependencies'
+ - script: |
+ call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
+ mkdir build
+ cd build
+ cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_BUILD_TYPE=Release $(cmakeVcpkgFlags) $(cmakeFlags) ..
+ MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64
+ ctest --output-on-failure -C Release -E diff_files
+ cmake -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON .
+ cd src\python
+ copy "C:\vcpkg\installed\x64-windows\bin\mpfr-6.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\gmp.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\tbb.dll" ".\gudhi\"
+ copy "C:\vcpkg\installed\x64-windows\bin\tbbmalloc.dll" ".\gudhi\"
+ python setup.py build_ext --inplace
+ SET PYTHONPATH=%CD%;%PYTHONPATH%
+ echo %PYTHONPATH%
+ ctest --output-on-failure -C Release
+ displayName: 'Build and test'
diff --git a/src/Alpha_complex/include/gudhi/Alpha_complex.h b/src/Alpha_complex/include/gudhi/Alpha_complex.h
index e03bb161..028ec9bb 100644
--- a/src/Alpha_complex/include/gudhi/Alpha_complex.h
+++ b/src/Alpha_complex/include/gudhi/Alpha_complex.h
@@ -20,6 +20,7 @@
#include <stdlib.h>
#include <math.h> // isnan, fmax
#include <memory> // for std::unique_ptr
+#include <cstddef> // for std::size_t
#include <CGAL/Delaunay_triangulation.h>
#include <CGAL/Regular_triangulation.h> // aka. Weighted Delaunay triangulation
@@ -213,6 +214,15 @@ class Alpha_complex {
Alpha_complex (Alpha_complex&& other) = delete;
Alpha_complex& operator= (Alpha_complex&& other) = delete;
+ /** \brief Returns the number of finite vertices in the triangulation.
+ */
+ std::size_t num_vertices() const {
+ if (triangulation_ == nullptr)
+ return 0;
+ else
+ return triangulation_->number_of_vertices();
+ }
+
/** \brief get_point returns the point corresponding to the vertex given as parameter.
*
* @param[in] vertex Vertex handle of the point to retrieve.
@@ -373,7 +383,7 @@ class Alpha_complex {
// --------------------------------------------------------------------------------------------
// Simplex_tree construction from loop on triangulation finite full cells list
- if (triangulation_->number_of_vertices() > 0) {
+ if (num_vertices() > 0) {
for (auto cit = triangulation_->finite_full_cells_begin();
cit != triangulation_->finite_full_cells_end();
++cit) {
diff --git a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
index 4b37e4bd..f74ad217 100644
--- a/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Alpha_complex_unit_test.cpp
@@ -56,6 +56,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of
Gudhi::Simplex_tree<> simplex_tree_60;
BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_60, max_alpha_square_value));
+ std::clog << "alpha_complex_from_file.num_vertices()=" << alpha_complex_from_file.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_file.num_vertices() == 7);
+
std::clog << "simplex_tree_60.dimension()=" << simplex_tree_60.dimension() << std::endl;
BOOST_CHECK(simplex_tree_60.dimension() == 2);
@@ -72,6 +75,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_OFF_file, TestedKernel, list_of
Gudhi::Simplex_tree<> simplex_tree_59;
BOOST_CHECK(alpha_complex_from_file.create_complex(simplex_tree_59, max_alpha_square_value));
+ std::clog << "alpha_complex_from_file.num_vertices()=" << alpha_complex_from_file.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_file.num_vertices() == 7);
+
std::clog << "simplex_tree_59.dimension()=" << simplex_tree_59.dimension() << std::endl;
BOOST_CHECK(simplex_tree_59.dimension() == 2);
@@ -120,6 +126,9 @@ BOOST_AUTO_TEST_CASE(Alpha_complex_from_points) {
Gudhi::Simplex_tree<> simplex_tree;
BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() == points.size());
+
// Another way to check num_simplices
std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" << std::endl;
int num_simplices = 0;
@@ -240,6 +249,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_from_empty_points, TestedKernel, lis
// ----------------------------------------------------------------------------
Gudhi::alpha_complex::Alpha_complex<TestedKernel> alpha_complex_from_points(points);
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() == points.size());
+
// Test to the limit
BOOST_CHECK_THROW (alpha_complex_from_points.get_point(0), std::out_of_range);
@@ -291,6 +303,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(Alpha_complex_with_duplicated_points, TestedKernel
std::clog << "create_complex" << std::endl;
BOOST_CHECK(alpha_complex_from_points.create_complex(simplex_tree));
+ std::clog << "alpha_complex_from_points.num_vertices()=" << alpha_complex_from_points.num_vertices() << std::endl;
+ BOOST_CHECK(alpha_complex_from_points.num_vertices() < points.size());
+
std::clog << "simplex_tree.num_vertices()=" << simplex_tree.num_vertices()
<< std::endl;
BOOST_CHECK(simplex_tree.num_vertices() < points.size());
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 8f6a1ccc..8023e04c 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -2,9 +2,9 @@ cmake_minimum_required(VERSION 3.5)
project(GUDHI)
-include(CMakeGUDHIVersion.txt)
-
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/")
+include(CMakeGUDHIVersion.txt)
+include(GUDHI_options)
set(GUDHI_MODULES "" CACHE INTERNAL "GUDHI_MODULES")
set(GUDHI_MISSING_MODULES "" CACHE INTERNAL "GUDHI_MISSING_MODULES")
diff --git a/src/cmake/modules/GUDHI_modules.cmake b/src/cmake/modules/GUDHI_modules.cmake
index cbed6351..13248f7e 100644
--- a/src/cmake/modules/GUDHI_modules.cmake
+++ b/src/cmake/modules/GUDHI_modules.cmake
@@ -17,13 +17,6 @@ function(add_gudhi_module file_path)
endfunction(add_gudhi_module)
-option(WITH_GUDHI_BENCHMARK "Activate/deactivate benchmark compilation" OFF)
-option(WITH_GUDHI_EXAMPLE "Activate/deactivate examples compilation" OFF)
-option(WITH_GUDHI_REMOTE_TEST "Activate/deactivate datasets fetching test which uses the Internet" OFF)
-option(WITH_GUDHI_PYTHON "Activate/deactivate python module compilation and installation" ON)
-option(WITH_GUDHI_TEST "Activate/deactivate tests compilation" ON)
-option(WITH_GUDHI_UTILITIES "Activate/deactivate utilities compilation" ON)
-
if (WITH_GUDHI_BENCHMARK)
set(GUDHI_SUB_DIRECTORIES "${GUDHI_SUB_DIRECTORIES};benchmark")
endif()
diff --git a/src/cmake/modules/GUDHI_options.cmake b/src/cmake/modules/GUDHI_options.cmake
new file mode 100644
index 00000000..c75b72f5
--- /dev/null
+++ b/src/cmake/modules/GUDHI_options.cmake
@@ -0,0 +1,6 @@
+option(WITH_GUDHI_BENCHMARK "Activate/deactivate benchmark compilation" OFF)
+option(WITH_GUDHI_EXAMPLE "Activate/deactivate examples compilation and installation" OFF)
+option(WITH_GUDHI_REMOTE_TEST "Activate/deactivate datasets fetching test which uses the Internet" OFF)
+option(WITH_GUDHI_PYTHON "Activate/deactivate python module compilation and installation" ON)
+option(WITH_GUDHI_TEST "Activate/deactivate tests compilation and installation" ON)
+option(WITH_GUDHI_UTILITIES "Activate/deactivate utilities compilation and installation" ON)
diff --git a/src/cmake/modules/GUDHI_third_party_libraries.cmake b/src/cmake/modules/GUDHI_third_party_libraries.cmake
index 21c9d47b..b316740d 100644
--- a/src/cmake/modules/GUDHI_third_party_libraries.cmake
+++ b/src/cmake/modules/GUDHI_third_party_libraries.cmake
@@ -93,91 +93,92 @@ add_definitions( -DBOOST_SYSTEM_NO_DEPRECATED )
message(STATUS "boost include dirs:" ${Boost_INCLUDE_DIRS})
message(STATUS "boost library dirs:" ${Boost_LIBRARY_DIRS})
-# Find the correct Python interpreter.
-# Can be set with -DPYTHON_EXECUTABLE=/usr/bin/python3 or -DPython_ADDITIONAL_VERSIONS=3 for instance.
-find_package( PythonInterp )
-
-# find_python_module tries to import module in Python interpreter and to retrieve its version number
-# returns ${PYTHON_MODULE_NAME_UP}_VERSION and ${PYTHON_MODULE_NAME_UP}_FOUND
-function( find_python_module PYTHON_MODULE_NAME )
- string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}; print(${PYTHON_MODULE_NAME}.__version__)"
- RESULT_VARIABLE PYTHON_MODULE_RESULT
- OUTPUT_VARIABLE PYTHON_MODULE_VERSION
- ERROR_VARIABLE PYTHON_MODULE_ERROR)
- if(PYTHON_MODULE_RESULT EQUAL 0)
- # Remove all carriage returns as it can be multiline
- string(REGEX REPLACE "\n" " " PYTHON_MODULE_VERSION "${PYTHON_MODULE_VERSION}")
- message ("++ Python module ${PYTHON_MODULE_NAME} - Version ${PYTHON_MODULE_VERSION} found")
-
- set(${PYTHON_MODULE_NAME_UP}_VERSION ${PYTHON_MODULE_VERSION} PARENT_SCOPE)
- set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
- else()
- message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
- - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
- - PYTHON_MODULE_VERSION = ${PYTHON_MODULE_VERSION}
- - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
- unset(${PYTHON_MODULE_NAME_UP}_VERSION PARENT_SCOPE)
- set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
- endif()
-endfunction( find_python_module )
-
-# For modules that do not define module.__version__
-function( find_python_module_no_version PYTHON_MODULE_NAME )
- string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
- execute_process(
- COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}"
- RESULT_VARIABLE PYTHON_MODULE_RESULT
- ERROR_VARIABLE PYTHON_MODULE_ERROR)
- if(PYTHON_MODULE_RESULT EQUAL 0)
- # Remove carriage return
- message ("++ Python module ${PYTHON_MODULE_NAME} found")
- set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
- else()
- message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
- - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
- - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
- set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+if (WITH_GUDHI_PYTHON)
+ # Find the correct Python interpreter.
+ # Can be set with -DPYTHON_EXECUTABLE=/usr/bin/python3 or -DPython_ADDITIONAL_VERSIONS=3 for instance.
+ find_package( PythonInterp )
+
+ # find_python_module tries to import module in Python interpreter and to retrieve its version number
+ # returns ${PYTHON_MODULE_NAME_UP}_VERSION and ${PYTHON_MODULE_NAME_UP}_FOUND
+ function( find_python_module PYTHON_MODULE_NAME )
+ string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}; print(${PYTHON_MODULE_NAME}.__version__)"
+ RESULT_VARIABLE PYTHON_MODULE_RESULT
+ OUTPUT_VARIABLE PYTHON_MODULE_VERSION
+ ERROR_VARIABLE PYTHON_MODULE_ERROR)
+ if(PYTHON_MODULE_RESULT EQUAL 0)
+ # Remove all carriage returns as it can be multiline
+ string(REGEX REPLACE "\n" " " PYTHON_MODULE_VERSION "${PYTHON_MODULE_VERSION}")
+ message ("++ Python module ${PYTHON_MODULE_NAME} - Version ${PYTHON_MODULE_VERSION} found")
+
+ set(${PYTHON_MODULE_NAME_UP}_VERSION ${PYTHON_MODULE_VERSION} PARENT_SCOPE)
+ set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
+ else()
+ message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
+ - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
+ - PYTHON_MODULE_VERSION = ${PYTHON_MODULE_VERSION}
+ - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
+ unset(${PYTHON_MODULE_NAME_UP}_VERSION PARENT_SCOPE)
+ set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+ endif()
+ endfunction( find_python_module )
+
+ # For modules that do not define module.__version__
+ function( find_python_module_no_version PYTHON_MODULE_NAME )
+ string(TOUPPER ${PYTHON_MODULE_NAME} PYTHON_MODULE_NAME_UP)
+ execute_process(
+ COMMAND ${PYTHON_EXECUTABLE} -c "import ${PYTHON_MODULE_NAME}"
+ RESULT_VARIABLE PYTHON_MODULE_RESULT
+ ERROR_VARIABLE PYTHON_MODULE_ERROR)
+ if(PYTHON_MODULE_RESULT EQUAL 0)
+ # Remove carriage return
+ message ("++ Python module ${PYTHON_MODULE_NAME} found")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND TRUE PARENT_SCOPE)
+ else()
+ message ("PYTHON_MODULE_NAME = ${PYTHON_MODULE_NAME}
+ - PYTHON_MODULE_RESULT = ${PYTHON_MODULE_RESULT}
+ - PYTHON_MODULE_ERROR = ${PYTHON_MODULE_ERROR}")
+ set(${PYTHON_MODULE_NAME_UP}_FOUND FALSE PARENT_SCOPE)
+ endif()
+ endfunction( find_python_module_no_version )
+
+ if( PYTHONINTERP_FOUND )
+ find_python_module("cython")
+ find_python_module("pytest")
+ find_python_module("matplotlib")
+ find_python_module("numpy")
+ find_python_module("scipy")
+ find_python_module("sphinx")
+ find_python_module("sklearn")
+ find_python_module("ot")
+ find_python_module("pybind11")
+ find_python_module("torch")
+ find_python_module("pykeops")
+ find_python_module("eagerpy")
+ find_python_module_no_version("hnswlib")
+ find_python_module("tensorflow")
+ find_python_module("sphinx_paramlinks")
+ find_python_module_no_version("python_docs_theme")
endif()
-endfunction( find_python_module_no_version )
-
-if( PYTHONINTERP_FOUND )
- find_python_module("cython")
- find_python_module("pytest")
- find_python_module("matplotlib")
- find_python_module("numpy")
- find_python_module("scipy")
- find_python_module("sphinx")
- find_python_module("sklearn")
- find_python_module("ot")
- find_python_module("pybind11")
- find_python_module("torch")
- find_python_module("pykeops")
- find_python_module("eagerpy")
- find_python_module_no_version("hnswlib")
- find_python_module("tensorflow")
- find_python_module("sphinx_paramlinks")
- find_python_module_no_version("python_docs_theme")
-endif()
-
-if(NOT GUDHI_PYTHON_PATH)
- message(FATAL_ERROR "ERROR: GUDHI_PYTHON_PATH is not valid.")
-endif(NOT GUDHI_PYTHON_PATH)
-
-option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Usefull when setting rpath is not allowed" ON)
-
-if(PYTHONINTERP_FOUND AND CYTHON_FOUND)
- if(SPHINX_FOUND)
- # Documentation generation is available through sphinx
- find_program( SPHINX_PATH sphinx-build )
-
- if(NOT SPHINX_PATH)
- if(PYTHON_VERSION_MAJOR EQUAL 3)
- # In Python3, just hack sphinx-build if it does not exist
- set(SPHINX_PATH "${PYTHON_EXECUTABLE}" "-m" "sphinx.cmd.build")
- endif(PYTHON_VERSION_MAJOR EQUAL 3)
- endif(NOT SPHINX_PATH)
- endif(SPHINX_FOUND)
-endif(PYTHONINTERP_FOUND AND CYTHON_FOUND)
-
+
+ if(NOT GUDHI_PYTHON_PATH)
+ message(FATAL_ERROR "ERROR: GUDHI_PYTHON_PATH is not valid.")
+ endif(NOT GUDHI_PYTHON_PATH)
+
+ option(WITH_GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS "Build with setting runtime_library_dirs. Useful when setting rpath is not allowed" ON)
+
+ if(PYTHONINTERP_FOUND AND CYTHON_FOUND)
+ if(SPHINX_FOUND)
+ # Documentation generation is available through sphinx
+ find_program( SPHINX_PATH sphinx-build )
+
+ if(NOT SPHINX_PATH)
+ if(PYTHON_VERSION_MAJOR EQUAL 3)
+ # In Python3, just hack sphinx-build if it does not exist
+ set(SPHINX_PATH "${PYTHON_EXECUTABLE}" "-m" "sphinx.cmd.build")
+ endif(PYTHON_VERSION_MAJOR EQUAL 3)
+ endif(NOT SPHINX_PATH)
+ endif(SPHINX_FOUND)
+ endif(PYTHONINTERP_FOUND AND CYTHON_FOUND)
+endif (WITH_GUDHI_PYTHON)
\ No newline at end of file
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 33711880..e3119cfc 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -171,12 +171,14 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ")
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ")
set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ")
endif ()
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
+ endif ()
if(CGAL_FOUND)
if(NOT CGAL_VERSION VERSION_LESS 5.3.0)
@@ -313,7 +315,7 @@ if(PYTHONINTERP_FOUND)
if(SCIPY_FOUND)
if(SKLEARN_FOUND)
if(OT_FOUND)
- if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ if(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
set (GUDHI_SPHINX_MESSAGE "Generating API documentation with Sphinx in ${CMAKE_CURRENT_BINARY_DIR}/sphinx/")
# User warning - Sphinx is a static pages generator, and configured to work fine with user_version
# Images and biblio warnings because not found on developper version
@@ -333,10 +335,10 @@ if(PYTHONINTERP_FOUND)
${SPHINX_PATH} -b doctest ${CMAKE_CURRENT_SOURCE_DIR}/doc ${CMAKE_CURRENT_BINARY_DIR}/doctest)
# Set missing or not modules
set(GUDHI_MODULES ${GUDHI_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MODULES")
- else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 4.11.0")
+ else(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ message("++ Python documentation module will not be compiled because it requires a Eigen3 and CGAL version >= 5.1.0")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
- endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ endif(NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
else(OT_FOUND)
message("++ Python documentation module will not be compiled because POT was not found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES")
@@ -372,13 +374,15 @@ if(PYTHONINTERP_FOUND)
# Test examples
- if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Bottleneck and Alpha
add_test(NAME alpha_rips_persistence_bottleneck_distance_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_rips_persistence_bottleneck_distance.py"
-f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off -t 0.15 -d 3)
+ endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Tangential
add_test(NAME tangential_complex_plain_homology_from_off_file_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
@@ -446,7 +450,7 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_cover_complex)
endif (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
# Alpha
add_test(NAME alpha_complex_from_points_example_py_test
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
@@ -462,7 +466,7 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_diagram_persistence_from_off_file_example.py"
--no-diagram -f ${CMAKE_SOURCE_DIR}/data/points/tore3D_300.off)
add_gudhi_py_test(test_alpha_complex)
- endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
+ endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
# Euclidean witness
@@ -547,6 +551,11 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_representations)
endif()
+ # Betti curves
+ if(SKLEARN_FOUND AND SCIPY_FOUND)
+ add_gudhi_py_test(test_betti_curve_representations)
+ endif()
+
# Time Delay
add_gudhi_py_test(test_time_delay)
@@ -585,4 +594,4 @@ if(PYTHONINTERP_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
-endif(PYTHONINTERP_FOUND)
+endif(PYTHONINTERP_FOUND)
\ No newline at end of file
diff --git a/src/python/doc/alpha_complex_ref.rst b/src/python/doc/alpha_complex_ref.rst
index 7da79543..eaa72551 100644
--- a/src/python/doc/alpha_complex_ref.rst
+++ b/src/python/doc/alpha_complex_ref.rst
@@ -9,6 +9,5 @@ Alpha complex reference manual
.. autoclass:: gudhi.AlphaComplex
:members:
:undoc-members:
- :show-inheritance:
.. automethod:: gudhi.AlphaComplex.__init__
diff --git a/src/python/doc/alpha_complex_sum.inc b/src/python/doc/alpha_complex_sum.inc
index aeab493f..5c76fd54 100644
--- a/src/python/doc/alpha_complex_sum.inc
+++ b/src/python/doc/alpha_complex_sum.inc
@@ -1,15 +1,15 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
- | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
- | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. It has the same persistent homology | |
- | :alt: Alpha complex representation | as the Čech complex and is significantly smaller. | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | | |
- | | | :License: MIT (`GPL v3 </licensing/>`_) |
- | | | |
- | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0 |
- | | | |
- +----------------------------------------------------------------+-------------------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+
- | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
- +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Alpha complex is a simplicial complex constructed from the finite | :Author: Vincent Rouvreau |
+ | ../../doc/Alpha_complex/alpha_complex_representation.png | cells of a Delaunay Triangulation. It has the same persistent homology | |
+ | :alt: Alpha complex representation | as the Čech complex and is significantly smaller. | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | | |
+ | | | :License: MIT (`GPL v3 </licensing/>`_) |
+ | | | |
+ | | | :Requires: `Eigen <installation.html#eigen>`_ :math:`\geq` 3.1.0 and `CGAL <installation.html#cgal>`_ :math:`\geq` 5.1 |
+ | | | |
+ +----------------------------------------------------------------+-------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------+
+ | * :doc:`alpha_complex_user` | * :doc:`alpha_complex_ref` |
+ +----------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/alpha_complex_user.rst b/src/python/doc/alpha_complex_user.rst
index 96e267ef..cfd22742 100644
--- a/src/python/doc/alpha_complex_user.rst
+++ b/src/python/doc/alpha_complex_user.rst
@@ -9,7 +9,7 @@ Definition
.. include:: alpha_complex_sum.inc
-:doc:`AlphaComplex <alpha_complex_ref>` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
+:class:`~gudhi.AlphaComplex` is constructing a :doc:`SimplexTree <simplex_tree_ref>` using
`Delaunay Triangulation <http://doc.cgal.org/latest/Triangulation/index.html#Chapter_Triangulations>`_
:cite:`cgal:hdj-t-19b` from the `Computational Geometry Algorithms Library <http://www.cgal.org/>`_
:cite:`cgal:eb-19b`.
@@ -33,9 +33,6 @@ Remarks
Using :code:`precision = 'fast'` makes the computations slightly faster, and the combinatorics are still exact, but
the computation of filtration values can exceptionally be arbitrarily bad. In all cases, we still guarantee that the
output is a valid filtration (faces have a filtration value no larger than their cofaces).
-* For performances reasons, it is advised to use Alpha_complex with `CGAL <installation.html#cgal>`_ :math:`\geq` 5.0.0.
-* The vertices in the output simplex tree are not guaranteed to match the order of the input points. One can use
- :func:`~gudhi.AlphaComplex.get_point` to get the initial point back.
Example from points
-------------------
@@ -44,23 +41,22 @@ This example builds the alpha-complex from the given points:
.. testcode::
- import gudhi
- alpha_complex = gudhi.AlphaComplex(points=[[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]])
+ from gudhi import AlphaComplex
+ ac = AlphaComplex(points=[[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]])
+
+ stree = ac.create_simplex_tree()
+ print('Alpha complex is of dimension ', stree.dimension(), ' - ',
+ stree.num_simplices(), ' simplices - ', stree.num_vertices(), ' vertices.')
- simplex_tree = alpha_complex.create_simplex_tree()
- result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
fmt = '%s -> %.2f'
- for filtered_value in simplex_tree.get_filtration():
+ for filtered_value in stree.get_filtration():
print(fmt % tuple(filtered_value))
The output is:
.. testoutput::
- Alpha complex is of dimension 2 - 25 simplices - 7 vertices.
+ Alpha complex is of dimension 2 - 25 simplices - 7 vertices.
[0] -> 0.00
[1] -> 0.00
[2] -> 0.00
@@ -177,11 +173,75 @@ of speed-up, since we always first build the full filtered complex, so it is rec
:paramref:`~gudhi.AlphaComplex.create_simplex_tree.max_alpha_square`.
In the following example, a threshold of :math:`\alpha^2 = 32.0` is used.
+Weighted version
+^^^^^^^^^^^^^^^^
+
+A weighted version of the Alpha complex is available. It is like a usual Alpha complex, but based on a
+`CGAL regular triangulation <https://doc.cgal.org/latest/Triangulation/index.html#title20>`_.
+
+This example builds the weighted alpha-complex of a small molecule, where atoms have different sizes.
+It is taken from
+`CGAL 3d weighted alpha shapes <https://doc.cgal.org/latest/Alpha_shapes_3/index.html#title13>`_.
+
+Then, it displays some information about the alpha complex.
+
+.. testcode::
+
+ from gudhi import AlphaComplex
+ wgt_ac = AlphaComplex(points=[[ 1., -1., -1.],
+ [-1., 1., -1.],
+ [-1., -1., 1.],
+ [ 1., 1., 1.],
+ [ 2., 2., 2.]],
+ weights = [4., 4., 4., 4., 1.])
+
+ stree = wgt_ac.create_simplex_tree()
+ print('Weighted alpha complex is of dimension ', stree.dimension(), ' - ',
+ stree.num_simplices(), ' simplices - ', stree.num_vertices(), ' vertices.')
+ fmt = '%s -> %.2f'
+ for simplex in stree.get_simplices():
+ print(fmt % tuple(simplex))
+
+The output is:
+
+.. testoutput::
+
+ Weighted alpha complex is of dimension 3 - 29 simplices - 5 vertices.
+ [0, 1, 2, 3] -> -1.00
+ [0, 1, 2] -> -1.33
+ [0, 1, 3, 4] -> 95.00
+ [0, 1, 3] -> -1.33
+ [0, 1, 4] -> 95.00
+ [0, 1] -> -2.00
+ [0, 2, 3, 4] -> 95.00
+ [0, 2, 3] -> -1.33
+ [0, 2, 4] -> 95.00
+ [0, 2] -> -2.00
+ [0, 3, 4] -> 23.00
+ [0, 3] -> -2.00
+ [0, 4] -> 23.00
+ [0] -> -4.00
+ [1, 2, 3, 4] -> 95.00
+ [1, 2, 3] -> -1.33
+ [1, 2, 4] -> 95.00
+ [1, 2] -> -2.00
+ [1, 3, 4] -> 23.00
+ [1, 3] -> -2.00
+ [1, 4] -> 23.00
+ [1] -> -4.00
+ [2, 3, 4] -> 23.00
+ [2, 3] -> -2.00
+ [2, 4] -> 23.00
+ [2] -> -4.00
+ [3, 4] -> -1.00
+ [3] -> -4.00
+ [4] -> -1.00
Example from OFF file
^^^^^^^^^^^^^^^^^^^^^
-This example builds the alpha complex from 300 random points on a 2-torus.
+This example builds the alpha complex from 300 random points on a 2-torus, given by an
+`OFF file <fileformats.html#off-file-format>`_.
Then, it computes the persistence diagram and displays it:
@@ -189,14 +249,10 @@ Then, it computes the persistence diagram and displays it:
:include-source:
import matplotlib.pyplot as plt
- import gudhi
- alpha_complex = gudhi.AlphaComplex(off_file=gudhi.__root_source_dir__ + \
- '/data/points/tore3D_300.off')
- simplex_tree = alpha_complex.create_simplex_tree()
- result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
- repr(simplex_tree.num_simplices()) + ' simplices - ' + \
- repr(simplex_tree.num_vertices()) + ' vertices.'
- print(result_str)
- diag = simplex_tree.persistence()
- gudhi.plot_persistence_diagram(diag)
+ import gudhi as gd
+ off_file = gd.__root_source_dir__ + '/data/points/tore3D_300.off'
+ points = gd.read_points_from_off_file(off_file = off_file)
+ stree = gd.AlphaComplex(points = points).create_simplex_tree()
+ dgm = stree.persistence()
+ gd.plot_persistence_diagram(dgm, legend = True)
plt.show()
diff --git a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
index fe03be31..c96121a6 100755
--- a/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
+++ b/src/python/example/alpha_complex_diagram_persistence_from_off_file_example.py
@@ -1,9 +1,7 @@
#!/usr/bin/env python
import argparse
-import errno
-import os
-import gudhi
+import gudhi as gd
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
which is released under MIT.
@@ -41,33 +39,24 @@ parser.add_argument(
args = parser.parse_args()
-with open(args.file, "r") as f:
- first_line = f.readline()
- if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- print("##############################################################")
- print("AlphaComplex creation from points read in a OFF file")
-
- alpha_complex = gudhi.AlphaComplex(off_file=args.file)
- if args.max_alpha_square is not None:
- print("with max_edge_length=", args.max_alpha_square)
- simplex_tree = alpha_complex.create_simplex_tree(
- max_alpha_square=args.max_alpha_square
- )
- else:
- simplex_tree = alpha_complex.create_simplex_tree()
-
- print("Number of simplices=", simplex_tree.num_simplices())
-
- diag = simplex_tree.persistence()
-
- print("betti_numbers()=", simplex_tree.betti_numbers())
-
- if args.no_diagram == False:
- import matplotlib.pyplot as plot
- gudhi.plot_persistence_diagram(diag, band=args.band)
- plot.show()
- else:
- raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
- args.file)
-
- f.close()
+print("##############################################################")
+print("AlphaComplex creation from points read in a OFF file")
+
+points = gd.read_points_from_off_file(off_file = args.file)
+alpha_complex = gd.AlphaComplex(points = points)
+if args.max_alpha_square is not None:
+ print("with max_edge_length=", args.max_alpha_square)
+ simplex_tree = alpha_complex.create_simplex_tree(
+ max_alpha_square=args.max_alpha_square
+ )
+else:
+ simplex_tree = alpha_complex.create_simplex_tree()
+
+print("Number of simplices=", simplex_tree.num_simplices())
+
+diag = simplex_tree.persistence()
+print("betti_numbers()=", simplex_tree.betti_numbers())
+if args.no_diagram == False:
+ import matplotlib.pyplot as plot
+ gd.plot_persistence_diagram(diag, band=args.band)
+ plot.show()
diff --git a/src/python/example/alpha_rips_persistence_bottleneck_distance.py b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
index 3e12b0d5..6b97fb3b 100755
--- a/src/python/example/alpha_rips_persistence_bottleneck_distance.py
+++ b/src/python/example/alpha_rips_persistence_bottleneck_distance.py
@@ -1,10 +1,8 @@
#!/usr/bin/env python
-import gudhi
+import gudhi as gd
import argparse
import math
-import errno
-import os
import numpy as np
""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
@@ -37,70 +35,60 @@ parser.add_argument("-t", "--threshold", type=float, default=0.5)
parser.add_argument("-d", "--max_dimension", type=int, default=1)
args = parser.parse_args()
-with open(args.file, "r") as f:
- first_line = f.readline()
- if (first_line == "OFF\n") or (first_line == "nOFF\n"):
- point_cloud = gudhi.read_points_from_off_file(off_file=args.file)
- print("##############################################################")
- print("RipsComplex creation from points read in a OFF file")
+point_cloud = gd.read_points_from_off_file(off_file=args.file)
+print("##############################################################")
+print("RipsComplex creation from points read in a OFF file")
- message = "RipsComplex with max_edge_length=" + repr(args.threshold)
- print(message)
+message = "RipsComplex with max_edge_length=" + repr(args.threshold)
+print(message)
- rips_complex = gudhi.RipsComplex(
- points=point_cloud, max_edge_length=args.threshold
- )
-
- rips_stree = rips_complex.create_simplex_tree(
- max_dimension=args.max_dimension)
-
- message = "Number of simplices=" + repr(rips_stree.num_simplices())
- print(message)
-
- rips_stree.compute_persistence()
-
- print("##############################################################")
- print("AlphaComplex creation from points read in a OFF file")
-
- message = "AlphaComplex with max_edge_length=" + repr(args.threshold)
- print(message)
-
- alpha_complex = gudhi.AlphaComplex(points=point_cloud)
- alpha_stree = alpha_complex.create_simplex_tree(
- max_alpha_square=(args.threshold * args.threshold)
- )
-
- message = "Number of simplices=" + repr(alpha_stree.num_simplices())
- print(message)
+rips_complex = gd.RipsComplex(
+ points=point_cloud, max_edge_length=args.threshold
+)
- alpha_stree.compute_persistence()
+rips_stree = rips_complex.create_simplex_tree(
+ max_dimension=args.max_dimension)
- max_b_distance = 0.0
- for dim in range(args.max_dimension):
- # Alpha persistence values needs to be transform because filtration
- # values are alpha square values
- alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim))
+message = "Number of simplices=" + repr(rips_stree.num_simplices())
+print(message)
- rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
- bottleneck_distance = gudhi.bottleneck_distance(
- rips_intervals, alpha_intervals
- )
- message = (
- "In dimension "
- + repr(dim)
- + ", bottleneck distance = "
- + repr(bottleneck_distance)
- )
- print(message)
- max_b_distance = max(bottleneck_distance, max_b_distance)
+rips_stree.compute_persistence()
- print("==============================================================")
- message = "Bottleneck distance is " + repr(max_b_distance)
- print(message)
+print("##############################################################")
+print("AlphaComplex creation from points read in a OFF file")
- else:
- raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
- args.file)
+message = "AlphaComplex with max_edge_length=" + repr(args.threshold)
+print(message)
+alpha_complex = gd.AlphaComplex(points=point_cloud)
+alpha_stree = alpha_complex.create_simplex_tree(
+ max_alpha_square=(args.threshold * args.threshold)
+)
- f.close()
+message = "Number of simplices=" + repr(alpha_stree.num_simplices())
+print(message)
+
+alpha_stree.compute_persistence()
+
+max_b_distance = 0.0
+for dim in range(args.max_dimension):
+ # Alpha persistence values need to be transformed because filtration
+ # values are alpha square values
+ alpha_intervals = np.sqrt(alpha_stree.persistence_intervals_in_dimension(dim))
+
+ rips_intervals = rips_stree.persistence_intervals_in_dimension(dim)
+ bottleneck_distance = gd.bottleneck_distance(
+ rips_intervals, alpha_intervals
+ )
+ message = (
+ "In dimension "
+ + repr(dim)
+ + ", bottleneck distance = "
+ + repr(bottleneck_distance)
+ )
+ print(message)
+ max_b_distance = max(bottleneck_distance, max_b_distance)
+
+print("==============================================================")
+message = "Bottleneck distance is " + repr(max_b_distance)
+print(message)
diff --git a/src/python/example/plot_alpha_complex.py b/src/python/example/plot_alpha_complex.py
index 99c18a7c..0924619b 100755
--- a/src/python/example/plot_alpha_complex.py
+++ b/src/python/example/plot_alpha_complex.py
@@ -1,8 +1,9 @@
#!/usr/bin/env python
import numpy as np
-import gudhi
-ac = gudhi.AlphaComplex(off_file='../../data/points/tore3D_1307.off')
+import gudhi as gd
+points = gd.read_points_from_off_file(off_file = '../../data/points/tore3D_1307.off')
+ac = gd.AlphaComplex(points = points)
st = ac.create_simplex_tree()
points = np.array([ac.get_point(i) for i in range(st.num_vertices())])
# We want to plot the alpha-complex with alpha=0.1.
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index ea128743..a4888914 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -16,7 +16,7 @@ from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
from libc.stdint cimport intptr_t
-import os
+import warnings
from gudhi.simplex_tree cimport *
from gudhi.simplex_tree import SimplexTree
@@ -28,66 +28,72 @@ __license__ = "GPL v3"
cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
cdef cppclass Alpha_complex_interface "Gudhi::alpha_complex::Alpha_complex_interface":
- Alpha_complex_interface(vector[vector[double]] points, bool fast_version, bool exact_version) nogil except +
+ Alpha_complex_interface(vector[vector[double]] points, vector[double] weights, bool fast_version, bool exact_version) nogil except +
vector[double] get_point(int vertex) nogil except +
void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square, bool default_filtration_value) nogil except +
# AlphaComplex python interface
cdef class AlphaComplex:
- """AlphaComplex is a simplicial complex constructed from the finite cells
- of a Delaunay Triangulation.
+ """AlphaComplex is a simplicial complex constructed from the finite cells of a Delaunay Triangulation.
- The filtration value of each simplex is computed as the square of the
- circumradius of the simplex if the circumsphere is empty (the simplex is
- then said to be Gabriel), and as the minimum of the filtration values of
- the codimension 1 cofaces that make it not Gabriel otherwise.
+ The filtration value of each simplex is computed as the square of the circumradius of the simplex if the
+ circumsphere is empty (the simplex is then said to be Gabriel), and as the minimum of the filtration values of the
+ codimension 1 cofaces that make it not Gabriel otherwise.
- All simplices that have a filtration value strictly greater than a given
- alpha squared value are not inserted into the complex.
+ All simplices that have a filtration value strictly greater than a given alpha squared value are not inserted into
+ the complex.
.. note::
- When Alpha_complex is constructed with an infinite value of alpha, the
- complex is a Delaunay complex.
-
+ When Alpha_complex is constructed with an infinite value of alpha, the complex is a Delaunay complex.
"""
cdef Alpha_complex_interface * this_ptr
# Fake constructor that does nothing but documenting the constructor
- def __init__(self, points=None, off_file='', precision='safe'):
+ def __init__(self, points=[], off_file='', weights=None, precision='safe'):
"""AlphaComplex constructor.
:param points: A list of points in d-Dimension.
- :type points: list of list of double
-
- Or
+ :type points: Iterable[Iterable[float]]
- :param off_file: An OFF file style name.
+ :param off_file: **[deprecated]** An `OFF file style <fileformats.html#off-file-format>`_ name.
+ If an `off_file` is given with `points` as arguments, only points from the file are taken into account.
:type off_file: string
+ :param weights: A list of weights. If set, the number of weights must correspond to the number of points.
+ :type weights: Iterable[float]
+
:param precision: Alpha complex precision can be 'fast', 'safe' or 'exact'. Default is 'safe'.
:type precision: string
+
+ :raises FileNotFoundError: **[deprecated]** If `off_file` is set but not found.
+ :raises ValueError: In case of inconsistency between the number of points and weights.
"""
# The real cython constructor
- def __cinit__(self, points = None, off_file = '', precision = 'safe'):
+ def __cinit__(self, points = [], off_file = '', weights=None, precision = 'safe'):
assert precision in ['fast', 'safe', 'exact'], "Alpha complex precision can only be 'fast', 'safe' or 'exact'"
cdef bool fast = precision == 'fast'
cdef bool exact = precision == 'exact'
- cdef vector[vector[double]] pts
if off_file:
- if os.path.isfile(off_file):
- points = read_points_from_off_file(off_file = off_file)
- else:
- print("file " + off_file + " not found.")
- if points is None:
- # Empty Alpha construction
- points=[]
+ warnings.warn("off_file is a deprecated parameter, please consider using gudhi.read_points_from_off_file",
+ DeprecationWarning)
+ points = read_points_from_off_file(off_file = off_file)
+
+ # weights are set but inconsistent with the number of points
+ if weights != None and len(weights) != len(points):
+ raise ValueError("Inconsistency between the number of points and weights")
+
+ # need to copy the points to use them without the gil
+ cdef vector[vector[double]] pts
+ cdef vector[double] wgts
pts = points
+ if weights != None:
+ wgts = weights
with nogil:
- self.this_ptr = new Alpha_complex_interface(pts, fast, exact)
+ self.this_ptr = new Alpha_complex_interface(pts, wgts, fast, exact)
def __dealloc__(self):
if self.this_ptr != NULL:
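The new `weights` parameter and the consistency check added in `__cinit__` can be exercised directly from Python. A short sketch, assuming a GUDHI build that already contains this change; the points and weights are the ones from the weighted example in the user documentation:

```python
import gudhi as gd

pts = [[1., -1., -1.], [-1., 1., -1.], [-1., -1., 1.], [1., 1., 1.], [2., 2., 2.]]

# One weight per point builds a weighted alpha complex (CGAL regular triangulation).
wgt_ac = gd.AlphaComplex(points=pts, weights=[4., 4., 4., 4., 1.])
stree = wgt_ac.create_simplex_tree()
print('dimension', stree.dimension(), '-', stree.num_simplices(), 'simplices')

# A weight count that does not match the point count raises ValueError.
try:
    gd.AlphaComplex(points=pts, weights=[4., 4.])
except ValueError as err:
    print('rejected:', err)
```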
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index e883b5dd..f8078d03 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -1,15 +1,17 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière, Martin Royer
+# Author(s): Mathieu Carrière, Martin Royer, Gard Spreemann
#
# Copyright (C) 2018-2020 Inria
#
# Modification(s):
# - 2020/06 Martin: ATOL integration
+# - 2020/12 Gard: A more flexible Betti curve class capable of computing exact curves.
# - 2021/11 Vincent Rouvreau: factorize _automatic_sample_range
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
from sklearn.neighbors import DistanceMetric
from sklearn.metrics import pairwise
@@ -306,67 +308,162 @@ class Silhouette(BaseEstimator, TransformerMixin):
"""
return self.fit_transform([diag])[0,:]
+
class BettiCurve(BaseEstimator, TransformerMixin):
"""
- This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
+ Compute Betti curves from persistence diagrams. There are several modes of operation: with a given resolution (with or without a sample_range), with a predefined grid, or with neither. With a predefined grid, the Betti numbers are computed at exactly those grid points. Without a predefined grid and with resolution set to None, fit() builds a grid made of (at least) the filtration values at which at least one of the given persistence diagrams changes Betti numbers, and the Betti numbers are computed at those grid points; in this mode the exact Betti curve is recovered on the whole real line. Otherwise, when a resolution is given, the Betti curve is sampled evenly, either on the given sample_range or on a range computed from the persistence diagrams.
"""
- def __init__(self, resolution=100, sample_range=[np.nan, np.nan]):
+
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None):
"""
Constructor for the BettiCurve class.
Parameters:
resolution (int): number of sample for the piecewise-constant function (default 100).
sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+
+ Attributes:
+ grid_ (1d array): The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not, the grid is fitted to capture all filtration values at which the Betti numbers change.
+
+ Examples
+ --------
+ If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
+
+ >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
+ >>> result = bc(pd) # doctest: +SKIP
+
+ and
+
+ >>> from scipy.interpolate import interp1d # doctest: +SKIP
+ >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
+ >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
+ >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
+ >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
+
+ are the same.
"""
- self.resolution, self.sample_range = resolution, sample_range
- def fit(self, X, y=None):
+ if (predefined_grid is not None) and (not isinstance(predefined_grid, np.ndarray)):
+ raise ValueError("Expected predefined_grid as array or None.")
+
+ self.predefined_grid = predefined_grid
+ self.resolution = resolution
+ self.sample_range = sample_range
+
+ def is_fitted(self):
+ return hasattr(self, "grid_")
+
+ def fit(self, X, y = None):
"""
- Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+ Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams. When no predefined grid is provided and resolution is set to None, compute a filtration grid that captures all changes in Betti numbers across the given persistence diagrams.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
- y (n x 1 array): persistence diagram labels (unused).
+ X (list of 2d arrays): Persistence diagrams.
+ y (None): Ignored.
"""
- self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
+
+ if self.predefined_grid is None:
+ if self.resolution is None: # Flexible/exact version
+ events = np.unique(np.concatenate([pd.flatten() for pd in X] + [[-np.inf]], axis=0))
+ self.grid_ = np.array(events)
+ else:
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
+ self.grid_ = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ else:
+ self.grid_ = self.predefined_grid # Get the predefined grid from user
+
return self
def transform(self, X):
"""
- Compute the Betti curve for each persistence diagram individually and concatenate the results.
+ Compute Betti curves.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
-
+ X (list of 2d arrays): Persistence diagrams.
+
Returns:
- numpy array with shape (number of diagrams) x (**resolution**): output Betti curves.
+ `len(X) x len(self.grid_)` array of ints: Betti numbers of the given persistence diagrams at the grid points given in `self.grid_`.
"""
- Xfit = []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
- for diagram in X:
- diagram_int = np.clip(np.ceil((diagram[:,:2] - self.sample_range[0]) / step_x), 0, self.resolution).astype(int)
- bc = np.zeros(self.resolution)
- for interval in diagram_int:
- bc[interval[0]:interval[1]] += 1
- Xfit.append(np.reshape(bc,[1,-1]))
+ if not self.is_fitted():
+ raise NotFittedError("Not fitted.")
- Xfit = np.concatenate(Xfit, 0)
+ if not X:
+ X = [np.zeros((0, 2))]
+
+ N = len(X)
- return Xfit
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
- def __call__(self, diag):
+ bettis = [[0] for i in range(0, N)]
+
+ i = 0
+ for x in self.grid_:
+ while i < len(sorting) and events[sorting[i]] <= x:
+ j = np.searchsorted(ends, sorting[i])
+ delta = 1 if sorting[i] - starts[j] < len(X[j]) else -1
+ bettis[j][-1] += delta
+ i += 1
+ for k in range(0, N):
+ bettis[k].append(bettis[k][-1])
+
+ return np.array(bettis, dtype=int)[:, 0:-1]
+
+ def fit_transform(self, X):
+ """
+ The result is the same as fit(X) followed by transform(X), but potentially faster.
"""
- Apply BettiCurve on a single persistence diagram and outputs the result.
- Parameters:
- diag (n x 2 numpy array): input persistence diagram.
+ if self.predefined_grid is None and self.resolution is None:
+ if not X:
+ X = [np.zeros((0, 2))]
- Returns:
- numpy array with shape (**resolution**): output Betti curve.
+ N = len(X)
+
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
+
+ xs = [-np.inf]
+ bettis = [[0] for i in range(0, N)]
+
+ for i in sorting:
+ j = np.searchsorted(ends, i)
+ delta = 1 if i - starts[j] < len(X[j]) else -1
+ if events[i] == xs[-1]:
+ bettis[j][-1] += delta
+ else:
+ xs.append(events[i])
+ for k in range(0, j):
+ bettis[k].append(bettis[k][-1])
+ bettis[j].append(bettis[j][-1] + delta)
+ for k in range(j+1, N):
+ bettis[k].append(bettis[k][-1])
+
+ self.grid_ = np.array(xs)
+ return np.array(bettis, dtype=int)
+
+ else:
+ return self.fit(X).transform(X)
+
+ def __call__(self, diag):
"""
- return self.fit_transform([diag])[0,:]
+ Shorthand for fit_transform on a single persistence diagram.
+ """
+ return self.fit_transform([diag])[0, :]
+
+
class Entropy(BaseEstimator, TransformerMixin):
"""
diff --git a/src/python/include/Alpha_complex_factory.h b/src/python/include/Alpha_complex_factory.h
index 3405fdd6..3d20aa8f 100644
--- a/src/python/include/Alpha_complex_factory.h
+++ b/src/python/include/Alpha_complex_factory.h
@@ -31,15 +31,34 @@ namespace Gudhi {
namespace alpha_complex {
-template <typename CgalPointType>
-std::vector<double> pt_cgal_to_cython(CgalPointType const& point) {
- std::vector<double> vd;
- vd.reserve(point.dimension());
- for (auto coord = point.cartesian_begin(); coord != point.cartesian_end(); coord++)
- vd.push_back(CGAL::to_double(*coord));
- return vd;
-}
+// Template functor that transforms a CGAL point into a vector of double, as expected by cython
+template<typename CgalPointType, bool Weighted>
+struct Point_cgal_to_cython;
+
+// Specialized Unweighted Functor
+template<typename CgalPointType>
+struct Point_cgal_to_cython<CgalPointType, false> {
+ std::vector<double> operator()(CgalPointType const& point) const
+ {
+ std::vector<double> vd;
+ vd.reserve(point.dimension());
+ for (auto coord = point.cartesian_begin(); coord != point.cartesian_end(); coord++)
+ vd.push_back(CGAL::to_double(*coord));
+ return vd;
+ }
+};
+// Specialized Weighted Functor
+template<typename CgalPointType>
+struct Point_cgal_to_cython<CgalPointType, true> {
+ std::vector<double> operator()(CgalPointType const& weighted_point) const
+ {
+ const auto& point = weighted_point.point();
+ return Point_cgal_to_cython<decltype(point), false>()(point);
+ }
+};
+
+// Function that transforms a cython point (aka. a vector of double) to a CGAL point
template <typename CgalPointType>
static CgalPointType pt_cython_to_cgal(std::vector<double> const& vec) {
return CgalPointType(vec.size(), vec.begin(), vec.end());
@@ -51,24 +70,35 @@ class Abstract_alpha_complex {
virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
bool default_filtration_value) = 0;
+
+ virtual std::size_t num_vertices() const = 0;
virtual ~Abstract_alpha_complex() = default;
};
-class Exact_Alphacomplex_dD final : public Abstract_alpha_complex {
+template <bool Weighted = false>
+class Exact_alpha_complex_dD final : public Abstract_alpha_complex {
private:
using Kernel = CGAL::Epeck_d<CGAL::Dynamic_dimension_tag>;
- using Point = typename Kernel::Point_d;
+ using Bare_point = typename Kernel::Point_d;
+ using Point = std::conditional_t<Weighted, typename Kernel::Weighted_point_d,
+ typename Kernel::Point_d>;
public:
- Exact_Alphacomplex_dD(const std::vector<std::vector<double>>& points, bool exact_version)
+ Exact_alpha_complex_dD(const std::vector<std::vector<double>>& points, bool exact_version)
+ : exact_version_(exact_version),
+ alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>)) {
+ }
+
+ Exact_alpha_complex_dD(const std::vector<std::vector<double>>& points,
+ const std::vector<double>& weights, bool exact_version)
: exact_version_(exact_version),
- alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Point>)) {
+ alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>), weights) {
}
virtual std::vector<double> get_point(int vh) override {
- Point const& point = alpha_complex_.get_point(vh);
- return pt_cgal_to_cython(point);
+ // Can be a Weighted or a Bare point, depending on Weighted
+ return Point_cgal_to_cython<Point, Weighted>()(alpha_complex_.get_point(vh));
}
virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
@@ -76,65 +106,49 @@ class Exact_Alphacomplex_dD final : public Abstract_alpha_complex {
return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, exact_version_, default_filtration_value);
}
+ virtual std::size_t num_vertices() const {
+ return alpha_complex_.num_vertices();
+ }
+
private:
bool exact_version_;
- Alpha_complex<Kernel> alpha_complex_;
+ Alpha_complex<Kernel, Weighted> alpha_complex_;
};
-class Inexact_Alphacomplex_dD final : public Abstract_alpha_complex {
+template <bool Weighted = false>
+class Inexact_alpha_complex_dD final : public Abstract_alpha_complex {
private:
using Kernel = CGAL::Epick_d<CGAL::Dynamic_dimension_tag>;
- using Point = typename Kernel::Point_d;
+ using Bare_point = typename Kernel::Point_d;
+ using Point = std::conditional_t<Weighted, typename Kernel::Weighted_point_d,
+ typename Kernel::Point_d>;
public:
- Inexact_Alphacomplex_dD(const std::vector<std::vector<double>>& points, bool exact_version)
- : exact_version_(exact_version),
- alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Point>)) {
+ Inexact_alpha_complex_dD(const std::vector<std::vector<double>>& points)
+ : alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>)) {
+ }
+
+ Inexact_alpha_complex_dD(const std::vector<std::vector<double>>& points, const std::vector<double>& weights)
+ : alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal<Bare_point>), weights) {
}
virtual std::vector<double> get_point(int vh) override {
- Point const& point = alpha_complex_.get_point(vh);
- return pt_cgal_to_cython(point);
+ // Can be a Weighted or a Bare point, depending on Weighted
+ return Point_cgal_to_cython<Point, Weighted>()(alpha_complex_.get_point(vh));
}
virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
bool default_filtration_value) override {
- return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, exact_version_, default_filtration_value);
+ return alpha_complex_.create_complex(*simplex_tree, max_alpha_square, false, default_filtration_value);
}
- private:
- bool exact_version_;
- Alpha_complex<Kernel> alpha_complex_;
-};
-
-template <complexity Complexity>
-class Alphacomplex_3D final : public Abstract_alpha_complex {
- private:
- using Point = typename Alpha_complex_3d<Complexity, false, false>::Bare_point_3;
-
- static Point pt_cython_to_cgal_3(std::vector<double> const& vec) {
- return Point(vec[0], vec[1], vec[2]);
- }
-
- public:
- Alphacomplex_3D(const std::vector<std::vector<double>>& points)
- : alpha_complex_(boost::adaptors::transform(points, pt_cython_to_cgal_3)) {
- }
-
- virtual std::vector<double> get_point(int vh) override {
- Point const& point = alpha_complex_.get_point(vh);
- return pt_cgal_to_cython(point);
- }
-
- virtual bool create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
- bool default_filtration_value) override {
- return alpha_complex_.create_complex(*simplex_tree, max_alpha_square);
+ virtual std::size_t num_vertices() const {
+ return alpha_complex_.num_vertices();
}
private:
- Alpha_complex_3d<Complexity, false, false> alpha_complex_;
+ Alpha_complex<Kernel, Weighted> alpha_complex_;
};
-
} // namespace alpha_complex
} // namespace Gudhi
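Note that the weighted specialisation of Point_cgal_to_cython strips the weight, so get_point on a weighted complex returns only the bare coordinates; a sketch at the Python level (assuming this branch, with point order preserved as in the existing tests):

>>> from gudhi import AlphaComplex  # doctest: +SKIP
>>> ac = AlphaComplex(points=[[0., 0.], [1., 0.]], weights=[2., 1.])  # doctest: +SKIP
>>> ac.get_point(0)  # the weight is not returned  # doctest: +SKIP
[0.0, 0.0]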
diff --git a/src/python/include/Alpha_complex_interface.h b/src/python/include/Alpha_complex_interface.h
index 23be194d..671af4a4 100644
--- a/src/python/include/Alpha_complex_interface.h
+++ b/src/python/include/Alpha_complex_interface.h
@@ -27,10 +27,23 @@ namespace alpha_complex {
class Alpha_complex_interface {
public:
- Alpha_complex_interface(const std::vector<std::vector<double>>& points, bool fast_version, bool exact_version)
- : points_(points),
- fast_version_(fast_version),
- exact_version_(exact_version) {
+ Alpha_complex_interface(const std::vector<std::vector<double>>& points,
+ const std::vector<double>& weights,
+ bool fast_version, bool exact_version) {
+ const bool weighted = (weights.size() > 0);
+ if (fast_version) {
+ if (weighted) {
+ alpha_ptr_ = std::make_unique<Inexact_alpha_complex_dD<true>>(points, weights);
+ } else {
+ alpha_ptr_ = std::make_unique<Inexact_alpha_complex_dD<false>>(points);
+ }
+ } else {
+ if (weighted) {
+ alpha_ptr_ = std::make_unique<Exact_alpha_complex_dD<true>>(points, weights, exact_version);
+ } else {
+ alpha_ptr_ = std::make_unique<Exact_alpha_complex_dD<false>>(points, exact_version);
+ }
+ }
}
std::vector<double> get_point(int vh) {
@@ -39,38 +52,13 @@ class Alpha_complex_interface {
void create_simplex_tree(Simplex_tree_interface<>* simplex_tree, double max_alpha_square,
bool default_filtration_value) {
- if (points_.size() > 0) {
- std::size_t dimension = points_[0].size();
- if (dimension == 3 && !default_filtration_value) {
- if (fast_version_)
- alpha_ptr_ = std::make_unique<Alphacomplex_3D<Gudhi::alpha_complex::complexity::FAST>>(points_);
- else if (exact_version_)
- alpha_ptr_ = std::make_unique<Alphacomplex_3D<Gudhi::alpha_complex::complexity::EXACT>>(points_);
- else
- alpha_ptr_ = std::make_unique<Alphacomplex_3D<Gudhi::alpha_complex::complexity::SAFE>>(points_);
- if (!alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value)) {
- // create_simplex_tree will fail if all points are on a plane - Retry with dD by setting dimension to 2
- dimension--;
- alpha_ptr_.reset();
- }
- }
- // Not ** else ** because we have to take into account if 3d fails
- if (dimension != 3 || default_filtration_value) {
- if (fast_version_) {
- alpha_ptr_ = std::make_unique<Inexact_Alphacomplex_dD>(points_, exact_version_);
- } else {
- alpha_ptr_ = std::make_unique<Exact_Alphacomplex_dD>(points_, exact_version_);
- }
- alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value);
- }
- }
+ // Nothing to be done in case of an empty point set
+ if (alpha_ptr_->num_vertices() > 0)
+ alpha_ptr_->create_simplex_tree(simplex_tree, max_alpha_square, default_filtration_value);
}
private:
std::unique_ptr<Abstract_alpha_complex> alpha_ptr_;
- std::vector<std::vector<double>> points_;
- bool fast_version_;
- bool exact_version_;
};
} // namespace alpha_complex
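With the dD factory selected once at construction time, create_simplex_tree now simply does nothing for an empty point set; a sketch of the resulting Python behaviour (assuming this branch), consistent with _empty_alpha in the tests below:

>>> from gudhi import AlphaComplex  # doctest: +SKIP
>>> st = AlphaComplex(points=[], precision='fast').create_simplex_tree()  # doctest: +SKIP
>>> st.num_vertices()  # doctest: +SKIP
0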
diff --git a/src/python/test/test_alpha_complex.py b/src/python/test/test_alpha_complex.py
index 814f8289..f15284f3 100755
--- a/src/python/test/test_alpha_complex.py
+++ b/src/python/test/test_alpha_complex.py
@@ -8,10 +8,12 @@
- YYYY/MM Author: Description of the modification
"""
-import gudhi as gd
+from gudhi import AlphaComplex
import math
import numpy as np
import pytest
+import warnings
+
try:
# python3
from itertools import zip_longest
@@ -19,22 +21,24 @@ except ImportError:
# python2
from itertools import izip_longest as zip_longest
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "MIT"
def _empty_alpha(precision):
- alpha_complex = gd.AlphaComplex(points=[[0, 0]], precision = precision)
+ alpha_complex = AlphaComplex(precision = precision)
+ assert alpha_complex.__is_defined() == True
+
+def _one_2d_point_alpha(precision):
+ alpha_complex = AlphaComplex(points=[[0, 0]], precision = precision)
assert alpha_complex.__is_defined() == True
def test_empty_alpha():
for precision in ['fast', 'safe', 'exact']:
_empty_alpha(precision)
+ _one_2d_point_alpha(precision)
def _infinite_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
- alpha_complex = gd.AlphaComplex(points=point_list, precision = precision)
+ alpha_complex = AlphaComplex(points=point_list, precision = precision)
assert alpha_complex.__is_defined() == True
simplex_tree = alpha_complex.create_simplex_tree()
@@ -69,18 +73,9 @@ def _infinite_alpha(precision):
assert point_list[1] == alpha_complex.get_point(1)
assert point_list[2] == alpha_complex.get_point(2)
assert point_list[3] == alpha_complex.get_point(3)
- try:
- alpha_complex.get_point(4) == []
- except IndexError:
- pass
- else:
- assert False
- try:
- alpha_complex.get_point(125) == []
- except IndexError:
- pass
- else:
- assert False
+
+ with pytest.raises(IndexError):
+ alpha_complex.get_point(len(point_list))
def test_infinite_alpha():
for precision in ['fast', 'safe', 'exact']:
@@ -88,7 +83,7 @@ def test_infinite_alpha():
def _filtered_alpha(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
- filtered_alpha = gd.AlphaComplex(points=point_list, precision = precision)
+ filtered_alpha = AlphaComplex(points=point_list, precision = precision)
simplex_tree = filtered_alpha.create_simplex_tree(max_alpha_square=0.25)
@@ -99,18 +94,9 @@ def _filtered_alpha(precision):
assert point_list[1] == filtered_alpha.get_point(1)
assert point_list[2] == filtered_alpha.get_point(2)
assert point_list[3] == filtered_alpha.get_point(3)
- try:
- filtered_alpha.get_point(4) == []
- except IndexError:
- pass
- else:
- assert False
- try:
- filtered_alpha.get_point(125) == []
- except IndexError:
- pass
- else:
- assert False
+
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(len(point_list))
assert list(simplex_tree.get_filtration()) == [
([0], 0.0),
@@ -141,10 +127,10 @@ def _safe_alpha_persistence_comparison(precision):
embedding2 = [[signal[i], delayed[i]] for i in range(len(time))]
#build alpha complex and simplex tree
- alpha_complex1 = gd.AlphaComplex(points=embedding1, precision = precision)
+ alpha_complex1 = AlphaComplex(points=embedding1, precision = precision)
simplex_tree1 = alpha_complex1.create_simplex_tree()
- alpha_complex2 = gd.AlphaComplex(points=embedding2, precision = precision)
+ alpha_complex2 = AlphaComplex(points=embedding2, precision = precision)
simplex_tree2 = alpha_complex2.create_simplex_tree()
diag1 = simplex_tree1.persistence()
@@ -162,7 +148,7 @@ def test_safe_alpha_persistence_comparison():
def _delaunay_complex(precision):
point_list = [[0, 0], [1, 0], [0, 1], [1, 1]]
- filtered_alpha = gd.AlphaComplex(points=point_list, precision = precision)
+ filtered_alpha = AlphaComplex(points=point_list, precision = precision)
simplex_tree = filtered_alpha.create_simplex_tree(default_filtration_value = True)
@@ -173,18 +159,11 @@ def _delaunay_complex(precision):
assert point_list[1] == filtered_alpha.get_point(1)
assert point_list[2] == filtered_alpha.get_point(2)
assert point_list[3] == filtered_alpha.get_point(3)
- try:
- filtered_alpha.get_point(4) == []
- except IndexError:
- pass
- else:
- assert False
- try:
- filtered_alpha.get_point(125) == []
- except IndexError:
- pass
- else:
- assert False
+
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(4)
+ with pytest.raises(IndexError):
+ filtered_alpha.get_point(125)
for filtered_value in simplex_tree.get_filtration():
assert math.isnan(filtered_value[1])
@@ -198,7 +177,13 @@ def test_delaunay_complex():
_delaunay_complex(precision)
def _3d_points_on_a_plane(precision, default_filtration_value):
- alpha = gd.AlphaComplex(off_file='alphacomplexdoc.off', precision = precision)
+ alpha = AlphaComplex(points = [[1.0, 1.0 , 0.0],
+ [7.0, 0.0 , 0.0],
+ [4.0, 6.0 , 0.0],
+ [9.0, 6.0 , 0.0],
+ [0.0, 14.0, 0.0],
+ [2.0, 19.0, 0.0],
+ [9.0, 17.0, 0.0]], precision = precision)
simplex_tree = alpha.create_simplex_tree(default_filtration_value = default_filtration_value)
assert simplex_tree.dimension() == 2
@@ -206,28 +191,16 @@ def _3d_points_on_a_plane(precision, default_filtration_value):
assert simplex_tree.num_simplices() == 25
def test_3d_points_on_a_plane():
- off_file = open("alphacomplexdoc.off", "w")
- off_file.write("OFF \n" \
- "7 0 0 \n" \
- "1.0 1.0 0.0\n" \
- "7.0 0.0 0.0\n" \
- "4.0 6.0 0.0\n" \
- "9.0 6.0 0.0\n" \
- "0.0 14.0 0.0\n" \
- "2.0 19.0 0.0\n" \
- "9.0 17.0 0.0\n" )
- off_file.close()
-
for default_filtration_value in [True, False]:
for precision in ['fast', 'safe', 'exact']:
_3d_points_on_a_plane(precision, default_filtration_value)
def _3d_tetrahedrons(precision):
points = 10*np.random.rand(10, 3)
- alpha = gd.AlphaComplex(points=points, precision = precision)
+ alpha = AlphaComplex(points = points, precision = precision)
st_alpha = alpha.create_simplex_tree(default_filtration_value = False)
# New AlphaComplex for get_point to work
- delaunay = gd.AlphaComplex(points=points, precision = precision)
+ delaunay = AlphaComplex(points = points, precision = precision)
st_delaunay = delaunay.create_simplex_tree(default_filtration_value = True)
delaunay_tetra = []
@@ -256,3 +229,60 @@ def _3d_tetrahedrons(precision):
def test_3d_tetrahedrons():
for precision in ['fast', 'safe', 'exact']:
_3d_tetrahedrons(precision)
+
+def test_off_file_deprecation_warning():
+ off_file = open("alphacomplexdoc.off", "w")
+ off_file.write("OFF \n" \
+ "7 0 0 \n" \
+ "1.0 1.0 0.0\n" \
+ "7.0 0.0 0.0\n" \
+ "4.0 6.0 0.0\n" \
+ "9.0 6.0 0.0\n" \
+ "0.0 14.0 0.0\n" \
+ "2.0 19.0 0.0\n" \
+ "9.0 17.0 0.0\n" )
+ off_file.close()
+
+ with pytest.warns(DeprecationWarning):
+ alpha = AlphaComplex(off_file="alphacomplexdoc.off")
+
+def test_non_existing_off_file():
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(FileNotFoundError):
+ alpha = AlphaComplex(off_file="pouetpouettralala.toubiloubabdou")
+
+def test_inconsistency_points_and_weights():
+ points = [[1.0, 1.0 , 0.0],
+ [7.0, 0.0 , 0.0],
+ [4.0, 6.0 , 0.0],
+ [9.0, 6.0 , 0.0],
+ [0.0, 14.0, 0.0],
+ [2.0, 19.0, 0.0],
+ [9.0, 17.0, 0.0]]
+ with pytest.raises(ValueError):
+ # 7 points, 8 weights, on purpose
+ alpha = AlphaComplex(points = points,
+ weights = [1., 2., 3., 4., 5., 6., 7., 8.])
+
+ with pytest.raises(ValueError):
+ # 7 points, 6 weights, on purpose
+ alpha = AlphaComplex(points = points,
+ weights = [1., 2., 3., 4., 5., 6.])
+
+def _weighted_doc_example(precision):
+ stree = AlphaComplex(points=[[ 1., -1., -1.],
+ [-1., 1., -1.],
+ [-1., -1., 1.],
+ [ 1., 1., 1.],
+ [ 2., 2., 2.]],
+ weights = [4., 4., 4., 4., 1.],
+ precision = precision).create_simplex_tree()
+
+ assert stree.filtration([0, 1, 2, 3]) == pytest.approx(-1.)
+ assert stree.filtration([0, 1, 3, 4]) == pytest.approx(95.)
+ assert stree.filtration([0, 2, 3, 4]) == pytest.approx(95.)
+ assert stree.filtration([1, 2, 3, 4]) == pytest.approx(95.)
+
+def test_weighted_doc_example():
+ for precision in ['fast', 'safe', 'exact']:
+ _weighted_doc_example(precision)
diff --git a/src/python/test/test_betti_curve_representations.py b/src/python/test/test_betti_curve_representations.py
new file mode 100755
index 00000000..6a45da4d
--- /dev/null
+++ b/src/python/test/test_betti_curve_representations.py
@@ -0,0 +1,59 @@
+import numpy as np
+import scipy.interpolate
+import pytest
+
+from gudhi.representations.vector_methods import BettiCurve
+
+def test_betti_curve_is_irregular_betti_curve_followed_by_interpolation():
+ m = 10
+ n = 1000
+ pinf = 0.05
+ pzero = 0.05
+ res = 100
+
+ pds = []
+ for i in range(0, m):
+ pd = np.zeros((n, 2))
+ pd[:, 0] = np.random.uniform(0, 10, n)
+ pd[:, 1] = np.random.uniform(pd[:, 0], 10, n)
+ pd[np.random.uniform(0, 1, n) < pzero, 0] = 0
+ pd[np.random.uniform(0, 1, n) < pinf, 1] = np.inf
+ pds.append(pd)
+
+ bc = BettiCurve(resolution=None, predefined_grid=None)
+ bc.fit(pds)
+ bettis = bc.transform(pds)
+
+ bc2 = BettiCurve(resolution=None, predefined_grid=None)
+ bettis2 = bc2.fit_transform(pds)
+ assert((bc2.grid_ == bc.grid_).all())
+ assert((bettis2 == bettis).all())
+
+ for i in range(0, m):
+ grid = np.linspace(pds[i][np.isfinite(pds[i])].min(), pds[i][np.isfinite(pds[i])].max() + 1, res)
+ bc_gridded = BettiCurve(predefined_grid=grid)
+ bc_gridded.fit([])
+ bettis_gridded = bc_gridded(pds[i])
+
+ interp = scipy.interpolate.interp1d(bc.grid_, bettis[i, :], kind="previous", fill_value="extrapolate")
+ bettis_interp = np.array(interp(grid), dtype=int)
+ assert((bettis_interp == bettis_gridded).all())
+
+
+def test_empty_with_predefined_grid():
+ random_grid = np.sort(np.random.uniform(0, 1, 100))
+ bc = BettiCurve(predefined_grid=random_grid)
+ bettis = bc.fit_transform([])
+ assert((bc.grid_ == random_grid).all())
+ assert((bettis == 0).all())
+
+
+def test_empty():
+ bc = BettiCurve(resolution=None, predefined_grid=None)
+ bettis = bc.fit_transform([])
+ assert(bc.grid_ == [-np.inf])
+ assert((bettis == 0).all())
+
+def test_wrong_value_of_predefined_grid():
+ with pytest.raises(ValueError):
+ BettiCurve(predefined_grid=[1, 2, 3])
diff --git a/src/python/test/test_reader_utils.py b/src/python/test/test_reader_utils.py
index e96e0569..fdfddc4b 100755
--- a/src/python/test/test_reader_utils.py
+++ b/src/python/test/test_reader_utils.py
@@ -8,8 +8,9 @@
- YYYY/MM Author: Description of the modification
"""
-import gudhi
+import gudhi as gd
import numpy as np
+from pytest import raises
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2017 Inria"
@@ -18,7 +19,7 @@ __license__ = "MIT"
def test_non_existing_csv_file():
# Try to open a non existing file
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
csv_file="pouetpouettralala.toubiloubabdou"
)
assert matrix == []
@@ -29,7 +30,7 @@ def test_full_square_distance_matrix_csv_file():
test_file = open("full_square_distance_matrix.csv", "w")
test_file.write("0;1;2;3;\n1;0;4;5;\n2;4;0;6;\n3;5;6;0;")
test_file.close()
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
csv_file="full_square_distance_matrix.csv", separator=";"
)
assert matrix == [[], [1.0], [2.0, 4.0], [3.0, 5.0, 6.0]]
@@ -40,7 +41,7 @@ def test_lower_triangular_distance_matrix_csv_file():
test_file = open("lower_triangular_distance_matrix.csv", "w")
test_file.write("\n1,\n2,3,\n4,5,6,\n7,8,9,10,")
test_file.close()
- matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
+ matrix = gd.read_lower_triangular_matrix_from_csv_file(
csv_file="lower_triangular_distance_matrix.csv", separator=","
)
assert matrix == [[], [1.0], [2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0, 10.0]]
@@ -48,11 +49,11 @@ def test_lower_triangular_distance_matrix_csv_file():
def test_non_existing_persistence_file():
# Try to open a non existing file
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="pouetpouettralala.toubiloubabdou"
)
assert persistence == []
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="pouetpouettralala.toubiloubabdou", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [])
@@ -65,21 +66,21 @@ def test_read_persistence_intervals_without_dimension():
"# Simple persistence diagram without dimension\n2.7 3.7\n9.6 14.\n34.2 34.974\n3. inf"
)
test_file.close()
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers"
)
np.testing.assert_array_equal(
persistence, [(2.7, 3.7), (9.6, 14.0), (34.2, 34.974), (3.0, float("Inf"))]
)
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers", only_this_dim=0
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_without_dimension.pers", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="persistence_intervals_without_dimension.pers"
)
assert persistence == {
@@ -94,29 +95,29 @@ def test_read_persistence_intervals_with_dimension():
"# Simple persistence diagram with dimension\n0 2.7 3.7\n1 9.6 14.\n3 34.2 34.974\n1 3. inf"
)
test_file.close()
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers"
)
np.testing.assert_array_equal(
persistence, [(2.7, 3.7), (9.6, 14.0), (34.2, 34.974), (3.0, float("Inf"))]
)
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=0
)
np.testing.assert_array_equal(persistence, [(2.7, 3.7)])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=1
)
np.testing.assert_array_equal(persistence, [(9.6, 14.0), (3.0, float("Inf"))])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=2
)
np.testing.assert_array_equal(persistence, [])
- persistence = gudhi.read_persistence_intervals_in_dimension(
+ persistence = gd.read_persistence_intervals_in_dimension(
persistence_file="persistence_intervals_with_dimension.pers", only_this_dim=3
)
np.testing.assert_array_equal(persistence, [(34.2, 34.974)])
- persistence = gudhi.read_persistence_intervals_grouped_by_dimension(
+ persistence = gd.read_persistence_intervals_grouped_by_dimension(
persistence_file="persistence_intervals_with_dimension.pers"
)
assert persistence == {
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index 93461f1e..d219ce7a 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -105,7 +105,6 @@ def test_dummy_atol():
from gudhi.representations.vector_methods import BettiCurve
-
def test_infinity():
a = np.array([[1.0, 8.0], [2.0, np.inf], [3.0, 4.0]])
c = BettiCurve(20, [0.0, 10.0])(a)
@@ -113,7 +112,6 @@ def test_infinity():
assert c[7] == 3
assert c[9] == 2
-
def test_preprocessing_empty_diagrams():
empty_diag = np.empty(shape = [0, 2])
assert not np.any(BirthPersistenceTransform()(empty_diag))
@@ -169,3 +167,4 @@ def test_kernel_empty_diagrams():
# PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)(empty_diag, empty_diag)
# PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))(empty_diag, empty_diag)
+