From a2e8ac11e22be7edff1244fbdd848ef6cbbd6903 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 15 Dec 2020 22:49:48 +0100 Subject: Next release is 3.5.0 --- .github/next_release.md | 22 +++++++++------------- CMakeGUDHIVersion.txt | 4 ++-- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.github/next_release.md b/.github/next_release.md index 5a74966d..26143b0e 100644 --- a/.github/next_release.md +++ b/.github/next_release.md @@ -1,23 +1,19 @@ -We are pleased to announce the release 3.4.0 of the GUDHI library. +We are pleased to announce the release 3.5.0 of the GUDHI library. -As a major new feature, the GUDHI library now offers dD weighted alpha complex, pip and conda packages for Python 3.9. +As a major new feature, the GUDHI library now offers ... -We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.4.0.tar.gz). +We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz). -Below is a list of changes made since GUDHI 3.3.0: +Below is a list of changes made since GUDHI 3.4.0: -- [Alpha complex](https://gudhi.inria.fr/doc/latest/group__alpha__complex.html) - - the C++ weighted version for alpha complex is now available in any dimension D. +- [Module](link) + - ... -- Simplex tree [C++](https://gudhi.inria.fr/doc/latest/class_gudhi_1_1_simplex__tree.html) [Python](http://gudhi.gforge.inria.fr/python/latest/simplex_tree_ref.html) - - A new method to reset the filtrations - - A new method to get the boundaries of a simplex - -- [Subsampling](https://gudhi.inria.fr/doc/latest/group__subsampling.html) - - The C++ function `choose_n_farthest_points()` now takes a distance function instead of a kernel as first argument, users can replace `k` with `k.squared_distance_d_object()` in each call in their code. +- [Module](link) + - ... - Miscellaneous - - The [list of bugs that were solved since GUDHI-3.3.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.4.0+is%3Aclosed) is available on GitHub. + - The [list of bugs that were solved since GUDHI-3.4.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.5.0+is%3Aclosed) is available on GitHub. All modules are distributed under the terms of the MIT license. However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details. diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt index e191ad96..db9b243b 100644 --- a/CMakeGUDHIVersion.txt +++ b/CMakeGUDHIVersion.txt @@ -1,8 +1,8 @@ # Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases set (GUDHI_MAJOR_VERSION 3) -set (GUDHI_MINOR_VERSION 4) +set (GUDHI_MINOR_VERSION 5) # GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release. 
-set (GUDHI_PATCH_VERSION 0) +set (GUDHI_PATCH_VERSION 0rc1) set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION}) message(STATUS "GUDHI version : ${GUDHI_VERSION}") -- cgit v1.2.3 From 32388973293692b544de0db976abc800178a67ed Mon Sep 17 00:00:00 2001 From: Umberto Lupo <46537483+ulupo@users.noreply.github.com> Date: Sat, 19 Dec 2020 10:16:02 +0100 Subject: Docstring improvements in RipsComplex - create_simplex_tree method referred to the Delaunay triangulation instead of the flag complex - "rips" was not capitalized - "double" was used in the docs but only "float" (which has double precision) is a Python type --- src/python/gudhi/rips_complex.pyx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx index 72e82c79..c3470292 100644 --- a/src/python/gudhi/rips_complex.pyx +++ b/src/python/gudhi/rips_complex.pyx @@ -49,13 +49,13 @@ cdef class RipsComplex: :type max_edge_length: float :param points: A list of points in d-Dimension. - :type points: list of list of double + :type points: list of list of float Or :param distance_matrix: A distance matrix (full square or lower triangular). - :type points: list of list of double + :type points: list of list of float And in both cases @@ -89,10 +89,10 @@ cdef class RipsComplex: def create_simplex_tree(self, max_dimension=1): """ - :param max_dimension: graph expansion for rips until this given maximal + :param max_dimension: graph expansion for Rips until this given maximal dimension. :type max_dimension: int - :returns: A simplex tree created from the Delaunay Triangulation. + :returns: A simplex tree encoding the Vietoris–Rips filtration. :rtype: SimplexTree """ stree = SimplexTree() -- cgit v1.2.3 From 3ffba81f566ccb05388cfabb5604befcdcfee1e5 Mon Sep 17 00:00:00 2001 From: Gard Spreemann Date: Tue, 29 Dec 2020 10:54:12 +0100 Subject: Fix building with CGAL 5.2. This is based on a similar fix for the alpha complex code. 
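The hunks below guard the functor names with `CGAL_VERSION_NR < 1050200000`, the value this patch uses for "before CGAL 5.2": starting with 5.2, the dD functors `Power_center_d` / `Power_distance_d` are obtained as `Construct_power_sphere_d` / `Compute_power_product_d`. A minimal sketch of the same guard, factored into a reusable alias so call sites stay version-agnostic — the traits member names are taken from the hunks below; everything else is illustrative only and not part of the patch:

    #include <CGAL/version.h>  // defines CGAL_VERSION_NR

    // Compatibility shim: pick the pre-5.2 or post-5.2 functor names once.
    // Assumes, as the patch itself does, that 1050200000 is the first
    // CGAL_VERSION_NR of the 5.2 series.
    template <class Traits>
    struct Power_sphere_compat {
    #if CGAL_VERSION_NR < 1050200000
      using Power_sphere  = typename Traits::Power_center_d;
      using Power_product = typename Traits::Power_distance_d;
      static Power_sphere  sphere_object(const Traits& t)  { return t.power_center_d_object(); }
      static Power_product product_object(const Traits& t) { return t.power_distance_d_object(); }
    #else
      using Power_sphere  = typename Traits::Construct_power_sphere_d;
      using Power_product = typename Traits::Compute_power_product_d;
      static Power_sphere  sphere_object(const Traits& t)  { return t.construct_power_sphere_d_object(); }
      static Power_product product_object(const Traits& t) { return t.compute_power_product_d_object(); }
    #endif
    };
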
--- src/Tangential_complex/include/gudhi/Tangential_complex.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/Tangential_complex/include/gudhi/Tangential_complex.h b/src/Tangential_complex/include/gudhi/Tangential_complex.h index f007bdd5..f3491f91 100644 --- a/src/Tangential_complex/include/gudhi/Tangential_complex.h +++ b/src/Tangential_complex/include/gudhi/Tangential_complex.h @@ -954,7 +954,11 @@ class Tangential_complex { // Triangulation's traits functor & objects typename Tr_traits::Compute_weight_d point_weight = local_tr_traits.compute_weight_d_object(); +#if CGAL_VERSION_NR < 1050200000 typename Tr_traits::Power_center_d power_center = local_tr_traits.power_center_d_object(); +#else + typename Tr_traits::Construct_power_sphere_d power_center = local_tr_traits.construct_power_sphere_d_object(); +#endif //*************************************************** // Build a minimal triangulation in the tangent space @@ -1100,7 +1104,11 @@ class Tangential_complex { std::size_t closest_pt_index = updated_pts_ds.k_nearest_neighbors(center_point, 1, false).begin()->first; typename K::Construct_weighted_point_d k_constr_wp = m_k.construct_weighted_point_d_object(); +#if CGAL_VERSION_NR < 1050200000 typename K::Power_distance_d k_power_dist = m_k.power_distance_d_object(); +#else + typename K::Compute_power_product_d k_power_dist = m_k.compute_power_product_d_object(); +#endif // Construct a weighted point equivalent to the star sphere Weighted_point star_sphere = k_constr_wp(compute_perturbed_point(i), m_squared_star_spheres_radii_incl_margin[i]); -- cgit v1.2.3 From 3a26b7b867c36ece01b58ddcb0f5031cfdd223e4 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 5 Jan 2021 07:32:27 +0100 Subject: gudhi version 3.4.1rc1 --- CMakeGUDHIVersion.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt index db9b243b..11c8766b 100644 --- a/CMakeGUDHIVersion.txt +++ b/CMakeGUDHIVersion.txt @@ -1,8 +1,8 @@ # Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases set (GUDHI_MAJOR_VERSION 3) -set (GUDHI_MINOR_VERSION 5) +set (GUDHI_MINOR_VERSION 4) # GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release. 
-set (GUDHI_PATCH_VERSION 0rc1) +set (GUDHI_PATCH_VERSION 1rc1) set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION}) message(STATUS "GUDHI version : ${GUDHI_VERSION}") -- cgit v1.2.3 From 5f6bf9af6b9036f41a3430527e73a3e1cd733e2b Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 5 Jan 2021 18:13:44 +0100 Subject: Remove sphinx tests as it fails --- .appveyor.yml | 2 +- azure-pipelines.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.appveyor.yml b/.appveyor.yml index a257debc..06de5b14 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -60,7 +60,7 @@ build_script: cd src/python & type setup.py & MSBuild Cython.sln /m /p:Configuration=Release /p:Platform=x64 & - ctest -j 1 --output-on-failure -C Release + ctest -j 1 --output-on-failure -C Release -E sphinx ) else ( MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 & ctest -j 1 --output-on-failure -C Release -E diff_files diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8e88cab5..64f3d141 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -33,5 +33,5 @@ jobs: cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. make -j 4 make doxygen - ctest -j 4 --output-on-failure # -E sphinx remove sphinx build as it fails + ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails displayName: 'Build, test and documentation generation' -- cgit v1.2.3 From cd4d24d3bdf6d39582ab74ffedf94f2f60a734ab Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 7 Jan 2021 08:07:31 +0100 Subject: CGAL 5.2 --- Dockerfile_for_circleci_image | 8 ++++---- Dockerfile_for_pip | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index ec1b8ff8..f20602b0 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -50,14 +50,14 @@ RUN apt-get install -y make \ pkg-config \ curl -RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz" \ - && tar xf CGAL-5.1.tar.xz \ +RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz" \ + && tar xf CGAL-5.2.tar.xz \ && mkdir build \ && cd build \ - && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \ + && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \ && make install \ && cd .. \ - && rm -rf build CGAL-5.1 + && rm -rf build CGAL-5.2 ADD .github/build-requirements.txt / ADD .github/test-requirements.txt / diff --git a/Dockerfile_for_pip b/Dockerfile_for_pip index d5ae6417..ada39647 100644 --- a/Dockerfile_for_pip +++ b/Dockerfile_for_pip @@ -24,14 +24,14 @@ RUN wget https://dl.bintray.com/boostorg/release/1.73.0/source/boost_1_73_0.tar. && cd .. \ && rm -rf boost -RUN wget https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz \ - && tar xf CGAL-5.1.tar.xz \ +RUN wget https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz \ + && tar xf CGAL-5.2.tar.xz \ && mkdir build \ && cd build \ - && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \ + && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \ && make install \ && cd .. 
\ - && rm -rf build CGAL-5.1 + && rm -rf build CGAL-5.2 ADD .github/build-requirements.txt / -- cgit v1.2.3 From b6d635279d6d6d412452955bb210ed99224aa4b1 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 8 Jan 2021 16:22:42 +0100 Subject: sphinx shall compile on ws and osx as sphinxcontrib-bibtex is set to version 1.0.0. Add a test strategy file. fix docker warnings at build --- .appveyor.yml | 2 +- .github/for_maintainers/tests_strategy.md | 90 ++++++++++++++++++++++++++++++ .github/test-requirements.txt | 2 +- Dockerfile_for_circleci_image | 2 +- Dockerfile_for_circleci_image_without_cgal | 55 ++++++++++++++++++ Dockerfile_gudhi_installation | 2 +- azure-pipelines.yml | 2 +- 7 files changed, 150 insertions(+), 5 deletions(-) create mode 100644 .github/for_maintainers/tests_strategy.md create mode 100644 Dockerfile_for_circleci_image_without_cgal diff --git a/.appveyor.yml b/.appveyor.yml index 06de5b14..a257debc 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -60,7 +60,7 @@ build_script: cd src/python & type setup.py & MSBuild Cython.sln /m /p:Configuration=Release /p:Platform=x64 & - ctest -j 1 --output-on-failure -C Release -E sphinx + ctest -j 1 --output-on-failure -C Release ) else ( MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 & ctest -j 1 --output-on-failure -C Release -E diff_files diff --git a/.github/for_maintainers/tests_strategy.md b/.github/for_maintainers/tests_strategy.md new file mode 100644 index 00000000..9c181740 --- /dev/null +++ b/.github/for_maintainers/tests_strategy.md @@ -0,0 +1,90 @@ +# Tests strategy + +This document sums up the test strategy that has been put in place for GUDHI continuous integration. + +The aim is to help maintainers anticipate third-party modifications and updates. + +## Builds + +### Linux + +As all the third parties are already installed (thanks to docker), the compilation has been split into categories that can be run in parallel: + +* examples (C++) +* tests (C++) +* utils (C++) +* doxygen (C++ documentation that is available in the artefacts) +* python (including documentation and code coverage that are available in the artefacts) + +(cf. `.circleci/config.yml`) + +These build categories are run with and without CGAL, and with and without Eigen, to make sure users are not blocked if a third party is missing. + +Builds with CGAL and Eigen are performed inside the docker image `gudhi/ci_for_gudhi`, based on the `Dockerfile_for_circleci_image` file. +Builds without CGAL, with or without Eigen, are performed inside the docker image `gudhi/ci_for_gudhi_wo_cgal`, based on the `Dockerfile_for_circleci_image_without_cgal` file. + +#### Update docker images + +C++ third parties are installed with apt on the latest Ubuntu LTS. + +Docker images need to be rebuilt and pushed each time `.github/build-requirements` or `.github/test-requirements` is modified, when a new third party is added, when a new CGAL version improves GUDHI performance, ... + +```bash +docker build -f Dockerfile_for_circleci_image -t gudhi/ci_for_gudhi:latest . +docker build -f Dockerfile_for_circleci_image_without_cgal -t gudhi/ci_for_gudhi_wo_cgal:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi +docker push gudhi/ci_for_gudhi:latest +docker push gudhi/ci_for_gudhi_wo_cgal:latest +``` + +### Windows + +The compilation has been split into categories to be parallelized, but for an unknown reason the builds are not run in parallel: + +* examples (C++) +* tests (C++) +* utils (C++) +* python + +Doxygen (C++) is not tested. +(cf. `.appveyor.yml`) + +C++ third parties are installed with [vcpkg](https://github.com/microsoft/vcpkg/). +In case of an installation issue, check the [vcpkg issues](https://github.com/microsoft/vcpkg/issues). + +### OSx + +The compilation has been split into categories that can be run in parallel: + +* examples (C++) +* tests (C++) +* utils (C++) +* python +* Doxygen (C++) + +(cf. `azure-pipelines.yml`) + +C++ third parties are installed with [brew](https://formulae.brew.sh/formula/). +In case of an installation issue, check the formula issues. + +## Pip packaging + +Pip packaging is done in two parts: + +* on push and pull requests, the wheels are built (pip package dry-run) +* on releases, the wheels are built and sent to pypi.org (package) + +Only the Linux pip package is based on a docker image (`gudhi/pip_for_gudhi`, based on the `Dockerfile_for_pip` file) to make it faster. + +### Update docker image + +C++ third parties are installed with yum on an image based on `quay.io/pypa/manylinux2014_x86_64`. + +The Docker image needs to be rebuilt and pushed each time `.github/build-requirements` is modified, when a new third party is added, when a new CGAL version improves GUDHI performance, ... +As `.github/test-requirements` is not installed, there is no need to rebuild the image when this file is modified. + +```bash +docker build -f Dockerfile_for_pip -t gudhi/pip_for_gudhi:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi +docker push gudhi/pip_for_gudhi:latest +``` diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt index 688a2a11..d0803574 100644 --- a/.github/test-requirements.txt +++ b/.github/test-requirements.txt @@ -1,7 +1,7 @@ pytest pytest-cov sphinx -sphinxcontrib-bibtex +sphinxcontrib-bibtex==1.0.0 sphinx-paramlinks matplotlib scipy diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image index f20602b0..60c98f66 100644 --- a/Dockerfile_for_circleci_image +++ b/Dockerfile_for_circleci_image @@ -66,4 +66,4 @@ RUN pip3 install -r build-requirements.txt RUN pip3 --no-cache-dir install -r test-requirements.txt # apt clean up -RUN apt autoremove && rm -rf /var/lib/apt/lists/* +RUN apt-get autoremove && rm -rf /var/lib/apt/lists/* diff --git a/Dockerfile_for_circleci_image_without_cgal b/Dockerfile_for_circleci_image_without_cgal new file mode 100644 index 00000000..7bf96667 --- /dev/null +++ b/Dockerfile_for_circleci_image_without_cgal @@ -0,0 +1,55 @@ +FROM ubuntu:20.04 + +# Update and upgrade distribution +RUN apt update && \ + apt upgrade -y + +# Tools necessary for installing and configuring Ubuntu +RUN apt install -y \ + apt-utils \ + locales \ + tzdata + +# Timezone +RUN echo "Europe/Paris" | tee /etc/timezone && \ + ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \ + dpkg-reconfigure -f noninteractive tzdata + +# Locale with UTF-8 support +RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \ + locale-gen && \ + update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +# Update again +RUN apt update + +# Required for Gudhi compilation +RUN apt install -y make \ + git \ + g++ \ + cmake \ + perl \ + libboost-all-dev \ + locales \ + python3 \ + python3-pip \ + python3-tk \ + python3-grpcio \ + libfreetype6-dev \ + pkg-config \ + curl + +RUN curl -LO "https://gitlab.com/libeigen/eigen/-/archive/3.3.9/eigen-3.3.9.tar.gz" \ + && tar xf eigen-3.3.9.tar.gz + +ADD .github/build-requirements.txt / +ADD .github/test-requirements.txt / + +RUN pip3 install -r build-requirements.txt +RUN pip3 --no-cache-dir install -r test-requirements.txt + +# apt clean up +RUN apt-get autoremove && rm -rf /var/lib/apt/lists/* diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation index ebd21f8d..b0e46d72 100644 --- a/Dockerfile_gudhi_installation +++ b/Dockerfile_gudhi_installation @@ -68,7 +68,7 @@ RUN pip3 install \ scikit-learn # apt clean up -RUN apt autoremove && rm -rf /var/lib/apt/lists/* +RUN apt-get autoremove && rm -rf /var/lib/apt/lists/* RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.3.0/gudhi.3.3.0.tar.gz" \ && tar xf gudhi.3.3.0.tar.gz \ diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 64f3d141..8e88cab5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -33,5 +33,5 @@ jobs: cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 .. 
make -j 4 make doxygen - ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails + ctest -j 4 --output-on-failure # -E sphinx remove sphinx build as it fails displayName: 'Build, test and documentation generation' -- cgit v1.2.3 From a506e8cee390b46076c21955f5b725193c628bc0 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 Jan 2021 09:22:52 +0100 Subject: Split weighted alpha complex unit tests as it uses a lot of memory and make the CI crash --- src/Alpha_complex/test/CMakeLists.txt | 14 +++ ..._alpha_complex_non_visible_points_unit_test.cpp | 60 ++++++++++++ .../test/Weighted_alpha_complex_unit_test.cpp | 102 --------------------- .../test/Zero_weighted_alpha_complex_unit_test.cpp | 77 ++++++++++++++++ 4 files changed, 151 insertions(+), 102 deletions(-) create mode 100644 src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp create mode 100644 src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt index db5d840f..0595ca92 100644 --- a/src/Alpha_complex/test/CMakeLists.txt +++ b/src/Alpha_complex/test/CMakeLists.txt @@ -59,4 +59,18 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0) endif() gudhi_add_boost_test(Weighted_alpha_complex_test_unit) + add_executable ( Weighted_alpha_complex_non_visible_points_test_unit Weighted_alpha_complex_non_visible_points_unit_test.cpp ) + target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${CGAL_LIBRARY}) + if (TBB_FOUND) + target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${TBB_LIBRARIES}) + endif() + gudhi_add_boost_test(Weighted_alpha_complex_non_visible_points_test_unit) + + add_executable ( Zero_weighted_alpha_complex_test_unit Zero_weighted_alpha_complex_unit_test.cpp ) + target_link_libraries(Zero_weighted_alpha_complex_test_unit ${CGAL_LIBRARY}) + if (TBB_FOUND) + target_link_libraries(Zero_weighted_alpha_complex_test_unit ${TBB_LIBRARIES}) + endif() + gudhi_add_boost_test(Zero_weighted_alpha_complex_test_unit) + endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0) \ No newline at end of file diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp new file mode 100644 index 00000000..dd83c1da --- /dev/null +++ b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp @@ -0,0 +1,60 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ * Author(s): Vincent Rouvreau + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE "weighted_alpha_complex_non_visible_points" +#include +#include + +#include +#include + +#include + +#include +#include + + +using list_of_1d_kernel_variants = boost::mpl::list, + CGAL::Epeck_d< CGAL::Dimension_tag<1>>, + CGAL::Epick_d< CGAL::Dynamic_dimension_tag >, + CGAL::Epick_d< CGAL::Dimension_tag<1>> + >; + +BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) { + // check that for 2 closed weighted 1-d points, one with a high weight to hide the second one with a small weight, + // that the point with a small weight has the same high filtration value than the edge formed by the 2 points + using Point_d = typename Kernel::Point_d; + std::vector points; + std::vector p1 {0.}; + points.emplace_back(p1.begin(), p1.end()); + // closed enough points + std::vector p2 {0.1}; + points.emplace_back(p2.begin(), p2.end()); + std::vector weights {100., 0.01}; + + Gudhi::alpha_complex::Alpha_complex alpha_complex(points, weights); + Gudhi::Simplex_tree<> stree; + BOOST_CHECK(alpha_complex.create_complex(stree)); + + std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:" + << std::endl; + for (auto f_simplex : stree.filtration_simplex_range()) { + std::clog << " ( "; + for (auto vertex : stree.simplex_vertex_range(f_simplex)) { + std::clog << vertex << " "; + } + std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl; + } + + BOOST_CHECK(stree.filtration(stree.find({0})) == -100.); + BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1}))); + BOOST_CHECK(stree.filtration(stree.find({1})) > 100000); +} \ No newline at end of file diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp index d267276c..875704ee 100644 --- a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp +++ b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp @@ -13,10 +13,8 @@ #include #include -#include #include -#include // float comparison #include #include #include @@ -25,69 +23,6 @@ #include #include #include -#include - -using list_of_exact_kernel_variants = boost::mpl::list, - CGAL::Epeck_d< CGAL::Dimension_tag<4> > - > ; - -BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) { - // Check that in exact mode for static dimension 4 the code for dD unweighted and for dD weighted with all weights - // 0 give exactly the same simplex tree (simplices and filtration values). 
- - // Random points construction - using Point_d = typename Kernel::Point_d; - std::vector points; - std::uniform_real_distribution rd_pts(-10., 10.); - std::random_device rand_dev; - std::mt19937 rand_engine(rand_dev()); - for (int idx = 0; idx < 20; idx++) { - std::vector point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)}; - points.emplace_back(point.begin(), point.end()); - } - - // Alpha complex from points - Gudhi::alpha_complex::Alpha_complex alpha_complex_from_points(points); - Gudhi::Simplex_tree<> simplex; - Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits::Filtration_value>::infinity(); - BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true)); - std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" - << std::endl; - for (auto f_simplex : simplex.filtration_simplex_range()) { - std::clog << " ( "; - for (auto vertex : simplex.simplex_vertex_range(f_simplex)) { - std::clog << vertex << " "; - } - std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl; - } - - // Alpha complex from zero weighted points - std::vector weights(20, 0.); - Gudhi::alpha_complex::Alpha_complex alpha_complex_from_zero_weighted_points(points, weights); - Gudhi::Simplex_tree<> zw_simplex; - BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true)); - - std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:" - << std::endl; - for (auto f_simplex : zw_simplex.filtration_simplex_range()) { - std::clog << " ( "; - for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) { - std::clog << vertex << " "; - } - std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl; - } - - BOOST_CHECK(zw_simplex == simplex); -} - -template -bool cgal_3d_point_sort (Point_d a,Point_d b) { - if (a[0] != b[0]) - return a[0] < b[0]; - if (a[1] != b[1]) - return a[1] < b[1]; - return a[2] < b[2]; -} BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) { // check that for random weighted 3d points in safe mode the 3D and dD codes give the same result with some tolerance @@ -189,41 +124,4 @@ BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) { } ++dD_itr; } -} - -using list_of_1d_kernel_variants = boost::mpl::list, - CGAL::Epeck_d< CGAL::Dimension_tag<1>>, - CGAL::Epick_d< CGAL::Dynamic_dimension_tag >, - CGAL::Epick_d< CGAL::Dimension_tag<1>> - >; - -BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) { - // check that for 2 closed weighted 1-d points, one with a high weight to hide the second one with a small weight, - // that the point with a small weight has the same high filtration value than the edge formed by the 2 points - using Point_d = typename Kernel::Point_d; - std::vector points; - std::vector p1 {0.}; - points.emplace_back(p1.begin(), p1.end()); - // closed enough points - std::vector p2 {0.1}; - points.emplace_back(p2.begin(), p2.end()); - std::vector weights {100., 0.01}; - - Gudhi::alpha_complex::Alpha_complex alpha_complex(points, weights); - Gudhi::Simplex_tree<> stree; - BOOST_CHECK(alpha_complex.create_complex(stree)); - - std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:" - << std::endl; - for (auto f_simplex : stree.filtration_simplex_range()) { - std::clog << " ( "; - for (auto vertex : 
stree.simplex_vertex_range(f_simplex)) { - std::clog << vertex << " "; - } - std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl; - } - - BOOST_CHECK(stree.filtration(stree.find({0})) == -100.); - BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1}))); - BOOST_CHECK(stree.filtration(stree.find({1})) > 100000); } \ No newline at end of file diff --git a/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp new file mode 100644 index 00000000..b7df07c7 --- /dev/null +++ b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp @@ -0,0 +1,77 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Vincent Rouvreau + * + * Copyright (C) 2020 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#define BOOST_TEST_DYN_LINK +#define BOOST_TEST_MODULE "zero_weighted_alpha_complex" +#include +#include + +#include + +#include +#include +#include // for std::fabs + +#include +#include +#include + +using list_of_exact_kernel_variants = boost::mpl::list, + CGAL::Epeck_d< CGAL::Dimension_tag<4> > + > ; + +BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) { + // Check that in exact mode for static dimension 4 the code for dD unweighted and for dD weighted with all weights + // 0 give exactly the same simplex tree (simplices and filtration values). + + // Random points construction + using Point_d = typename Kernel::Point_d; + std::vector points; + std::uniform_real_distribution rd_pts(-10., 10.); + std::random_device rand_dev; + std::mt19937 rand_engine(rand_dev()); + for (int idx = 0; idx < 20; idx++) { + std::vector point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)}; + points.emplace_back(point.begin(), point.end()); + } + + // Alpha complex from points + Gudhi::alpha_complex::Alpha_complex alpha_complex_from_points(points); + Gudhi::Simplex_tree<> simplex; + Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits::Filtration_value>::infinity(); + BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true)); + std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:" + << std::endl; + for (auto f_simplex : simplex.filtration_simplex_range()) { + std::clog << " ( "; + for (auto vertex : simplex.simplex_vertex_range(f_simplex)) { + std::clog << vertex << " "; + } + std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl; + } + + // Alpha complex from zero weighted points + std::vector weights(20, 0.); + Gudhi::alpha_complex::Alpha_complex alpha_complex_from_zero_weighted_points(points, weights); + Gudhi::Simplex_tree<> zw_simplex; + BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true)); + + std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:" + << std::endl; + for (auto f_simplex : zw_simplex.filtration_simplex_range()) { + std::clog << " ( "; + for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) { + std::clog << vertex << " "; + } + std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl; + } + + BOOST_CHECK(zw_simplex == simplex); +} \ No newline at 
end of file -- cgit v1.2.3 From 0afc650917ddf9fc4cf95fd86e0b6408f64a465d Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 Jan 2021 11:29:20 +0100 Subject: Remove sphinx doc test for atol as points order can be inverted and add it in a UT but sorted --- src/python/gudhi/representations/vector_methods.py | 14 +++++++------- src/python/test/test_representations.py | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py index cdcb1fde..d4449e7d 100644 --- a/src/python/gudhi/representations/vector_methods.py +++ b/src/python/gudhi/representations/vector_methods.py @@ -606,16 +606,16 @@ class Atol(BaseEstimator, TransformerMixin): >>> c = np.array([[3, 2, -1], [1, 2, -1]]) >>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006)) >>> atol_vectoriser.fit(X=[a, b, c]).centers - array([[ 2. , 0.66666667, 3.33333333], - [ 2.6 , 2.8 , -0.4 ]]) + >>> # array([[ 2. , 0.66666667, 3.33333333], + >>> # [ 2.6 , 2.8 , -0.4 ]]) >>> atol_vectoriser(a) - array([1.18168665, 0.42375966]) + >>> # array([1.18168665, 0.42375966]) >>> atol_vectoriser(c) - array([0.02062512, 1.25157463]) + >>> # array([0.02062512, 1.25157463]) >>> atol_vectoriser.transform(X=[a, b, c]) - array([[1.18168665, 0.42375966], - [0.29861028, 1.06330156], - [0.02062512, 1.25157463]]) + >>> # array([[1.18168665, 0.42375966], + >>> # [0.29861028, 1.06330156], + >>> # [0.02062512, 1.25157463]]) """ def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"): """ diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py index 43c914f3..1c8f8cdb 100755 --- a/src/python/test/test_representations.py +++ b/src/python/test/test_representations.py @@ -46,6 +46,24 @@ def test_multiple(): assert d1 == pytest.approx(d2, rel=0.02) +# Test sorted values as points order can be inverted, and sorted test is not documentation-friendly +def test_atol_doc(): + a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]]) + b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]]) + c = np.array([[3, 2, -1], [1, 2, -1]]) + + atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006)) + assert np.sort(atol_vectoriser.fit(X=[a, b, c]).centers, axis=0) == \ + pytest.approx(np.array([[2. 
, 0.66666667, -0.4], \ + [2.6, 2.8 , 3.33333333]])) + assert np.sort(atol_vectoriser(a)) == pytest.approx(np.array([0.42375966, 1.18168665])) + assert np.sort(atol_vectoriser(c)) == pytest.approx(np.array([0.02062512, 1.25157463])) + assert np.sort(atol_vectoriser.transform(X=[a, b, c]), axis=0) == \ + pytest.approx(np.array([[0.02062512, 0.42375966], \ + [0.29861028, 1.06330156], \ + [1.18168665, 1.25157463]])) + + def test_dummy_atol(): a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]]) b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]]) -- cgit v1.2.3 From 2a29df7cd54e9689e93bab90e3f64c84e8e8790f Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 Jan 2021 13:59:01 +0100 Subject: skip doctest (but run them) --- src/python/gudhi/representations/vector_methods.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py index d4449e7d..5ec2abd0 100644 --- a/src/python/gudhi/representations/vector_methods.py +++ b/src/python/gudhi/representations/vector_methods.py @@ -605,14 +605,14 @@ class Atol(BaseEstimator, TransformerMixin): >>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]]) >>> c = np.array([[3, 2, -1], [1, 2, -1]]) >>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006)) - >>> atol_vectoriser.fit(X=[a, b, c]).centers + >>> atol_vectoriser.fit(X=[a, b, c]).centers #doctest: +SKIP >>> # array([[ 2. , 0.66666667, 3.33333333], >>> # [ 2.6 , 2.8 , -0.4 ]]) >>> atol_vectoriser(a) - >>> # array([1.18168665, 0.42375966]) + >>> # array([1.18168665, 0.42375966]) #doctest: +SKIP >>> atol_vectoriser(c) - >>> # array([0.02062512, 1.25157463]) - >>> atol_vectoriser.transform(X=[a, b, c]) + >>> # array([0.02062512, 1.25157463]) #doctest: +SKIP + >>> atol_vectoriser.transform(X=[a, b, c]) #doctest: +SKIP >>> # array([[1.18168665, 0.42375966], >>> # [0.29861028, 1.06330156], >>> # [0.02062512, 1.25157463]]) -- cgit v1.2.3 From 60907b0104a2807667f175d9a8a328fd3f7f4ec8 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Mon, 11 Jan 2021 16:25:18 +0100 Subject: Ignore doctest for atol doc. Rewrite unitary test for atol doc. To be synchronized --- src/python/gudhi/representations/vector_methods.py | 9 ++++---- src/python/test/test_representations.py | 26 ++++++++++++++-------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py index 5ec2abd0..84bc99a2 100644 --- a/src/python/gudhi/representations/vector_methods.py +++ b/src/python/gudhi/representations/vector_methods.py @@ -605,18 +605,19 @@ class Atol(BaseEstimator, TransformerMixin): >>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]]) >>> c = np.array([[3, 2, -1], [1, 2, -1]]) >>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006)) - >>> atol_vectoriser.fit(X=[a, b, c]).centers #doctest: +SKIP + >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP >>> # array([[ 2. 
, 0.66666667, 3.33333333], >>> # [ 2.6 , 2.8 , -0.4 ]]) >>> atol_vectoriser(a) - >>> # array([1.18168665, 0.42375966]) #doctest: +SKIP + >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP >>> atol_vectoriser(c) - >>> # array([0.02062512, 1.25157463]) #doctest: +SKIP - >>> atol_vectoriser.transform(X=[a, b, c]) #doctest: +SKIP + >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP + >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP >>> # array([[1.18168665, 0.42375966], >>> # [0.29861028, 1.06330156], >>> # [0.02062512, 1.25157463]]) """ + # Note the example above must be up to date with the one in tests called test_atol_doc def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"): """ Constructor for the Atol measure vectorisation class. diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py index 1c8f8cdb..cda1a15b 100755 --- a/src/python/test/test_representations.py +++ b/src/python/test/test_representations.py @@ -47,21 +47,29 @@ def test_multiple(): # Test sorted values as points order can be inverted, and sorted test is not documentation-friendly +# Note the test below must be up to date with the Atol class documentation def test_atol_doc(): a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]]) b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]]) c = np.array([[3, 2, -1], [1, 2, -1]]) atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006)) - assert np.sort(atol_vectoriser.fit(X=[a, b, c]).centers, axis=0) == \ - pytest.approx(np.array([[2. , 0.66666667, -0.4], \ - [2.6, 2.8 , 3.33333333]])) - assert np.sort(atol_vectoriser(a)) == pytest.approx(np.array([0.42375966, 1.18168665])) - assert np.sort(atol_vectoriser(c)) == pytest.approx(np.array([0.02062512, 1.25157463])) - assert np.sort(atol_vectoriser.transform(X=[a, b, c]), axis=0) == \ - pytest.approx(np.array([[0.02062512, 0.42375966], \ - [0.29861028, 1.06330156], \ - [1.18168665, 1.25157463]])) + # Atol will do + # X = np.concatenate([a,b,c]) + # kmeans = KMeans(n_clusters=2, random_state=202006).fit(X) + # kmeans.labels_ will be : array([1, 0, 1, 0, 0, 1, 0, 0]) + first_cluster = np.asarray([a[0], a[2], b[2]]) + second_cluster = np.asarray([a[1], b[0], b[2], c[0], c[1]]) + + # Check the center of the first_cluster and second_cluster are in Atol centers + centers = atol_vectoriser.fit(X=[a, b, c]).centers + np.isclose(centers, first_cluster.mean(axis=0)).all(1).any() + np.isclose(centers, second_cluster.mean(axis=0)).all(1).any() + + vectorization = atol_vectoriser.transform(X=[a, b, c]) + assert np.allclose(vectorization[0], atol_vectoriser(a)) + assert np.allclose(vectorization[1], atol_vectoriser(b)) + assert np.allclose(vectorization[2], atol_vectoriser(c)) def test_dummy_atol(): -- cgit v1.2.3 From fe76c248ea315062b6a22db0acb3a7059ec6d363 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Fri, 22 Jan 2021 09:10:01 +0100 Subject: gudhi 3.4.1 --- CMakeGUDHIVersion.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt index 11c8766b..5b0777a6 100644 --- a/CMakeGUDHIVersion.txt +++ b/CMakeGUDHIVersion.txt @@ -2,7 +2,7 @@ set (GUDHI_MAJOR_VERSION 3) set (GUDHI_MINOR_VERSION 4) # GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release. 
-set (GUDHI_PATCH_VERSION 1rc1) +set (GUDHI_PATCH_VERSION 1) set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION}) message(STATUS "GUDHI version : ${GUDHI_VERSION}") -- cgit v1.2.3
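As the comment at the top of `CMakeGUDHIVersion.txt` notes, `GUDHI_VERSION` is assembled as `MAJOR.MINOR.PATCH` and must conform to PEP 440, the patch component carrying an optional pre-release suffix (`ZaN`, `ZbN`, `ZrcN`) as in the `1rc1` to `1` bump above. A small sketch showing that these strings parse and order as intended; it relies on the third-party `packaging` module, which is an assumption for illustration only and not something the build itself uses:

    # Check PEP 440 ordering of the version strings appearing in this series.
    # Requires the "packaging" module (pip install packaging) - illustrative only.
    from packaging.version import Version

    versions = ["3.5.0", "3.4.1", "3.4.1rc1", "3.4.0"]
    print(sorted(Version(v) for v in versions))
    # [<Version('3.4.0')>, <Version('3.4.1rc1')>, <Version('3.4.1')>, <Version('3.5.0')>]

    # A release candidate sorts before the corresponding final release.
    assert Version("3.4.1rc1") < Version("3.4.1") < Version("3.5.0")
    assert Version("3.4.1rc1").is_prerelease and not Version("3.4.1").is_prerelease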