summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com>2021-01-20 11:17:03 +0100
committerGitHub <noreply@github.com>2021-01-20 11:17:03 +0100
commit7f2709ea12124f514c29b10019558ba3e3ee0975 (patch)
tree82ab9568e3c5fe275773fad49243f31a16e5b038
parentca18cc5ab72eaf133adcd8b5bcde794ff8166384 (diff)
parent60907b0104a2807667f175d9a8a328fd3f7f4ec8 (diff)
Merge pull request #447 from VincentRouvreau/cgal_5_2_for_ci
CGAL 5.2 for pip and CI
-rw-r--r--.appveyor.yml2
-rw-r--r--.github/for_maintainers/tests_strategy.md90
-rw-r--r--.github/test-requirements.txt2
-rw-r--r--Dockerfile_for_circleci_image10
-rw-r--r--Dockerfile_for_circleci_image_without_cgal55
-rw-r--r--Dockerfile_for_pip8
-rw-r--r--Dockerfile_gudhi_installation2
-rw-r--r--azure-pipelines.yml2
-rw-r--r--src/Alpha_complex/test/CMakeLists.txt14
-rw-r--r--src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp60
-rw-r--r--src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp102
-rw-r--r--src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp77
-rw-r--r--src/python/gudhi/representations/vector_methods.py19
-rwxr-xr-xsrc/python/test/test_representations.py26
14 files changed, 345 insertions, 124 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 06de5b14..a257debc 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -60,7 +60,7 @@ build_script:
cd src/python &
type setup.py &
MSBuild Cython.sln /m /p:Configuration=Release /p:Platform=x64 &
- ctest -j 1 --output-on-failure -C Release -E sphinx
+ ctest -j 1 --output-on-failure -C Release
) else (
MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64 &
ctest -j 1 --output-on-failure -C Release -E diff_files
diff --git a/.github/for_maintainers/tests_strategy.md b/.github/for_maintainers/tests_strategy.md
new file mode 100644
index 00000000..9c181740
--- /dev/null
+++ b/.github/for_maintainers/tests_strategy.md
@@ -0,0 +1,90 @@
+# Tests strategy
+
+This document tries to sum up the tests strategy that has been put in place for gudhi continuous integration.
+
+The aim is to help maintainers anticipate third-party modifications and updates.
+
+## Builds
+
+### Linux
+
+As all the third parties are already installed (thanks to docker), the compilation has been separated into categories to be parallelized:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* doxygen (C++ documentation that is available in the artefacts)
+* python (including documentation and code coverage that are available in the artefacts)
+
+(cf. `.circleci/config.yml`)
+
+These build categories are done with and without CGAL, and, with and without Eigen to be sure the users won't be annoyed if a third party is missing.
+
+With CGAL and with Eigen builds are performed inside the docker image `gudhi/ci_for_gudhi` based on `Dockerfile_for_circleci_image` file.
+Without CGAL, and, with or without Eigen builds are performed inside the docker image `gudhi/ci_for_gudhi_wo_cgal` based on `Dockerfile_for_circleci_image_without_cgal` file.
+
+#### Update docker images
+
+C++ third-party installation is done thanks to apt on Ubuntu latest LTS.
+
+Docker images need to be rebuilt and pushed each time `.github/build-requirements` or `.github/test-requirements` changes, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+
+```bash
+docker build -f Dockerfile_for_circleci_image -t gudhi/ci_for_gudhi:latest .
+docker build -f Dockerfile_for_circleci_image_without_cgal -t gudhi/ci_for_gudhi_wo_cgal:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi
+docker push gudhi/ci_for_gudhi:latest
+docker push gudhi/ci_for_gudhi_wo_cgal:latest
+```
+
+### Windows
+
+The compilation has been separated into categories to be parallelized, but for an unknown reason the builds are not run in parallel:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* python
+
+Doxygen (C++) is not tested.
+(cf. `.appveyor.yml`)
+
+C++ third-party installation is done thanks to [vcpkg](https://github.com/microsoft/vcpkg/).
+In case of installation issue, check in [vcpkg issues](https://github.com/microsoft/vcpkg/issues).
+
+### OSx
+
+The compilation has been separated into categories to be parallelized:
+
+* examples (C++)
+* tests (C++)
+* utils (C++)
+* python
+* Doxygen (C++)
+
+(cf. `azure-pipelines.yml`)
+
+C++ third-party installation is done thanks to [brew](https://formulae.brew.sh/formula/).
+In case of installation issue, check in formula issues.
+
+## Pip packaging
+
+Pip packaging is done in 2 parts:
+
+* on push and pull requests, the wheels are built (pip package dry-run)
+* on releases, the wheels are built and sent to pypi.org (package)
+
+Only the Linux pip package is based on a docker image (`gudhi/pip_for_gudhi` based on `Dockerfile_for_pip` file) to make it faster.
+
+### Update docker image
+
+C++ third-party installation is done thanks to yum on an image based on `quay.io/pypa/manylinux2014_x86_64`.
+
+The Docker image needs to be rebuilt and pushed each time `.github/build-requirements` changes, when a new third party is added, when a new CGAL version improves gudhi performances, ...
+As `.github/test-requirements` is not installed, no need to rebuild image when this file is modified.
+
+```bash
+docker build -f Dockerfile_for_pip -t gudhi/pip_for_gudhi:latest .
+docker login # requires some specific rights on https://hub.docker.com/u/gudhi/repository/docker/gudhi
+docker push gudhi/pip_for_gudhi:latest
+```
diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt
index 688a2a11..d0803574 100644
--- a/.github/test-requirements.txt
+++ b/.github/test-requirements.txt
@@ -1,7 +1,7 @@
pytest
pytest-cov
sphinx
-sphinxcontrib-bibtex
+sphinxcontrib-bibtex==1.0.0
sphinx-paramlinks
matplotlib
scipy
diff --git a/Dockerfile_for_circleci_image b/Dockerfile_for_circleci_image
index ec1b8ff8..60c98f66 100644
--- a/Dockerfile_for_circleci_image
+++ b/Dockerfile_for_circleci_image
@@ -50,14 +50,14 @@ RUN apt-get install -y make \
pkg-config \
curl
-RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz" \
- && tar xf CGAL-5.1.tar.xz \
+RUN curl -LO "https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz" \
+ && tar xf CGAL-5.2.tar.xz \
&& mkdir build \
&& cd build \
- && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
+ && cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \
&& make install \
&& cd .. \
- && rm -rf build CGAL-5.1
+ && rm -rf build CGAL-5.2
ADD .github/build-requirements.txt /
ADD .github/test-requirements.txt /
@@ -66,4 +66,4 @@ RUN pip3 install -r build-requirements.txt
RUN pip3 --no-cache-dir install -r test-requirements.txt
# apt clean up
-RUN apt autoremove && rm -rf /var/lib/apt/lists/*
+RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/Dockerfile_for_circleci_image_without_cgal b/Dockerfile_for_circleci_image_without_cgal
new file mode 100644
index 00000000..7bf96667
--- /dev/null
+++ b/Dockerfile_for_circleci_image_without_cgal
@@ -0,0 +1,55 @@
+FROM ubuntu:20.04
+
+# Update and upgrade distribution
+RUN apt update && \
+ apt upgrade -y
+
+# Tools necessary for installing and configuring Ubuntu
+RUN apt install -y \
+ apt-utils \
+ locales \
+ tzdata
+
+# Timezone
+RUN echo "Europe/Paris" | tee /etc/timezone && \
+ ln -fs /usr/share/zoneinfo/Europe/Paris /etc/localtime && \
+ dpkg-reconfigure -f noninteractive tzdata
+
+# Locale with UTF-8 support
+RUN echo en_US.UTF-8 UTF-8 >> /etc/locale.gen && \
+ locale-gen && \
+ update-locale LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+# Update again
+RUN apt update
+
+# Required for Gudhi compilation
+RUN apt install -y make \
+ git \
+ g++ \
+ cmake \
+ perl \
+ libboost-all-dev \
+ locales \
+ python3 \
+ python3-pip \
+ python3-tk \
+ python3-grpcio \
+ libfreetype6-dev \
+ pkg-config \
+ curl
+
+RUN curl -LO "https://gitlab.com/libeigen/eigen/-/archive/3.3.9/eigen-3.3.9.tar.gz" \
+ && tar xf eigen-3.3.9.tar.gz
+
+ADD .github/build-requirements.txt /
+ADD .github/test-requirements.txt /
+
+RUN pip3 install -r build-requirements.txt
+RUN pip3 --no-cache-dir install -r test-requirements.txt
+
+# apt clean up
+RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
diff --git a/Dockerfile_for_pip b/Dockerfile_for_pip
index d5ae6417..ada39647 100644
--- a/Dockerfile_for_pip
+++ b/Dockerfile_for_pip
@@ -24,14 +24,14 @@ RUN wget https://dl.bintray.com/boostorg/release/1.73.0/source/boost_1_73_0.tar.
&& cd .. \
&& rm -rf boost
-RUN wget https://github.com/CGAL/cgal/releases/download/v5.1/CGAL-5.1.tar.xz \
- && tar xf CGAL-5.1.tar.xz \
+RUN wget https://github.com/CGAL/cgal/releases/download/v5.2/CGAL-5.2.tar.xz \
+ && tar xf CGAL-5.2.tar.xz \
&& mkdir build \
&& cd build \
- && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.1/ \
+ && /opt/cmake/bin/cmake -DCMAKE_BUILD_TYPE=Release ../CGAL-5.2/ \
&& make install \
&& cd .. \
- && rm -rf build CGAL-5.1
+ && rm -rf build CGAL-5.2
ADD .github/build-requirements.txt /
diff --git a/Dockerfile_gudhi_installation b/Dockerfile_gudhi_installation
index ebd21f8d..b0e46d72 100644
--- a/Dockerfile_gudhi_installation
+++ b/Dockerfile_gudhi_installation
@@ -68,7 +68,7 @@ RUN pip3 install \
scikit-learn
# apt clean up
-RUN apt autoremove && rm -rf /var/lib/apt/lists/*
+RUN apt-get autoremove && rm -rf /var/lib/apt/lists/*
RUN curl -LO "https://github.com/GUDHI/gudhi-devel/releases/download/tags%2Fgudhi-release-3.3.0/gudhi.3.3.0.tar.gz" \
&& tar xf gudhi.3.3.0.tar.gz \
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 64f3d141..8e88cab5 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -33,5 +33,5 @@ jobs:
cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
make -j 4
make doxygen
- ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails
+ ctest -j 4 --output-on-failure # -E sphinx remove sphinx build as it fails
displayName: 'Build, test and documentation generation'
diff --git a/src/Alpha_complex/test/CMakeLists.txt b/src/Alpha_complex/test/CMakeLists.txt
index db5d840f..0595ca92 100644
--- a/src/Alpha_complex/test/CMakeLists.txt
+++ b/src/Alpha_complex/test/CMakeLists.txt
@@ -59,4 +59,18 @@ if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0)
endif()
gudhi_add_boost_test(Weighted_alpha_complex_test_unit)
+ add_executable ( Weighted_alpha_complex_non_visible_points_test_unit Weighted_alpha_complex_non_visible_points_unit_test.cpp )
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Weighted_alpha_complex_non_visible_points_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Weighted_alpha_complex_non_visible_points_test_unit)
+
+ add_executable ( Zero_weighted_alpha_complex_test_unit Zero_weighted_alpha_complex_unit_test.cpp )
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${CGAL_LIBRARY})
+ if (TBB_FOUND)
+ target_link_libraries(Zero_weighted_alpha_complex_test_unit ${TBB_LIBRARIES})
+ endif()
+ gudhi_add_boost_test(Zero_weighted_alpha_complex_test_unit)
+
endif (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 5.1.0) \ No newline at end of file
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
new file mode 100644
index 00000000..dd83c1da
--- /dev/null
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_non_visible_points_unit_test.cpp
@@ -0,0 +1,60 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "weighted_alpha_complex_non_visible_points"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epick_d.h>
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+
+
+using list_of_1d_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<1>>,
+ CGAL::Epick_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epick_d< CGAL::Dimension_tag<1>>
+ >;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) {
+ // check that for 2 closed weighted 1-d points, one with a high weight to hide the second one with a small weight,
+ // that the point with a small weight has the same high filtration value than the edge formed by the 2 points
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::vector<double> p1 {0.};
+ points.emplace_back(p1.begin(), p1.end());
+ // closed enough points
+ std::vector<double> p2 {0.1};
+ points.emplace_back(p2.begin(), p2.end());
+ std::vector<typename Kernel::FT> weights {100., 0.01};
+
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex(points, weights);
+ Gudhi::Simplex_tree<> stree;
+ BOOST_CHECK(alpha_complex.create_complex(stree));
+
+ std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : stree.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(stree.filtration(stree.find({0})) == -100.);
+ BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1})));
+ BOOST_CHECK(stree.filtration(stree.find({1})) > 100000);
+} \ No newline at end of file
diff --git a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
index d267276c..875704ee 100644
--- a/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
+++ b/src/Alpha_complex/test/Weighted_alpha_complex_unit_test.cpp
@@ -13,10 +13,8 @@
#include <boost/test/unit_test.hpp>
#include <boost/mpl/list.hpp>
-#include <CGAL/Epick_d.h>
#include <CGAL/Epeck_d.h>
-#include <cmath> // float comparison
#include <vector>
#include <random>
#include <array>
@@ -25,69 +23,6 @@
#include <gudhi/Alpha_complex.h>
#include <gudhi/Alpha_complex_3d.h>
#include <gudhi/Simplex_tree.h>
-#include <gudhi/Unitary_tests_utils.h>
-
-using list_of_exact_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epeck_d< CGAL::Dimension_tag<4> >
- > ;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) {
- // Check that in exact mode for static dimension 4 the code for dD unweighted and for dD weighted with all weights
- // 0 give exactly the same simplex tree (simplices and filtration values).
-
- // Random points construction
- using Point_d = typename Kernel::Point_d;
- std::vector<Point_d> points;
- std::uniform_real_distribution<double> rd_pts(-10., 10.);
- std::random_device rand_dev;
- std::mt19937 rand_engine(rand_dev());
- for (int idx = 0; idx < 20; idx++) {
- std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
- points.emplace_back(point.begin(), point.end());
- }
-
- // Alpha complex from points
- Gudhi::alpha_complex::Alpha_complex<Kernel, false> alpha_complex_from_points(points);
- Gudhi::Simplex_tree<> simplex;
- Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
- BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true));
- std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : simplex.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl;
- }
-
- // Alpha complex from zero weighted points
- std::vector<typename Kernel::FT> weights(20, 0.);
- Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_zero_weighted_points(points, weights);
- Gudhi::Simplex_tree<> zw_simplex;
- BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true));
-
- std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : zw_simplex.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl;
- }
-
- BOOST_CHECK(zw_simplex == simplex);
-}
-
-template <typename Point_d>
-bool cgal_3d_point_sort (Point_d a,Point_d b) {
- if (a[0] != b[0])
- return a[0] < b[0];
- if (a[1] != b[1])
- return a[1] < b[1];
- return a[2] < b[2];
-}
BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) {
// check that for random weighted 3d points in safe mode the 3D and dD codes give the same result with some tolerance
@@ -189,41 +124,4 @@ BOOST_AUTO_TEST_CASE(Weighted_alpha_complex_3d_comparison) {
}
++dD_itr;
}
-}
-
-using list_of_1d_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epeck_d< CGAL::Dimension_tag<1>>,
- CGAL::Epick_d< CGAL::Dynamic_dimension_tag >,
- CGAL::Epick_d< CGAL::Dimension_tag<1>>
- >;
-
-BOOST_AUTO_TEST_CASE_TEMPLATE(Weighted_alpha_complex_non_visible_points, Kernel, list_of_1d_kernel_variants) {
- // check that for 2 closed weighted 1-d points, one with a high weight to hide the second one with a small weight,
- // that the point with a small weight has the same high filtration value than the edge formed by the 2 points
- using Point_d = typename Kernel::Point_d;
- std::vector<Point_d> points;
- std::vector<double> p1 {0.};
- points.emplace_back(p1.begin(), p1.end());
- // closed enough points
- std::vector<double> p2 {0.1};
- points.emplace_back(p2.begin(), p2.end());
- std::vector<typename Kernel::FT> weights {100., 0.01};
-
- Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex(points, weights);
- Gudhi::Simplex_tree<> stree;
- BOOST_CHECK(alpha_complex.create_complex(stree));
-
- std::clog << "Iterator on weighted alpha complex simplices in the filtration order, with [filtration value]:"
- << std::endl;
- for (auto f_simplex : stree.filtration_simplex_range()) {
- std::clog << " ( ";
- for (auto vertex : stree.simplex_vertex_range(f_simplex)) {
- std::clog << vertex << " ";
- }
- std::clog << ") -> " << "[" << stree.filtration(f_simplex) << "] " << std::endl;
- }
-
- BOOST_CHECK(stree.filtration(stree.find({0})) == -100.);
- BOOST_CHECK(stree.filtration(stree.find({1})) == stree.filtration(stree.find({0, 1})));
- BOOST_CHECK(stree.filtration(stree.find({1})) > 100000);
} \ No newline at end of file
diff --git a/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
new file mode 100644
index 00000000..b7df07c7
--- /dev/null
+++ b/src/Alpha_complex/test/Zero_weighted_alpha_complex_unit_test.cpp
@@ -0,0 +1,77 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Vincent Rouvreau
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#define BOOST_TEST_DYN_LINK
+#define BOOST_TEST_MODULE "zero_weighted_alpha_complex"
+#include <boost/test/unit_test.hpp>
+#include <boost/mpl/list.hpp>
+
+#include <CGAL/Epeck_d.h>
+
+#include <vector>
+#include <random>
+#include <cmath> // for std::fabs
+
+#include <gudhi/Alpha_complex.h>
+#include <gudhi/Simplex_tree.h>
+#include <gudhi/Unitary_tests_utils.h>
+
+using list_of_exact_kernel_variants = boost::mpl::list<CGAL::Epeck_d< CGAL::Dynamic_dimension_tag >,
+ CGAL::Epeck_d< CGAL::Dimension_tag<4> >
+ > ;
+
+BOOST_AUTO_TEST_CASE_TEMPLATE(Zero_weighted_alpha_complex, Kernel, list_of_exact_kernel_variants) {
+ // Check that in exact mode for static dimension 4 the code for dD unweighted and for dD weighted with all weights
+ // 0 give exactly the same simplex tree (simplices and filtration values).
+
+ // Random points construction
+ using Point_d = typename Kernel::Point_d;
+ std::vector<Point_d> points;
+ std::uniform_real_distribution<double> rd_pts(-10., 10.);
+ std::random_device rand_dev;
+ std::mt19937 rand_engine(rand_dev());
+ for (int idx = 0; idx < 20; idx++) {
+ std::vector<double> point {rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine), rd_pts(rand_engine)};
+ points.emplace_back(point.begin(), point.end());
+ }
+
+ // Alpha complex from points
+ Gudhi::alpha_complex::Alpha_complex<Kernel, false> alpha_complex_from_points(points);
+ Gudhi::Simplex_tree<> simplex;
+ Gudhi::Simplex_tree<>::Filtration_value infty = std::numeric_limits<Gudhi::Simplex_tree<>::Filtration_value>::infinity();
+ BOOST_CHECK(alpha_complex_from_points.create_complex(simplex, infty, true));
+ std::clog << "Iterator on alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ // Alpha complex from zero weighted points
+ std::vector<typename Kernel::FT> weights(20, 0.);
+ Gudhi::alpha_complex::Alpha_complex<Kernel, true> alpha_complex_from_zero_weighted_points(points, weights);
+ Gudhi::Simplex_tree<> zw_simplex;
+ BOOST_CHECK(alpha_complex_from_zero_weighted_points.create_complex(zw_simplex, infty, true));
+
+ std::clog << "Iterator on zero weighted alpha complex simplices in the filtration order, with [filtration value]:"
+ << std::endl;
+ for (auto f_simplex : zw_simplex.filtration_simplex_range()) {
+ std::clog << " ( ";
+ for (auto vertex : zw_simplex.simplex_vertex_range(f_simplex)) {
+ std::clog << vertex << " ";
+ }
+ std::clog << ") -> " << "[" << zw_simplex.filtration(f_simplex) << "] " << std::endl;
+ }
+
+ BOOST_CHECK(zw_simplex == simplex);
+} \ No newline at end of file
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index cdcb1fde..84bc99a2 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -605,18 +605,19 @@ class Atol(BaseEstimator, TransformerMixin):
>>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
>>> c = np.array([[3, 2, -1], [1, 2, -1]])
>>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
- >>> atol_vectoriser.fit(X=[a, b, c]).centers
- array([[ 2. , 0.66666667, 3.33333333],
- [ 2.6 , 2.8 , -0.4 ]])
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP
+ >>> # array([[ 2. , 0.66666667, 3.33333333],
+ >>> # [ 2.6 , 2.8 , -0.4 ]])
>>> atol_vectoriser(a)
- array([1.18168665, 0.42375966])
+ >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP
>>> atol_vectoriser(c)
- array([0.02062512, 1.25157463])
- >>> atol_vectoriser.transform(X=[a, b, c])
- array([[1.18168665, 0.42375966],
- [0.29861028, 1.06330156],
- [0.02062512, 1.25157463]])
+ >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP
+ >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP
+ >>> # array([[1.18168665, 0.42375966],
+ >>> # [0.29861028, 1.06330156],
+ >>> # [0.02062512, 1.25157463]])
"""
+ # Note the example above must be up to date with the one in tests called test_atol_doc
def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
"""
Constructor for the Atol measure vectorisation class.
diff --git a/src/python/test/test_representations.py b/src/python/test/test_representations.py
index 43c914f3..cda1a15b 100755
--- a/src/python/test/test_representations.py
+++ b/src/python/test/test_representations.py
@@ -46,6 +46,32 @@ def test_multiple():
assert d1 == pytest.approx(d2, rel=0.02)
+# Test sorted values as points order can be inverted, and sorted test is not documentation-friendly
+# Note the test below must be up to date with the Atol class documentation
+def test_atol_doc():
+ a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ c = np.array([[3, 2, -1], [1, 2, -1]])
+
+ atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
+ # Atol will do
+ # X = np.concatenate([a,b,c])
+ # kmeans = KMeans(n_clusters=2, random_state=202006).fit(X)
+ # kmeans.labels_ will be : array([1, 0, 1, 0, 0, 1, 0, 0])
+ first_cluster = np.asarray([a[0], a[2], b[2]])
+ second_cluster = np.asarray([a[1], b[0], b[2], c[0], c[1]])
+
+ # Check the center of the first_cluster and second_cluster are in Atol centers
+ centers = atol_vectoriser.fit(X=[a, b, c]).centers
+ np.isclose(centers, first_cluster.mean(axis=0)).all(1).any()
+ np.isclose(centers, second_cluster.mean(axis=0)).all(1).any()
+
+ vectorization = atol_vectoriser.transform(X=[a, b, c])
+ assert np.allclose(vectorization[0], atol_vectoriser(a))
+ assert np.allclose(vectorization[1], atol_vectoriser(b))
+ assert np.allclose(vectorization[2], atol_vectoriser(c))
+
+
def test_dummy_atol():
a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])