author    wreise <wojciech.reise@epfl.ch>  2022-10-07 18:14:30 +0200
committer wreise <wojciech.reise@epfl.ch>  2022-10-07 18:14:30 +0200
commit    2ec6c2457d44a06deb45f0243a9c587b284daeba
tree      a04b43dcdc4eb90a2727509cad5c659dd46d14bc
parent    d9dfffdb580ab865a829fce851779f33fa47e4f7
parent    524718d63a8f633dbcc4fe7db3fe920ebd7e972c
Merge branch 'master' into optimize_silhouettes
-rw-r--r--  .github/next_release.md  21
-rw-r--r--  .github/workflows/pip-packaging-windows.yml  2
-rw-r--r--  CMakeGUDHIVersion.txt  2
-rw-r--r--  azure-pipelines.yml  13
-rw-r--r--  src/Cech_complex/benchmark/cech_complex_benchmark.cpp  4
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex.h  19
-rw-r--r--  src/Cech_complex/include/gudhi/Cech_complex_blocker.h  5
-rw-r--r--  src/Cech_complex/include/gudhi/Sphere_circumradius.h  17
-rw-r--r--  src/Contraction/example/Rips_contraction.cpp  2
-rw-r--r--  src/Contraction/include/gudhi/Edge_contraction.h  2
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h  5
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h  11
-rw-r--r--  src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h  5
-rw-r--r--  src/Doxyfile.in  68
-rw-r--r--  src/GudhUI/view/Viewer_instructor.h  2
-rw-r--r--  src/Persistence_representations/test/persistence_heat_maps_test.cpp  2
-rw-r--r--  src/Persistence_representations/test/persistence_lanscapes_test.cpp  2
-rw-r--r--  src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h  2
-rw-r--r--  src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp  10
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h  6
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h  2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h  2
-rw-r--r--  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h  2
-rwxr-xr-x  src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h  2
-rw-r--r--  src/cmake/modules/GUDHI_options.cmake  4
-rw-r--r--  src/common/doc/installation.h  2
-rw-r--r--  src/python/CMakeLists.txt  11
-rw-r--r--  src/python/doc/cubical_complex_sklearn_itf_ref.rst  102
-rw-r--r--  src/python/doc/cubical_complex_sum.inc  33
-rw-r--r--  src/python/doc/cubical_complex_user.rst  11
-rw-r--r--  src/python/doc/differentiation_sum.inc  4
-rw-r--r--  src/python/doc/img/sklearn.png  bin 0 -> 9368 bytes
-rw-r--r--  src/python/doc/img/tensorflow.png  bin 0 -> 3846 bytes
-rw-r--r--  src/python/doc/persistent_cohomology_user.rst  29
-rw-r--r--  src/python/doc/rips_complex_sum.inc  6
-rw-r--r--  src/python/doc/rips_complex_user.rst  8
-rw-r--r--  src/python/doc/simplex_tree_sum.inc  6
-rw-r--r--  src/python/gudhi/representations/preprocessing.py  57
-rw-r--r--  src/python/gudhi/sklearn/__init__.py  0
-rw-r--r--  src/python/gudhi/sklearn/cubical_persistence.py  110
-rw-r--r--  src/python/gudhi/tensorflow/cubical_layer.py  2
-rw-r--r--  src/python/test/test_representations_preprocessing.py  39
-rw-r--r--  src/python/test/test_sklearn_cubical_persistence.py  59
43 files changed, 537 insertions, 154 deletions
diff --git a/.github/next_release.md b/.github/next_release.md
index f8085513..64bda353 100644
--- a/.github/next_release.md
+++ b/.github/next_release.md
@@ -1,30 +1,19 @@
-We are pleased to announce the release 3.X.X of the GUDHI library.
+We are pleased to announce the release 3.7.0 of the GUDHI library.
As a major new feature, the GUDHI library now offers ...
We are now using GitHub to develop the GUDHI library, do not hesitate to [fork the GUDHI project on GitHub](https://github.com/GUDHI/gudhi-devel). From a user point of view, we recommend to download GUDHI user version (gudhi.3.X.X.tar.gz).
-Below is a list of changes made since GUDHI 3.5.0:
+Below is a list of changes made since GUDHI 3.6.0:
-- [Alpha complex](https://gudhi.inria.fr/python/latest/alpha_complex_user.html)
- - the python weighted version for alpha complex is now available in any dimension D.
- - `alpha_complex = gudhi.AlphaComplex(off_file='/data/points/tore3D_300.off')` is deprecated, please use [read_points_from_off_file](https://gudhi.inria.fr/python/latest/point_cloud.html#gudhi.read_points_from_off_file) instead.
-
-- [Representations](https://gudhi.inria.fr/python/latest/representations.html#gudhi.representations.vector_methods.BettiCurve)
- - A more flexible Betti curve class capable of computing exact curves
-
-- [Simplex tree](https://gudhi.inria.fr/python/latest/simplex_tree_ref.html)
- - `__deepcopy__`, `copy` and copy constructors
-
-- Installation
- - Boost &ge; 1.66.0 is now required (was &ge; 1.56.0).
- - Python >= 3.5 and cython >= 0.27 are now required.
+- [Module](link)
+ - ...
- [Module](link)
- ...
- Miscellaneous
- - The [list of bugs that were solved since GUDHI-3.5.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.6.0+is%3Aclosed) is available on GitHub.
+ - The [list of bugs that were solved since GUDHI-3.6.0](https://github.com/GUDHI/gudhi-devel/issues?q=label%3A3.7.0+is%3Aclosed) is available on GitHub.
All modules are distributed under the terms of the MIT license.
However, there are still GPL dependencies for many modules. We invite you to check our [license dedicated web page](https://gudhi.inria.fr/licensing/) for further details.
diff --git a/.github/workflows/pip-packaging-windows.yml b/.github/workflows/pip-packaging-windows.yml
index 142a114c..48a98036 100644
--- a/.github/workflows/pip-packaging-windows.yml
+++ b/.github/workflows/pip-packaging-windows.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: ['3.6', '3.7', '3.8', '3.9', '3.10']
+ python-version: ['3.7', '3.8', '3.9', '3.10']
name: Build wheels for Python ${{ matrix.python-version }}
steps:
- uses: actions/checkout@v1
diff --git a/CMakeGUDHIVersion.txt b/CMakeGUDHIVersion.txt
index b7f93799..1dab47ab 100644
--- a/CMakeGUDHIVersion.txt
+++ b/CMakeGUDHIVersion.txt
@@ -1,6 +1,6 @@
# Must be conform to pep440 - https://www.python.org/dev/peps/pep-0440/#pre-releases
set (GUDHI_MAJOR_VERSION 3)
-set (GUDHI_MINOR_VERSION 6)
+set (GUDHI_MINOR_VERSION 7)
# GUDHI_PATCH_VERSION can be 'ZaN' for Alpha release, 'ZbN' for Beta release, 'ZrcN' for release candidate or 'Z' for a final release.
set (GUDHI_PATCH_VERSION 0a0)
set(GUDHI_VERSION ${GUDHI_MAJOR_VERSION}.${GUDHI_MINOR_VERSION}.${GUDHI_PATCH_VERSION})
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index e005b3be..f54e593f 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -25,14 +25,14 @@ jobs:
python -m pip install --user -r ext/gudhi-deploy/test-requirements.txt
python -m pip uninstall -y pykeops
brew update || true
- brew install graphviz doxygen boost eigen gmp mpfr tbb cgal || true
+ brew install ninja graphviz doxygen boost eigen gmp mpfr tbb cgal || true
displayName: 'Install build dependencies'
- bash: |
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON ..
- make
- make doxygen
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -GNinja -DWITH_GUDHI_EXAMPLE=ON -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DWITH_GUDHI_REMOTE_TEST=ON ..
+ ninja
+ ninja doxygen
ctest --output-on-failure
displayName: 'Build, test and documentation generation'
@@ -62,15 +62,16 @@ jobs:
# No PyKeOps on windows, let's workaround this one.
for /F "tokens=*" %%A in (ext\gudhi-deploy\test-requirements.txt) do python -m pip install %%A
vcpkg install boost-filesystem:x64-windows boost-test:x64-windows boost-program-options:x64-windows tbb:x64-windows eigen3:x64-windows cgal:x64-windows
+ choco install -y ninja --force --force-dependencies
displayName: 'Install build dependencies'
- script: |
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" amd64
IF %errorlevel% NEQ 0 exit /b %errorlevel%
mkdir build
cd build
- cmake -G "Visual Studio 17 2022" -A x64 -DCMAKE_BUILD_TYPE=Release -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON $(cmakeVcpkgFlags) $(cmakeFlags) ..
+ cmake -DCMAKE_BUILD_TYPE=Release -G "Ninja" -DFORCE_EIGEN_DEFAULT_DENSE_INDEX_TYPE_TO_INT=ON $(cmakeVcpkgFlags) $(cmakeFlags) ..
IF %errorlevel% NEQ 0 exit /b %errorlevel%
- MSBuild GUDHIdev.sln /m /p:Configuration=Release /p:Platform=x64
+ ninja
IF %errorlevel% NEQ 0 exit /b %errorlevel%
ctest --output-on-failure -C Release -E diff_files
IF %errorlevel% NEQ 0 exit /b %errorlevel%
diff --git a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
index a9dc5d0d..a0e727be 100644
--- a/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
+++ b/src/Cech_complex/benchmark/cech_complex_benchmark.cpp
@@ -40,9 +40,9 @@ Simplex_tree benchmark_cech(const std::string& off_file_points, const Filtration
Points_off_reader_cgal off_reader_cgal(off_file_points);
Gudhi::Clock cech_clock("Cech computation");
- Cech_complex cech_complex_from_points(off_reader_cgal.get_point_cloud(), radius);
+ Cech_complex cech_complex_from_points(off_reader_cgal.get_point_cloud(), radius, exact);
Simplex_tree cech_stree;
- cech_complex_from_points.create_complex(cech_stree, dim_max, exact);
+ cech_complex_from_points.create_complex(cech_stree, dim_max);
// ------------------------------------------
// Display information about the Cech complex
diff --git a/src/Cech_complex/include/gudhi/Cech_complex.h b/src/Cech_complex/include/gudhi/Cech_complex.h
index 08b7a72f..625f7c9c 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex.h
@@ -62,15 +62,18 @@ class Cech_complex {
*
* @param[in] points Range of points where each point is defined as `kernel::Point_d`.
* @param[in] max_radius Maximal radius value.
+ * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
*
*/
template<typename InputPointRange >
- Cech_complex(const InputPointRange & points, Filtration_value max_radius) : max_radius_(max_radius) {
+ Cech_complex(const InputPointRange & points, Filtration_value max_radius, const bool exact = false) : max_radius_(max_radius), exact_(exact) {
point_cloud_.assign(std::begin(points), std::end(points));
cech_skeleton_graph_ = Gudhi::compute_proximity_graph<SimplicialComplexForCechComplex>(
- point_cloud_, max_radius_, Sphere_circumradius<Kernel, Filtration_value>());
+ point_cloud_, max_radius_, Sphere_circumradius<Kernel, Filtration_value>(exact));
}
/** \brief Initializes the simplicial complex from the proximity graph and expands it until a given maximal
@@ -78,19 +81,17 @@ class Cech_complex {
*
* @param[in] complex SimplicialComplexForCech to be created.
* @param[in] dim_max graph expansion until this given maximal dimension.
- * @param[in] exact Exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
- * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
* @exception std::invalid_argument In debug mode, if `complex.num_vertices()` does not return 0.
*
*/
- void create_complex(SimplicialComplexForCechComplex& complex, int dim_max, const bool exact = false) {
+ void create_complex(SimplicialComplexForCechComplex& complex, int dim_max) {
GUDHI_CHECK(complex.num_vertices() == 0,
std::invalid_argument("Cech_complex::create_complex - simplicial complex is not empty"));
// insert the proximity graph in the simplicial complex
complex.insert_graph(cech_skeleton_graph_);
// expand the graph until dimension dim_max
- complex.expansion_with_blockers(dim_max, cech_blocker(&complex, this, exact));
+ complex.expansion_with_blockers(dim_max, cech_blocker(&complex, this));
}
/** @return max_radius value given at construction. */
@@ -106,11 +107,17 @@ class Cech_complex {
*/
std::vector<Sphere> & get_cache() { return cache_; }
+ /** \brief Check exact option
+ * @return Exact option.
+ */
+ const bool is_exact() { return exact_; }
+
private:
Proximity_graph cech_skeleton_graph_;
Filtration_value max_radius_;
Point_cloud point_cloud_;
std::vector<Sphere> cache_;
+ const bool exact_;
};
} // namespace cech_complex
diff --git a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
index 3ea82826..e78e37b7 100644
--- a/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
+++ b/src/Cech_complex/include/gudhi/Cech_complex_blocker.h
@@ -116,7 +116,7 @@ class Cech_blocker {
}
Sphere sph = get_sphere(points.cbegin(), points.cend());
#if CGAL_VERSION_NR >= 1050000000
- if(exact_) CGAL::exact(sph.second);
+ if(cc_ptr_->is_exact()) CGAL::exact(sph.second);
#endif
CGAL::NT_converter<FT, Filtration_value> cast_to_fv;
radius = std::sqrt(cast_to_fv(sph.second));
@@ -134,13 +134,12 @@ class Cech_blocker {
}
/** \internal \brief Čech complex blocker constructor. */
- Cech_blocker(SimplicialComplexForCech* sc_ptr, Cech_complex* cc_ptr, const bool exact) : sc_ptr_(sc_ptr), cc_ptr_(cc_ptr), exact_(exact) {}
+ Cech_blocker(SimplicialComplexForCech* sc_ptr, Cech_complex* cc_ptr) : sc_ptr_(sc_ptr), cc_ptr_(cc_ptr) {}
private:
SimplicialComplexForCech* sc_ptr_;
Cech_complex* cc_ptr_;
Kernel kernel_;
- const bool exact_;
};
} // namespace cech_complex
diff --git a/src/Cech_complex/include/gudhi/Sphere_circumradius.h b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
index 790f6950..2f916c0a 100644
--- a/src/Cech_complex/include/gudhi/Sphere_circumradius.h
+++ b/src/Cech_complex/include/gudhi/Sphere_circumradius.h
@@ -12,6 +12,7 @@
#define SPHERE_CIRCUMRADIUS_H_
#include <CGAL/Epick_d.h> // for #include <CGAL/NT_converter.h> which is not working/compiling alone
+#include <CGAL/Lazy_exact_nt.h> // for CGAL::exact
#include <cmath> // for std::sqrt
#include <vector>
@@ -26,6 +27,7 @@ template<typename Kernel, typename Filtration_value>
class Sphere_circumradius {
private:
Kernel kernel_;
+ const bool exact_;
public:
using FT = typename Kernel::FT;
using Point = typename Kernel::Point_d;
@@ -42,7 +44,9 @@ class Sphere_circumradius {
*
*/
Filtration_value operator()(const Point& point_1, const Point& point_2) const {
- return std::sqrt(cast_to_fv(kernel_.squared_distance_d_object()(point_1, point_2))) / 2.;
+ auto squared_dist_obj = kernel_.squared_distance_d_object()(point_1, point_2);
+ if(exact_) CGAL::exact(squared_dist_obj);
+ return std::sqrt(cast_to_fv(squared_dist_obj)) / 2.;
}
/** \brief Circumradius of sphere passing through point cloud using CGAL.
@@ -53,9 +57,18 @@ class Sphere_circumradius {
*
*/
Filtration_value operator()(const Point_cloud& point_cloud) const {
- return std::sqrt(cast_to_fv(kernel_.compute_squared_radius_d_object()(point_cloud.begin(), point_cloud.end())));
+ auto squared_radius_obj = kernel_.compute_squared_radius_d_object()(point_cloud.begin(), point_cloud.end());
+ if(exact_) CGAL::exact(squared_radius_obj);
+ return std::sqrt(cast_to_fv(squared_radius_obj));
}
+ /** \brief Constructor
+ * @param[in] exact Option for exact filtration values computation. Not exact if `Kernel` is not <a target="_blank"
+ * href="https://doc.cgal.org/latest/Kernel_d/structCGAL_1_1Epeck__d.html">CGAL::Epeck_d</a>.
+ * Default is false.
+ */
+ Sphere_circumradius(const bool exact = false) : exact_(exact) {}
+
};
} // namespace cech_complex
diff --git a/src/Contraction/example/Rips_contraction.cpp b/src/Contraction/example/Rips_contraction.cpp
index 42dd0910..547c290e 100644
--- a/src/Contraction/example/Rips_contraction.cpp
+++ b/src/Contraction/example/Rips_contraction.cpp
@@ -39,7 +39,7 @@ void build_rips(ComplexType& complex, double offset) {
int main(int argc, char *argv[]) {
if (argc != 3) {
std::cerr << "Usage " << argv[0] << " ../../../data/meshes/SO3_10000.off 0.3 to load the file " <<
- "../../data/SO3_10000.off and contract the Rips complex built with paremeter 0.3.\n";
+ "../../data/SO3_10000.off and contract the Rips complex built with parameter 0.3.\n";
return -1;
}
diff --git a/src/Contraction/include/gudhi/Edge_contraction.h b/src/Contraction/include/gudhi/Edge_contraction.h
index 0b43c3b3..dff6dc14 100644
--- a/src/Contraction/include/gudhi/Edge_contraction.h
+++ b/src/Contraction/include/gudhi/Edge_contraction.h
@@ -48,7 +48,7 @@ Therefore, the simplification can be done without enumerating the set of simplic
A typical application of this package is homology group computation. It is illustrated in the next figure where a Rips complex is built upon a set of high-dimensional points and
simplified with edge contractions.
-It has initially a big number of simplices (around 20 millions) but simplifying it to a much reduced form with only 15 vertices (and 714 simplices) takes only few seconds on a desktop machine (see the example bellow).
+It has initially a big number of simplices (around 20 millions) but simplifying it to a much reduced form with only 15 vertices (and 714 simplices) takes only few seconds on a desktop machine (see the example below).
One can then compute homology group with a simplicial complex having very few simplices instead of running the homology algorithm on the much bigger initial set of
simplices which would take much more time and memory.
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
index dc6f5f90..a9e2d507 100644
--- a/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_affine_plane_in_Rd.h
@@ -57,7 +57,7 @@ struct Function_affine_plane_in_Rd {
* The dimension of the vector should be the ambient dimension of the manifold.
*/
Function_affine_plane_in_Rd(const Eigen::MatrixXd& normal_matrix, const Eigen::VectorXd& offset)
- : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), m_(d_ - k_), off_(offset) {
+ : normal_matrix_(normal_matrix), d_(normal_matrix.rows()), k_(normal_matrix.cols()), off_(offset) {
normal_matrix_.colwise().normalize();
}
@@ -73,14 +73,13 @@ struct Function_affine_plane_in_Rd {
: normal_matrix_(normal_matrix),
d_(normal_matrix.rows()),
k_(normal_matrix.cols()),
- m_(d_ - k_),
off_(Eigen::VectorXd::Zero(d_)) {
normal_matrix_.colwise().normalize();
}
private:
Eigen::MatrixXd normal_matrix_;
- std::size_t d_, k_, m_;
+ std::size_t d_, k_;
Eigen::VectorXd off_;
};
diff --git a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
index 11b379f3..f315d794 100644
--- a/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
+++ b/src/Coxeter_triangulation/include/gudhi/Functions/Function_moment_curve_in_Rd.h
@@ -46,6 +46,11 @@ struct Function_moment_curve_in_Rd {
return result;
}
+ /** @brief Returns the radius of the moment curve. */
+ double get_radius() const{
+ return r_;
+ }
+
/**
* \brief Constructor of the function that defines an implicit moment curve
* in the d-dimensional Euclidean space.
@@ -53,7 +58,7 @@ struct Function_moment_curve_in_Rd {
* @param[in] r Numerical parameter.
* @param[in] d The ambient dimension.
*/
- Function_moment_curve_in_Rd(double r, std::size_t d) : m_(1), k_(d - 1), d_(d), r_(r) {}
+ Function_moment_curve_in_Rd(double r, std::size_t d) : k_(d - 1), d_(d), r_(r) {}
/**
* \brief Constructor of the function that defines an implicit moment curve
@@ -64,10 +69,10 @@ struct Function_moment_curve_in_Rd {
* @param[in] offset The offset of the moment curve.
*/
Function_moment_curve_in_Rd(double r, std::size_t d, Eigen::VectorXd& offset)
- : m_(1), k_(d - 1), d_(d), r_(r), off_(offset) {}
+ : k_(d - 1), d_(d), r_(r), off_(offset) {}
private:
- std::size_t m_, k_, d_;
+ std::size_t k_, d_;
double r_;
Eigen::VectorXd off_;
};
diff --git a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
index 3ee73754..594b6fbf 100644
--- a/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
+++ b/src/Coxeter_triangulation/include/gudhi/Permutahedral_representation/Integer_combination_iterator.h
@@ -68,7 +68,7 @@ class Integer_combination_iterator
public:
template <class Bound_range>
Integer_combination_iterator(const uint& n, const uint& k, const Bound_range& bounds)
- : value_(k + 2), is_end_(n == 0 || k == 0), n_(n), k_(k) {
+ : value_(k + 2), is_end_(n == 0 || k == 0), k_(k) {
bounds_.reserve(k + 2);
uint sum_radices = 0;
for (auto b : bounds) {
@@ -96,13 +96,12 @@ class Integer_combination_iterator
}
// Used for the creating an end iterator
- Integer_combination_iterator() : is_end_(true), n_(0), k_(0) {}
+ Integer_combination_iterator() : is_end_(true), k_(0) {}
private:
value_t value_; // the dereference value
bool is_end_; // is true when the current integer combination is the final one
- uint n_;
uint k_;
std::vector<uint> bounds_;
};
diff --git a/src/Doxyfile.in b/src/Doxyfile.in
index 6e0e0333..1ec190d9 100644
--- a/src/Doxyfile.in
+++ b/src/Doxyfile.in
@@ -1143,6 +1143,11 @@ HTML_EXTRA_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css
HTML_EXTRA_FILES =
+# Default here is AUTO_LIGHT which means "Automatically set the mode according
+# to the user preference, use light mode if no preference is set".
+# Force it to LIGHT (white), as the rest of the documentation is white.
+HTML_COLORSTYLE = LIGHT
+
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
@@ -1452,17 +1457,6 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
@@ -2127,26 +2121,38 @@ HAVE_DOT = YES
DOT_NUM_THREADS = 0
-# When you want a differently looking font in the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
+# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of
+# subgraphs. When you want a differently looking font in the dot files that
+# doxygen generates you can specify fontname, fontcolor and fontsize attributes.
+# For details please see <a href=https://graphviz.org/doc/info/attrs.html>Node,
+# Edge and Graph Attributes specification</a> You need to make sure dot is able
+# to find the font, which can be done by putting it in a standard location or by
+# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font. Default graphviz fontsize is 14.
+# The default value is: fontname=Helvetica,fontsize=10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10"
+
+# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can
+# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. <a
+# href=https://graphviz.org/doc/info/arrows.html>Complete documentation about
+# arrows shapes.</a>
+# The default value is: labelfontname=Helvetica,labelfontsize=10.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTNAME = Helvetica
+DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10"
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
+# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes
+# around nodes set 'shape=plain' or 'shape=plaintext' <a
+# href=https://www.graphviz.org/doc/info/shapes.html>Shapes specification</a>
+# The default value is: shape=box,height=0.2,width=0.4.
# This tag requires that the tag HAVE_DOT is set to YES.
-DOT_FONTSIZE = 10
+DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4"
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
+# You can set the path where dot can find font specified with fontname in
+# DOT_COMMON_ATTR and others dot attributes.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
@@ -2358,18 +2364,6 @@ DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
# makes dot run faster, but since only newer versions of dot (>1.8.10) support
diff --git a/src/GudhUI/view/Viewer_instructor.h b/src/GudhUI/view/Viewer_instructor.h
index 58cbcd31..09ed102f 100644
--- a/src/GudhUI/view/Viewer_instructor.h
+++ b/src/GudhUI/view/Viewer_instructor.h
@@ -11,7 +11,7 @@
#ifndef VIEW_VIEWER_INSTRUCTOR_H_
#define VIEW_VIEWER_INSTRUCTOR_H_
-// todo do a viewer instructor that have directely a pointer to a QGLviewer and buffer ot not triangles
+// todo do a viewer instructor that has directly a pointer to a QGLviewer and buffer ot not triangles
#include <QFileDialog>
#include <QKeyEvent>
diff --git a/src/Persistence_representations/test/persistence_heat_maps_test.cpp b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
index b3240758..bf531773 100644
--- a/src/Persistence_representations/test/persistence_heat_maps_test.cpp
+++ b/src/Persistence_representations/test/persistence_heat_maps_test.cpp
@@ -78,7 +78,7 @@ BOOST_AUTO_TEST_CASE(check_compute_percentage_of_active_of_heat_maps) {
to_compute_percentage_of_active.push_back(&q);
to_compute_percentage_of_active.push_back(&r);
Persistence_heat_maps<constant_scaling_function> percentage_of_active;
- percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0.1);
+ percentage_of_active.compute_percentage_of_active(to_compute_percentage_of_active, 0);
Persistence_heat_maps<constant_scaling_function> template_percentage_of_active;
template_percentage_of_active.load_from_file("data/template_percentage_of_active_of_heat_maps");
diff --git a/src/Persistence_representations/test/persistence_lanscapes_test.cpp b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
index 21ef18a0..59924f16 100644
--- a/src/Persistence_representations/test/persistence_lanscapes_test.cpp
+++ b/src/Persistence_representations/test/persistence_lanscapes_test.cpp
@@ -238,7 +238,7 @@ if ( argc != 2 )
double integral = p.compute_integral_of_landscape();
cout << "integral : " << integral <<endl;
- //compute integral for each level separatelly
+ //compute integral for each level separately
for ( size_t level = 0 ; level != p.size() ; ++level )
{
cout << p.compute_integral_of_landscape( level ) << endl;
diff --git a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
index 2301a66b..c00bd33d 100644
--- a/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
+++ b/src/Persistent_cohomology/include/gudhi/Persistent_cohomology.h
@@ -723,7 +723,7 @@ class Persistent_cohomology {
boost::disjoint_sets<int *, Simplex_key *> dsets_;
/* The compressed annotation matrix fields.*/
Cam cam_;
- /* Dictionary establishing the correspondance between the Simplex_key of
+ /* Dictionary establishing the correspondence between the Simplex_key of
* the root vertex in the union-find ds and the Simplex_key of the vertex which
* created the connected component as a 0-dimension homology feature.*/
std::map<Simplex_key, Simplex_key> zero_cocycles_;
diff --git a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
index 229ae46f..f6118fe0 100644
--- a/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
+++ b/src/Simplex_tree/test/simplex_tree_ctor_and_move_unit_test.cpp
@@ -98,8 +98,16 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(simplex_copy_constructor, Simplex_tree, list_of_te
BOOST_CHECK(st == st4);
BOOST_CHECK(st3 == st);
+#ifdef __clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wself-assign-overloaded"
+#endif
st = st;
- print_simplex_filtration(st4, "Third self copy assignment from the default Simplex_tree");
+#ifdef __clang__
+#pragma GCC diagnostic pop
+#endif
+
+ print_simplex_filtration(st, "Third self copy assignment from the default Simplex_tree");
BOOST_CHECK(st3 == st);
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
index 5abd64d7..4c0c7dad 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/Skeleton_blocker_sub_complex.h
@@ -196,10 +196,8 @@ class Skeleton_blocker_sub_complex : public ComplexType {
};
/**
- * @remark remarque perte de temps a creer un nouveau simplexe a chaque fois
- * alors qu'on pourrait utiliser a la place de 'addresses_sigma_in_link'
- * un simplex avec des valeurs spéciales ComplexDS::null_vertex par exemple
- * pour indiquer qu'un vertex n'appartient pas au complex
+ * @remark waste of time to create a new simplex each time when we could use instead of addresses_sigma_in_link a
+ * simplex with special values (ComplexDS::null_vertex e.g.) to indicate that a vertex does not belong to the complex.
*/
template<typename ComplexType>
bool proper_face_in_union(
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
index 18ae6a92..116bc779 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker/internal/Trie.h
@@ -150,7 +150,7 @@ struct Trie {
++s_pos;
while (s_pos != s.end() && current != 0) {
bool found = false;
- for (const auto child : current->childs) {
+ for (const auto& child : current->childs) {
if (child->v == *s_pos) {
++s_pos;
current = child.get();
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
index 8ceaa480..b4ffc756 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_complex.h
@@ -1291,7 +1291,7 @@ class Skeleton_blocker_complex {
typedef boost::iterator_range<Complex_neighbors_vertices_iterator> Complex_neighbors_vertices_range;
/**
- * @brief Returns a Complex_edge_range over all edges of the simplicial complex that passes trough v
+ * @brief Returns a Complex_edge_range over all edges of the simplicial complex that passes through v
*/
Complex_neighbors_vertices_range vertex_range(Vertex_handle v) const {
auto begin = Complex_neighbors_vertices_iterator(this, v);
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
index a2637da3..b3bf0382 100644
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_link_complex.h
@@ -164,7 +164,7 @@ ComplexType> {
Vertex_handle y_parent = *parent_complex.get_address(
this->get_id(*y_link));
if (parent_complex.contains_edge(x_parent, y_parent)) {
- // we check that there is no blocker subset of alpha passing trough x and y
+ // we check that there is no blocker subset of alpha passing through x and y
bool new_edge = true;
for (auto blocker_parent : parent_complex.const_blocker_range(
x_parent)) {
diff --git a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
index 5db9c2fd..e686aaec 100755
--- a/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
+++ b/src/Skeleton_blocker/include/gudhi/Skeleton_blocker_simplifiable_complex.h
@@ -267,7 +267,7 @@ void Skeleton_blocker_complex<SkeletonBlockerDS>::remove_blocker_include_in_simp
template<typename SkeletonBlockerDS>
void Skeleton_blocker_complex<SkeletonBlockerDS>::tip_blockers(Vertex_handle a, Vertex_handle b,
std::vector<Simplex> & buffer) const {
- for (auto const & blocker : this->const_blocker_range(a)) {
+ for (auto const blocker : this->const_blocker_range(a)) {
Simplex beta = (*blocker);
beta.remove_vertex(a);
buffer.push_back(beta);
diff --git a/src/cmake/modules/GUDHI_options.cmake b/src/cmake/modules/GUDHI_options.cmake
index 5e28c87d..8379e3c6 100644
--- a/src/cmake/modules/GUDHI_options.cmake
+++ b/src/cmake/modules/GUDHI_options.cmake
@@ -4,7 +4,7 @@ option(WITH_GUDHI_REMOTE_TEST "Activate/deactivate datasets fetching test which
option(WITH_GUDHI_PYTHON "Activate/deactivate python module compilation and installation" ON)
option(WITH_GUDHI_TEST "Activate/deactivate examples compilation and installation" ON)
option(WITH_GUDHI_UTILITIES "Activate/deactivate utilities compilation and installation" ON)
-option(WITH_GUDHI_THIRD_PARTY "Activate/deactivate third party libraries cmake detection. When set to OFF, it is usefull for doxygen or user_version i.e." ON)
+option(WITH_GUDHI_THIRD_PARTY "Activate/deactivate third party libraries cmake detection. When set to OFF, it is useful for doxygen or user_version i.e." ON)
if (NOT WITH_GUDHI_THIRD_PARTY)
set (WITH_GUDHI_BENCHMARK OFF)
@@ -12,4 +12,4 @@ if (NOT WITH_GUDHI_THIRD_PARTY)
set (WITH_GUDHI_PYTHON OFF)
set (WITH_GUDHI_TEST OFF)
set (WITH_GUDHI_UTILITIES OFF)
-endif() \ No newline at end of file
+endif()
diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h
index 28526498..63a37a25 100644
--- a/src/common/doc/installation.h
+++ b/src/common/doc/installation.h
@@ -45,7 +45,7 @@ make \endverbatim
*
* \subsection documentationgeneration C++ documentation
* To generate the C++ documentation, the <a target="_blank" href="http://www.doxygen.org/">doxygen</a> program
- * is required (version &ge; 1.9.3 is advised). Run the following command in a terminal:
+ * is required (version &ge; 1.9.5 is advised). Run the following command in a terminal:
* \verbatim make doxygen \endverbatim
* Documentation will be generated in a folder named <code>html</code>.
*
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 65adef75..5f323935 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -298,6 +298,7 @@ if(PYTHONINTERP_FOUND)
file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera")
file(COPY "gudhi/datasets" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
+ file(COPY "gudhi/sklearn" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
# Some files for pip package
@@ -574,6 +575,11 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_betti_curve_representations)
endif()
+ # Representations preprocessing
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_representations_preprocessing)
+ endif()
+
# Time Delay
add_gudhi_py_test(test_time_delay)
@@ -603,6 +609,11 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_remote_datasets)
endif()
+ # sklearn
+ if(SKLEARN_FOUND)
+ add_gudhi_py_test(test_sklearn_cubical_persistence)
+ endif()
+
# persistence graphical tools
if(MATPLOTLIB_FOUND)
add_gudhi_py_test(test_persistence_graphical_tools)
diff --git a/src/python/doc/cubical_complex_sklearn_itf_ref.rst b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
new file mode 100644
index 00000000..90ae9ccd
--- /dev/null
+++ b/src/python/doc/cubical_complex_sklearn_itf_ref.rst
@@ -0,0 +1,102 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+Cubical complex persistence scikit-learn like interface
+#######################################################
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.6.0
+ - :License: MIT
+ - :Requires: `Scikit-learn <installation.html#scikit-learn>`_
+
+Cubical complex persistence scikit-learn like interface example
+---------------------------------------------------------------
+
+In this example, handwritten digits are used as input.
+A TDA scikit-learn pipeline is constructed, composed of:
+
+#. :class:`~gudhi.sklearn.cubical_persistence.CubicalPersistence` that builds a cubical complex from the inputs and
+ returns its persistence diagrams
+#. :class:`~gudhi.representations.preprocessing.DiagramSelector` that removes non-finite persistence diagrams values
+#. :class:`~gudhi.representations.vector_methods.PersistenceImage` that builds the persistence images from persistence diagrams
+#. `SVC <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ which is a scikit-learn support
+ vector classifier.
+
+This ML pipeline is trained to detect if the handwritten digit is an '8' or not, thanks to the fact that an '8' has
+two holes in :math:`\mathbf{H}_1`, or, like in this example, three connected components in :math:`\mathbf{H}_0`.
+
+.. code-block:: python
+
+ # Standard scientific Python imports
+ import numpy as np
+
+ # Standard scikit-learn imports
+ from sklearn.datasets import fetch_openml
+ from sklearn.pipeline import Pipeline
+ from sklearn.model_selection import train_test_split
+ from sklearn.svm import SVC
+ from sklearn import metrics
+
+ # Import TDA pipeline requirements
+ from gudhi.sklearn.cubical_persistence import CubicalPersistence
+ from gudhi.representations import PersistenceImage, DiagramSelector
+
+ X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
+
+ # Target is: "is an eight ?"
+ y = (y == "8") * 1
+ print("There are", np.sum(y), "eights out of", len(y), "numbers.")
+
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
+ pipe = Pipeline(
+ [
+ ("cub_pers", CubicalPersistence(homology_dimensions=0, newshape=[-1, 28, 28], n_jobs=-2)),
+ # Or for multiple persistence dimension computation
+ # ("cub_pers", CubicalPersistence(homology_dimensions=[0, 1], newshape=[-1, 28, 28])),
+ # ("H0_diags", DimensionSelector(index=0), # where index is the index in homology_dimensions array
+ ("finite_diags", DiagramSelector(use=True, point_type="finite")),
+ (
+ "pers_img",
+ PersistenceImage(bandwidth=50, weight=lambda x: x[1] ** 2, im_range=[0, 256, 0, 256], resolution=[20, 20]),
+ ),
+ ("svc", SVC()),
+ ]
+ )
+
+ # Learn from the train subset
+ pipe.fit(X_train, y_train)
+ # Predict from the test subset
+ predicted = pipe.predict(X_test)
+
+ print(f"Classification report for TDA pipeline {pipe}:\n" f"{metrics.classification_report(y_test, predicted)}\n")
+
+.. code-block:: none
+
+ There are 6825 eights out of 70000 numbers.
+ Classification report for TDA pipeline Pipeline(steps=[('cub_pers',
+ CubicalPersistence(newshape=[28, 28], n_jobs=-2)),
+ ('finite_diags', DiagramSelector(use=True)),
+ ('pers_img',
+ PersistenceImage(bandwidth=50, im_range=[0, 256, 0, 256],
+ weight=<function <lambda> at 0x7f3e54137ae8>)),
+ ('svc', SVC())]):
+ precision recall f1-score support
+
+ 0 0.97 0.99 0.98 25284
+ 1 0.92 0.68 0.78 2716
+
+ accuracy 0.96 28000
+ macro avg 0.94 0.84 0.88 28000
+ weighted avg 0.96 0.96 0.96 28000
+
+Cubical complex persistence scikit-learn like interface reference
+-----------------------------------------------------------------
+
+.. autoclass:: gudhi.sklearn.cubical_persistence.CubicalPersistence
+ :members:
+ :special-members: __init__
+ :show-inheritance: \ No newline at end of file
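Before the next file's diff, a hedged usage sketch of the multi-dimension variant hinted at in the commented-out lines of the example above (not part of the patch; `DimensionSelector` is assumed to be importable from `gudhi.representations.preprocessing`, where the patch below defines it):

    # Sketch only: compute H0 and H1 in a single CubicalPersistence step,
    # then keep the H0 diagrams with DimensionSelector before vectorizing.
    from sklearn.pipeline import Pipeline
    from sklearn.svm import SVC

    from gudhi.sklearn.cubical_persistence import CubicalPersistence
    from gudhi.representations import PersistenceImage, DiagramSelector
    from gudhi.representations.preprocessing import DimensionSelector

    pipe = Pipeline(
        [
            ("cub_pers", CubicalPersistence(homology_dimensions=[0, 1], newshape=[-1, 28, 28], n_jobs=-2)),
            ("H0_diags", DimensionSelector(index=0)),  # index into the homology_dimensions list
            ("finite_diags", DiagramSelector(use=True, point_type="finite")),
            ("pers_img", PersistenceImage(bandwidth=50, weight=lambda x: x[1] ** 2,
                                          im_range=[0, 256, 0, 256], resolution=[20, 20])),
            ("svc", SVC()),
        ]
    )
    # pipe.fit(X_train, y_train) and pipe.predict(X_test) then work as in the example above.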
diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc
index 90ec9fc2..b27843e5 100644
--- a/src/python/doc/cubical_complex_sum.inc
+++ b/src/python/doc/cubical_complex_sum.inc
@@ -1,17 +1,22 @@
.. table::
:widths: 30 40 30
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
- | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
- | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | |
- | :alt: Cubical complex representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | | |
- | | | :License: MIT |
- | | | |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
- | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
- | | * :doc:`periodic_cubical_complex_ref` |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
- | | * :doc:`cubical_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
- | | | |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
+ | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | :Since: GUDHI 2.0.0 |
+ | :alt: Cubical complex representation | | :License: MIT |
+ | :figclass: align-center | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
+ | | * :doc:`periodic_cubical_complex_ref` |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
+ | .. image:: | * :doc:`cubical_complex_sklearn_itf_ref` | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
+ | img/sklearn.png | | |
+ | :target: https://scikit-learn.org | | |
+ | :height: 30 | | |
+ +--------------------------------------------------------------------------+--------------------------------------------------------------+-------------------------------------------------------------+
diff --git a/src/python/doc/cubical_complex_user.rst b/src/python/doc/cubical_complex_user.rst
index 6a211347..42a23875 100644
--- a/src/python/doc/cubical_complex_user.rst
+++ b/src/python/doc/cubical_complex_user.rst
@@ -7,14 +7,7 @@ Cubical complex user manual
Definition
----------
-===================================== ===================================== =====================================
-:Author: Pawel Dlotko :Since: GUDHI PYTHON 2.0.0 :License: GPL v3
-===================================== ===================================== =====================================
-
-+---------------------------------------------+----------------------------------------------------------------------+
-| :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+---------------------------------------------+----------------------------------------------------------------------+
+.. include:: cubical_complex_sum.inc
The cubical complex is an example of a structured complex useful in computational mathematics (specially rigorous
numerics) and image analysis.
@@ -163,4 +156,4 @@ Tutorial
--------
This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-cubical-complexes.ipynb>`_
-explains how to represent sublevels sets of functions using cubical complexes. \ No newline at end of file
+explains how to represent sublevels sets of functions using cubical complexes.
diff --git a/src/python/doc/differentiation_sum.inc b/src/python/doc/differentiation_sum.inc
index 3aec33df..140cf180 100644
--- a/src/python/doc/differentiation_sum.inc
+++ b/src/python/doc/differentiation_sum.inc
@@ -1,8 +1,8 @@
.. list-table::
- :widths: 40 30 30
+ :width: 100%
:header-rows: 0
- * - :Since: GUDHI 3.5.0
+ * - :Since: GUDHI 3.6.0
- :License: MIT
- :Requires: `TensorFlow <installation.html#tensorflow>`_
diff --git a/src/python/doc/img/sklearn.png b/src/python/doc/img/sklearn.png
new file mode 100644
index 00000000..d1fecbbf
--- /dev/null
+++ b/src/python/doc/img/sklearn.png
Binary files differ
diff --git a/src/python/doc/img/tensorflow.png b/src/python/doc/img/tensorflow.png
new file mode 100644
index 00000000..a75f3f5b
--- /dev/null
+++ b/src/python/doc/img/tensorflow.png
Binary files differ
diff --git a/src/python/doc/persistent_cohomology_user.rst b/src/python/doc/persistent_cohomology_user.rst
index a3f294b2..39744b95 100644
--- a/src/python/doc/persistent_cohomology_user.rst
+++ b/src/python/doc/persistent_cohomology_user.rst
@@ -6,19 +6,24 @@ Persistent cohomology user manual
=================================
Definition
----------
-===================================== ===================================== =====================================
-:Author: Clément Maria :Since: GUDHI PYTHON 2.0.0 :License: GPL v3
-===================================== ===================================== =====================================
-
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
-| :doc:`persistent_cohomology_user` | Please refer to each data structure that contains persistence |
-| | feature for reference: |
-| | |
-| | * :doc:`simplex_tree_ref` |
-| | * :doc:`cubical_complex_ref` |
-| | * :doc:`periodic_cubical_complex_ref` |
-+-----------------------------------------------------------------+-----------------------------------------------------------------------+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :Author: Clément Maria
+ - :Since: GUDHI 2.0.0
+ - :License: MIT
+
+.. list-table::
+ :width: 100%
+ :header-rows: 0
+
+ * - :doc:`persistent_cohomology_user`
+ - Please refer to each data structure that contains persistence feature for reference:
+ * :doc:`simplex_tree_ref`
+ * :doc:`cubical_complex_ref`
+ * :doc:`periodic_cubical_complex_ref`
Computation of persistent cohomology using the algorithm of :cite:`DBLP:journals/dcg/SilvaMV11` and
:cite:`DBLP:conf/compgeom/DeyFW14` and the Compressed Annotation Matrix implementation of
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index 6931ebee..2b125e54 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -12,6 +12,8 @@
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
| * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
- | | * :doc:`rips_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
- | | | |
+ | .. image:: | * :doc:`rips_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index 27d218d4..c41a7803 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -7,13 +7,7 @@ Rips complex user manual
Definition
----------
-================================================================================ ================================ ======================
-:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse, Yuichi Ike :Since: GUDHI 2.0.0 :License: GPL v3
-================================================================================ ================================ ======================
-
-+-------------------------------------------+----------------------------------------------------------------------+
-| :doc:`rips_complex_user` | :doc:`rips_complex_ref` |
-+-------------------------------------------+----------------------------------------------------------------------+
+.. include:: rips_complex_sum.inc
The `Rips complex <https://en.wikipedia.org/wiki/Vietoris%E2%80%93Rips_complex>`_ is a simplicial complex that
generalizes proximity (:math:`\varepsilon`-ball) graphs to higher dimensions. The vertices correspond to the input
diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc
index 3ad1292c..6b534c9e 100644
--- a/src/python/doc/simplex_tree_sum.inc
+++ b/src/python/doc/simplex_tree_sum.inc
@@ -11,6 +11,8 @@
+----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
| * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
+----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
- | | * :doc:`ls_simplex_tree_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
- | | | |
+ | .. image:: | * :doc:`ls_simplex_tree_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | img/tensorflow.png | | |
+ | :target: https://www.tensorflow.org | | |
+ | :height: 30 | | |
+----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a8545349..8722e162 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -1,10 +1,11 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Vincent Rouvreau
#
# Copyright (C) 2018-2019 Inria
#
# Modification(s):
+# - 2021/10 Vincent Rouvreau: Add DimensionSelector
# - YYYY/MM Author: Description of the modification
import numpy as np
@@ -75,7 +76,7 @@ class Clamping(BaseEstimator, TransformerMixin):
Constructor for the Clamping class.
Parameters:
- limit (double): clamping value (default np.inf).
+ limit (float): clamping value (default np.inf).
"""
self.minimum = minimum
self.maximum = maximum
@@ -234,7 +235,7 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
use (bool): whether to use the class or not (default False).
location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
- threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ threshold (float): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
"""
self.num_pts = num_pts
self.threshold = threshold
@@ -317,7 +318,7 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
Parameters:
use (bool): whether to use the class or not (default False).
- limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ limit (float): second coordinate value that is the criterion for being an essential point (default numpy.inf).
point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
"""
self.use, self.limit, self.point_type = use, limit, point_type
@@ -363,3 +364,51 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
n x 2 numpy array: extracted persistence diagram.
"""
return self.fit_transform([diag])[0]
+
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>DimensionSelector: fit_transform(<br/>[[array( Hi(X0) ), array( Hj(X0) ), ...],<br/> [array( Hi(X1) ), array( Hj(X1) ), ...],<br/> ...])
+# DimensionSelector->>thread1: _transform([array( Hi(X0) ), array( Hj(X0) )], ...)
+# DimensionSelector->>thread2: _transform([array( Hi(X1) ), array( Hj(X1) )], ...)
+# Note right of DimensionSelector: ...
+# thread1->>DimensionSelector: array( Hn(X0) )
+# thread2->>DimensionSelector: array( Hn(X1) )
+# Note right of DimensionSelector: ...
+# DimensionSelector->>USER: [array( Hn(X0) ), <br/> array( Hn(X1) ), <br/> ...]
+
+class DimensionSelector(BaseEstimator, TransformerMixin):
+ """
+ This is a class to select persistence diagrams in a specific dimension from its index.
+ """
+
+ def __init__(self, index=0):
+ """
+ Constructor for the DimensionSelector class.
+
+ Parameters:
+ index (int): The returned persistence diagrams dimension index. Default value is `0`.
+ """
+ self.index = index
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def transform(self, X, Y=None):
+ """
+ Select persistence diagrams from its dimension.
+
+ Parameters:
+ X (list of list of tuple): List of list of persistence pairs, i.e.
+ `[[array( Hi(X0) ), array( Hj(X0) ), ...], [array( Hi(X1) ), array( Hj(X1) ), ...], ...]`
+
+ Returns:
+ list of tuple:
+ Persistence diagrams in a specific dimension. i.e. if `index` was set to `m` and `Hn` is at index `m` of
+ the input, it returns `[array( Hn(X0) ), array( Hn(X1), ...]`
+ """
+
+ return [persistence[self.index] for persistence in X]
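# A minimal usage sketch of the new DimensionSelector (illustrative only, not part of the patch):
import numpy as np
from gudhi.representations.preprocessing import DimensionSelector

# One persistence per sample, as a list [H0 diagram, H1 diagram].
pers0 = [np.array([[0.0, 1.0]]), np.array([[0.2, 0.4]])]
pers1 = [np.array([[0.0, 2.0]]), np.array([[0.1, 0.3]])]
select_h1 = DimensionSelector(index=1)
h1_diagrams = select_h1.fit_transform([pers0, pers1])
# h1_diagrams == [array([[0.2, 0.4]]), array([[0.1, 0.3]])]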
diff --git a/src/python/gudhi/sklearn/__init__.py b/src/python/gudhi/sklearn/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/sklearn/__init__.py
diff --git a/src/python/gudhi/sklearn/cubical_persistence.py b/src/python/gudhi/sklearn/cubical_persistence.py
new file mode 100644
index 00000000..672af278
--- /dev/null
+++ b/src/python/gudhi/sklearn/cubical_persistence.py
@@ -0,0 +1,110 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Vincent Rouvreau
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .. import CubicalComplex
+from sklearn.base import BaseEstimator, TransformerMixin
+
+import numpy as np
+# joblib is required by scikit-learn
+from joblib import Parallel, delayed
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>CubicalPersistence: fit_transform(X)
+# CubicalPersistence->>thread1: _transform(X[0])
+# CubicalPersistence->>thread2: _transform(X[1])
+# Note right of CubicalPersistence: ...
+# thread1->>CubicalPersistence: [array( H0(X[0]) ), array( H1(X[0]) )]
+# thread2->>CubicalPersistence: [array( H0(X[1]) ), array( H1(X[1]) )]
+# Note right of CubicalPersistence: ...
+# CubicalPersistence->>USER: [[array( H0(X[0]) ), array( H1(X[0]) )],<br/> [array( H0(X[1]) ), array( H1(X[1]) )],<br/> ...]
+
+
+class CubicalPersistence(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence diagrams from a cubical complex.
+ """
+
+ def __init__(
+ self,
+ homology_dimensions,
+ newshape=None,
+ homology_coeff_field=11,
+ min_persistence=0.0,
+ n_jobs=None,
+ ):
+ """
+ Constructor for the CubicalPersistence class.
+
+ Parameters:
+ homology_dimensions (int or list of int): The returned persistence diagrams dimension(s).
+                This short-circuits the use of :class:`~gudhi.representations.preprocessing.DimensionSelector` when only one
+ dimension matters (in other words, when `homology_dimensions` is an int).
+            newshape (tuple of ints): If the cells' filtration values need to be reshaped
+ (cf. :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform`), set `newshape`
+ to perform `numpy.reshape(X, newshape, order='C')` in
+ :func:`~gudhi.sklearn.cubical_persistence.CubicalPersistence.transform` method.
+ homology_coeff_field (int): The homology coefficient field. Must be a prime number. Default value is 11.
+ min_persistence (float): The minimum persistence value to take into account (strictly greater than
+ `min_persistence`). Default value is `0.0`. Set `min_persistence` to `-1.0` to see all values.
+ n_jobs (int): cf. https://joblib.readthedocs.io/en/latest/generated/joblib.Parallel.html
+ """
+ self.homology_dimensions = homology_dimensions
+ self.newshape = newshape
+ self.homology_coeff_field = homology_coeff_field
+ self.min_persistence = min_persistence
+ self.n_jobs = n_jobs
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def __transform(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return [
+ cubical_complex.persistence_intervals_in_dimension(dim) for dim in self.homology_dimensions
+ ]
+
+ def __transform_only_this_dim(self, cells):
+ cubical_complex = CubicalComplex(top_dimensional_cells=cells)
+ cubical_complex.compute_persistence(
+ homology_coeff_field=self.homology_coeff_field, min_persistence=self.min_persistence
+ )
+ return cubical_complex.persistence_intervals_in_dimension(self.homology_dimensions)
+
+ def transform(self, X, Y=None):
+ """Compute all the cubical complexes and their associated persistence diagrams.
+
+        :param X: List of cell filtration values (`numpy.reshape(X, newshape, order='C')` is applied if `newshape` is set with a tuple of ints).
+ :type X: list of list of float OR list of numpy.ndarray
+
+ :return: Persistence diagrams in the format:
+
+ - If `homology_dimensions` was set to `n`: `[array( Hn(X[0]) ), array( Hn(X[1]) ), ...]`
+ - If `homology_dimensions` was set to `[i, j]`: `[[array( Hi(X[0]) ), array( Hj(X[0]) )], [array( Hi(X[1]) ), array( Hj(X[1]) )], ...]`
+ :rtype: list of (,2) array_like or list of list of (,2) array_like
+ """
+ if self.newshape is not None:
+ X = np.reshape(X, self.newshape, order='C')
+
+        # Depends on whether homology_dimensions is an integer or a list of integers (else case)
+ if isinstance(self.homology_dimensions, int):
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(
+ delayed(self.__transform_only_this_dim)(cells) for cells in X
+ )
+ else:
+ # threads is preferred as cubical construction and persistence computation releases the GIL
+ return Parallel(n_jobs=self.n_jobs, prefer="threads")(delayed(self.__transform)(cells) for cells in X)
+
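# A minimal usage sketch of the new CubicalPersistence transformer
# (illustrative only, not part of the patch; the 8x8 random images are placeholders):
import numpy as np
from gudhi.sklearn.cubical_persistence import CubicalPersistence

images = [np.random.rand(8, 8) for _ in range(3)]

# homology_dimensions as an int: one diagram per image.
cp_h0 = CubicalPersistence(homology_dimensions=0, n_jobs=-1)
h0_diagrams = cp_h0.fit_transform(images)  # [array( H0(images[0]) ), ...]

# homology_dimensions as a list: a list of diagrams per image.
cp_h0_h1 = CubicalPersistence(homology_dimensions=[0, 1])
h0_h1_diagrams = cp_h0_h1.fit_transform(images)  # [[H0, H1], [H0, H1], ...]

# Flattened inputs can be reshaped on the fly with `newshape`.
flat = [img.flatten() for img in images]
cp_flat = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 8])
h0_from_flat = cp_flat.fit_transform(flat)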
diff --git a/src/python/gudhi/tensorflow/cubical_layer.py b/src/python/gudhi/tensorflow/cubical_layer.py
index 3304e719..5df2c370 100644
--- a/src/python/gudhi/tensorflow/cubical_layer.py
+++ b/src/python/gudhi/tensorflow/cubical_layer.py
@@ -18,7 +18,7 @@ def _Cubical(Xflat, Xdim, dimensions, homology_coeff_field):
cc = CubicalComplex(dimensions=Xdim[::-1], top_dimensional_cells=Xflat)
cc.compute_persistence(homology_coeff_field=homology_coeff_field)
- # Retrieve and ouput image indices/pixels corresponding to positive and negative simplices
+ # Retrieve and output image indices/pixels corresponding to positive and negative simplices
cof_pp = cc.cofaces_of_persistence_pairs()
L_cofs = []
diff --git a/src/python/test/test_representations_preprocessing.py b/src/python/test/test_representations_preprocessing.py
new file mode 100644
index 00000000..838cf30c
--- /dev/null
+++ b/src/python/test/test_representations_preprocessing.py
@@ -0,0 +1,39 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.representations.preprocessing import DimensionSelector
+import numpy as np
+import pytest
+
+H0_0 = np.array([0.0, 0.0])
+H1_0 = np.array([1.0, 0.0])
+H0_1 = np.array([0.0, 1.0])
+H1_1 = np.array([1.0, 1.0])
+H0_2 = np.array([0.0, 2.0])
+H1_2 = np.array([1.0, 2.0])
+
+
+def test_dimension_selector():
+ X = [[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]]
+ ds = DimensionSelector(index=0)
+ h0 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h0[0], H0_0)
+ np.testing.assert_array_equal(h0[1], H0_1)
+ np.testing.assert_array_equal(h0[2], H0_2)
+
+ ds = DimensionSelector(index=1)
+ h1 = ds.fit_transform(X)
+ np.testing.assert_array_equal(h1[0], H1_0)
+ np.testing.assert_array_equal(h1[1], H1_1)
+ np.testing.assert_array_equal(h1[2], H1_2)
+
+ ds = DimensionSelector(index=2)
+ with pytest.raises(IndexError):
+ h2 = ds.fit_transform([[H0_0, H1_0], [H0_1, H1_1], [H0_2, H1_2]])
diff --git a/src/python/test/test_sklearn_cubical_persistence.py b/src/python/test/test_sklearn_cubical_persistence.py
new file mode 100644
index 00000000..1c05a215
--- /dev/null
+++ b/src/python/test/test_sklearn_cubical_persistence.py
@@ -0,0 +1,59 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Vincent Rouvreau
+
+ Copyright (C) 2021 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.sklearn.cubical_persistence import CubicalPersistence
+import numpy as np
+from sklearn import datasets
+
+CUBICAL_PERSISTENCE_H0_IMG0 = np.array([[0.0, 6.0], [0.0, 8.0], [0.0, np.inf]])
+
+
+def test_simple_constructor_from_top_cells():
+ cells = datasets.load_digits().images[0]
+ cp = CubicalPersistence(homology_dimensions=0)
+ np.testing.assert_array_equal(cp._CubicalPersistence__transform_only_this_dim(cells), CUBICAL_PERSISTENCE_H0_IMG0)
+ cp = CubicalPersistence(homology_dimensions=[0, 2])
+ diags = cp._CubicalPersistence__transform(cells)
+ assert len(diags) == 2
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+
+def test_simple_constructor_from_top_cells_list():
+ digits = datasets.load_digits().images[:10]
+ cp = CubicalPersistence(homology_dimensions=0, n_jobs=-2)
+
+ diags = cp.fit_transform(digits)
+ assert len(diags) == 10
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+ cp = CubicalPersistence(homology_dimensions=[0, 1], n_jobs=-1)
+ diagsH0H1 = cp.fit_transform(digits)
+ assert len(diagsH0H1) == 10
+ for idx in range(10):
+ np.testing.assert_array_equal(diags[idx], diagsH0H1[idx][0])
+
+def test_simple_constructor_from_flattened_cells():
+ cells = datasets.load_digits().images[0]
+    # Non-square (extended) flattened cells
+ flat_cells = np.hstack((cells, np.zeros((cells.shape[0], 2)))).flatten()
+
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([flat_cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
+
+    # Non-square (extended) non-flattened cells
+ cells = np.hstack((cells, np.zeros((cells.shape[0], 2))))
+
+    # The aim of this second part of the test is to reshape even when it is not mandatory
+ cp = CubicalPersistence(homology_dimensions=0, newshape=[-1, 8, 10])
+ diags = cp.fit_transform([cells])
+
+ np.testing.assert_array_equal(diags[0], CUBICAL_PERSISTENCE_H0_IMG0)
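# A hedged sketch of how the new transformers could chain in a scikit-learn Pipeline,
# as hinted by the fit() docstrings (illustrative only, not part of the patch):
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from gudhi.sklearn.cubical_persistence import CubicalPersistence
from gudhi.representations.preprocessing import DiagramSelector
from gudhi.representations.vector_methods import PersistenceImage

pipe = Pipeline(
    [
        ("persistence", CubicalPersistence(homology_dimensions=0, n_jobs=-1)),
        ("finite", DiagramSelector(use=True, point_type="finite")),
        ("vectorize", PersistenceImage(bandwidth=1.0, resolution=[10, 10])),
    ]
)

images = datasets.load_digits().images[:10]
features = pipe.fit_transform(images)  # one flattened persistence image per input image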