author    Hind-M <hind.montassif@gmail.com>  2021-11-22 16:45:50 +0100
committer Hind-M <hind.montassif@gmail.com>  2021-11-22 16:45:50 +0100
commit    8b6d8660cedcfe9b95d6edf18ae85358c787226a (patch)
tree      984405cdedcf97cc53b3ce420592b599b1645c26 /src/python/gudhi
parent    bb07d2bb439e827f232a7504fc2144a4ba5a2478 (diff)
parent    61416d47f1d4fd96694ae58a83d29505b637d421 (diff)
Merge remote-tracking branch 'upstream/master' into gspr/exact-betti-curve
Diffstat (limited to 'src/python/gudhi')
-rw-r--r--  src/python/gudhi/clustering/tomato.py               |   4
-rw-r--r--  src/python/gudhi/cubical_complex.pyx                |  12
-rw-r--r--  src/python/gudhi/datasets/__init__.py               |   0
-rw-r--r--  src/python/gudhi/datasets/generators/__init__.py    |   0
-rw-r--r--  src/python/gudhi/datasets/generators/_points.cc     | 120
-rw-r--r--  src/python/gudhi/datasets/generators/points.py      |  56
-rw-r--r--  src/python/gudhi/periodic_cubical_complex.pyx       |  12
-rw-r--r--  src/python/gudhi/point_cloud/knn.py                 |  12
-rw-r--r--  src/python/gudhi/representations/vector_methods.py  |  97
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                   |   4
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                   |  33
-rw-r--r--  src/python/gudhi/wasserstein/wasserstein.py         | 222
12 files changed, 487 insertions, 85 deletions
diff --git a/src/python/gudhi/clustering/tomato.py b/src/python/gudhi/clustering/tomato.py
index fbba3cc8..d0e9995c 100644
--- a/src/python/gudhi/clustering/tomato.py
+++ b/src/python/gudhi/clustering/tomato.py
@@ -271,7 +271,7 @@ class Tomato:
l = self.max_weight_per_cc_.min()
r = self.max_weight_per_cc_.max()
if self.diagram_.size > 0:
- plt.plot(self.diagram_[:, 0], self.diagram_[:, 1], "ro")
+ plt.plot(self.diagram_[:, 0], self.diagram_[:, 1], "o", color="red")
l = min(l, self.diagram_[:, 1].min())
r = max(r, self.diagram_[:, 0].max())
if l == r:
@@ -283,7 +283,7 @@ class Tomato:
l, r = -1.0, 1.0
plt.plot([l, r], [l, r])
plt.plot(
- self.max_weight_per_cc_, numpy.full(self.max_weight_per_cc_.shape, 1.1 * l - 0.1 * r), "ro", color="green"
+ self.max_weight_per_cc_, numpy.full(self.max_weight_per_cc_.shape, 1.1 * l - 0.1 * r), "o", color="green"
)
plt.show()
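Editor's note: the change above avoids specifying a color both in the matplotlib format string ("ro") and through the color keyword, which is ambiguous (the keyword silently wins or, depending on the matplotlib version, is rejected). A minimal sketch of the unambiguous form, with hypothetical diagram data:

```python
import numpy as np
import matplotlib.pyplot as plt

diagram = np.array([[0.1, 0.5], [0.2, 0.9]])  # hypothetical (birth, death) pairs
# Marker comes from the format string, color from the keyword argument,
# so the two can no longer contradict each other (as "ro" + color="green" did).
plt.plot(diagram[:, 0], diagram[:, 1], "o", color="red")
plt.show()
```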
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index 28fbe3af..8e244bb8 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -35,7 +35,7 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except+
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
vector[int] betti_numbers() nogil
@@ -147,7 +147,7 @@ cdef class CubicalComplex:
:func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -169,7 +169,7 @@ cdef class CubicalComplex:
"""This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -281,4 +281,8 @@ cdef class CubicalComplex:
launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np.array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
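Editor's note: a short sketch of what the workaround above guarantees; the cell values are hypothetical:

```python
import numpy as np
from gudhi import CubicalComplex

cc = CubicalComplex(top_dimensional_cells=np.array([[1., 2.], [3., 4.]]))
cc.compute_persistence()
# Asking for a dimension with no intervals now yields a (0, 2) array
# rather than a 1-D empty array, so slicing like intervals[:, 1] keeps working.
intervals = cc.persistence_intervals_in_dimension(5)
assert intervals.shape == (0, 2)
```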
diff --git a/src/python/gudhi/datasets/__init__.py b/src/python/gudhi/datasets/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/__init__.py
diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/__init__.py
diff --git a/src/python/gudhi/datasets/generators/_points.cc b/src/python/gudhi/datasets/generators/_points.cc
new file mode 100644
index 00000000..70ce4925
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/_points.cc
@@ -0,0 +1,120 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Hind Montassif
+ *
+ * Copyright (C) 2021 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <gudhi/random_point_generators.h>
+#include <gudhi/Debug_utils.h>
+
+#include <CGAL/Epick_d.h>
+
+namespace py = pybind11;
+
+
+typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern;
+
+py::array_t<double> generate_points_on_sphere(size_t n_samples, int ambient_dim, double radius, std::string sample) {
+
+ if (sample != "random") {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ py::array_t<double> points({n_samples, (size_t)ambient_dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ GUDHI_CHECK(n_samples == buf.shape[0], "Py array first dimension not matching n_samples on sphere");
+ GUDHI_CHECK(ambient_dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension");
+
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, ambient_dim, radius);
+ }
+
+ for (size_t i = 0; i < n_samples; i++)
+ for (int j = 0; j < ambient_dim; j++)
+ ptr[i*ambient_dim+j] = points_generated[i][j];
+
+ return points;
+}
+
+py::array_t<double> generate_points_on_torus(size_t n_samples, int dim, std::string sample) {
+
+ if ( (sample != "random") && (sample != "grid")) {
+ throw pybind11::value_error("This sample type is not supported");
+ }
+
+ std::vector<typename Kern::Point_d> points_generated;
+
+ {
+ py::gil_scoped_release release;
+ points_generated = Gudhi::generate_points_on_torus_d<Kern>(n_samples, dim, sample);
+ }
+
+ size_t npoints = points_generated.size();
+
+ GUDHI_CHECK(2*dim == points_generated[0].size(), "Py array second dimension not matching the double torus dimension");
+
+ py::array_t<double> points({npoints, (size_t)2*dim});
+
+ py::buffer_info buf = points.request();
+ double *ptr = static_cast<double *>(buf.ptr);
+
+ for (size_t i = 0; i < npoints; i++)
+ for (int j = 0; j < 2*dim; j++)
+ ptr[i*(2*dim)+j] = points_generated[i][j];
+
+ return points;
+}
+
+PYBIND11_MODULE(_points, m) {
+ m.attr("__license__") = "LGPL v3";
+
+ m.def("sphere", &generate_points_on_sphere,
+ py::arg("n_samples"), py::arg("ambient_dim"),
+ py::arg("radius") = 1., py::arg("sample") = "random",
+ R"pbdoc(
+ Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+ :param ambient_dim: The ambient dimension d.
+ :type ambient_dim: integer
+ :param radius: The radius. Default value is `1.`.
+ :type radius: float
+ :param sample: The sample type. Default and only available value is `"random"`.
+ :type sample: string
+ :rtype: numpy array of float
+ :returns: the generated points on a sphere.
+ )pbdoc");
+
+ m.def("ctorus", &generate_points_on_torus,
+ py::arg("n_samples"), py::arg("dim"), py::arg("sample") = "random",
+ R"pbdoc(
+ Generate random i.i.d. points on a d-torus in R^2d or as a grid
+
+ :param n_samples: The number of points to be generated.
+ :type n_samples: integer
+            :param dim: The dimension of the torus on which points would be generated; the points live in R^(2*dim).
+ :type dim: integer
+ :param sample: The sample type. Available values are: `"random"` and `"grid"`. Default value is `"random"`.
+ :type sample: string
+ :rtype: numpy array of float.
+ The shape of returned numpy array is :
+ if sample is 'random' : (n_samples, 2*dim).
+ if sample is 'grid' : (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ :returns: the generated points on a torus.
+ )pbdoc");
+}
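Editor's note: once this extension module is built (it requires CGAL), the bindings could be exercised as below; the shapes follow the docstrings above:

```python
from gudhi.datasets.generators import _points

# 100 random points on the unit circle (a 1-sphere) in R^2.
sph = _points.sphere(n_samples=100, ambient_dim=2, radius=1., sample="random")
print(sph.shape)  # (100, 2)

# Grid on a 2-torus in R^4: shape[0] is rounded down to the closest
# perfect square, here floor(105**0.5)**2 = 100.
tor = _points.ctorus(n_samples=105, dim=2, sample="grid")
print(tor.shape)  # (100, 4)
```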
diff --git a/src/python/gudhi/datasets/generators/points.py b/src/python/gudhi/datasets/generators/points.py
new file mode 100644
index 00000000..cf97777d
--- /dev/null
+++ b/src/python/gudhi/datasets/generators/points.py
@@ -0,0 +1,56 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Hind Montassif
+#
+# Copyright (C) 2021 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+from ._points import ctorus
+from ._points import sphere
+
+def _generate_random_points_on_torus(n_samples, dim):
+
+ # Generate random angles of size n_samples*dim
+ alpha = 2*np.pi*np.random.rand(n_samples*dim)
+
+ # Based on angles, construct points of size n_samples*dim on a circle and reshape the result in a n_samples*2*dim array
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)]).reshape(-1, 2*dim)
+
+ return array_points
+
+def _generate_grid_points_on_torus(n_samples, dim):
+
+ # Generate points on a dim-torus as a grid
+ n_samples_grid = int((n_samples+.5)**(1./dim)) # add .5 to avoid rounding down with numerical approximations
+ alpha = np.linspace(0, 2*np.pi, n_samples_grid, endpoint=False)
+
+ array_points = np.column_stack([np.cos(alpha), np.sin(alpha)])
+ array_points_idx = np.empty([n_samples_grid]*dim + [dim], dtype=int)
+ for i, x in enumerate(np.ix_(*([np.arange(n_samples_grid)]*dim))):
+ array_points_idx[...,i] = x
+ return array_points[array_points_idx].reshape(-1, 2*dim)
+
+def torus(n_samples, dim, sample='random'):
+ """
+ Generate points on a flat dim-torus in R^2dim either randomly or on a grid
+
+ :param n_samples: The number of points to be generated.
+    :param dim: The dimension of the torus on which points would be generated; the points live in R^(2*dim).
+ :param sample: The sample type of the generated points. Can be 'random' or 'grid'.
+ :returns: numpy array containing the generated points on a torus.
+ The shape of returned numpy array is:
+ if sample is 'random' : (n_samples, 2*dim).
+ if sample is 'grid' : (⌊n_samples**(1./dim)⌋**dim, 2*dim), where shape[0] is rounded down to the closest perfect 'dim'th power.
+ """
+ if sample == 'random':
+ # Generate points randomly
+ return _generate_random_points_on_torus(n_samples, dim)
+ elif sample == 'grid':
+ # Generate points on a grid
+ return _generate_grid_points_on_torus(n_samples, dim)
+ else:
+ raise ValueError("Sample type '{}' is not supported".format(sample))
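Editor's note: a quick usage sketch of the pure-NumPy generator above:

```python
from gudhi.datasets.generators.points import torus

# 'grid' sampling keeps floor(n_samples**(1/dim))**dim points: 4**3 = 64.
pts = torus(n_samples=64, dim=3, sample='grid')
print(pts.shape)  # (64, 6)

rnd = torus(n_samples=50, dim=2, sample='random')
print(rnd.shape)  # (50, 4)
```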
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index d353d2af..6c21e902 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -32,7 +32,7 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>>":
Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[vector[int]] cofaces_of_cubical_persistence_pairs() nogil
vector[int] betti_numbers() nogil
@@ -148,7 +148,7 @@ cdef class PeriodicCubicalComplex:
:func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -170,7 +170,7 @@ cdef class PeriodicCubicalComplex:
"""This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int.
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -280,4 +280,8 @@ cdef class PeriodicCubicalComplex:
launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np.array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
index 994be3b6..de5844f9 100644
--- a/src/python/gudhi/point_cloud/knn.py
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -8,6 +8,7 @@
# - YYYY/MM Author: Description of the modification
import numpy
+import warnings
# TODO: https://github.com/facebookresearch/faiss
@@ -111,7 +112,7 @@ class KNearestNeighbors:
nargs = {
k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}
}
- self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs)
+ self.nn = NearestNeighbors(n_neighbors=self.k, metric=self.metric, **nargs)
self.nn.fit(X)
if self.params["implementation"] == "hnsw":
@@ -257,6 +258,9 @@ class KNearestNeighbors:
if ef is not None:
self.graph.set_ef(ef)
neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"])
+ with warnings.catch_warnings():
+ if not(numpy.all(numpy.isfinite(distances))):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
# The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn,
# which returns a priority_queue, and then fills the return array backwards with top/pop on the queue.
if self.return_index:
@@ -290,6 +294,9 @@ class KNearestNeighbors:
if self.return_index:
if self.return_distance:
distances, neighbors = mat.Kmin_argKmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not(torch.isfinite(distances).all()):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
if p != numpy.inf:
distances = distances ** (1.0 / p)
return neighbors, distances
@@ -298,6 +305,9 @@ class KNearestNeighbors:
return neighbors
if self.return_distance:
distances = mat.Kmin(k, dim=1)
+ with warnings.catch_warnings():
+ if not(torch.isfinite(distances).all()):
+ warnings.warn("Overflow/infinite value encountered while computing 'distances'", RuntimeWarning)
if p != numpy.inf:
distances = distances ** (1.0 / p)
return distances
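Editor's note: the keyword-only call matches scikit-learn 1.0, which removed most positional estimator arguments; a sketch of the corrected construction with hypothetical data:

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.random.rand(20, 3)
# scikit-learn >= 1.0 requires keyword arguments here; the old
# positional form NearestNeighbors(k, ...) now raises a TypeError.
nn = NearestNeighbors(n_neighbors=3, metric="minkowski").fit(X)
distances, indices = nn.kneighbors(X[:2])
print(distances.shape, indices.shape)  # (2, 3) (2, 3)
```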
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index 814b6081..018e9b21 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -7,6 +7,7 @@
# Modification(s):
# - 2020/06 Martin: ATOL integration
# - 2020/12 Gard: A more flexible Betti curve class capable of computing exact curves.
+# - 2021/11 Vincent Rouvreau: factorize _automatic_sample_range
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
@@ -47,10 +48,14 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
if np.isnan(np.array(self.im_range)).any():
- new_X = BirthPersistenceTransform().fit_transform(X)
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ try:
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
return self
def transform(self, X):
@@ -96,6 +101,28 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
"""
return self.fit_transform([diag])[0,:]
+def _automatic_sample_range(sample_range, X, y):
+ """
+    Computes and returns the sample range from the persistence diagrams when one of the sample_range values is numpy.nan.
+
+ Parameters:
+ sample_range (a numpy array of 2 float): minimum and maximum of all piecewise-linear function domains, of
+ the form [x_min, x_max].
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ nan_in_range = np.isnan(sample_range)
+ if nan_in_range.any():
+ try:
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
+ [Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ return np.where(nan_in_range, np.array([mx, My]), sample_range)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
+ return sample_range
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
@@ -121,10 +148,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if self.nan_in_range.any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(self.nan_in_range, np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -220,10 +244,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -349,6 +370,9 @@ class BettiCurve(BaseEstimator, TransformerMixin):
else:
self.grid_ = np.array(self.predefined_grid)
+
+ #self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
+
return self
@@ -473,10 +497,7 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ self.sample_range = _automatic_sample_range(np.array(self.sample_range), X, y)
return self
def transform(self, X):
@@ -495,9 +516,13 @@ class Entropy(BaseEstimator, TransformerMixin):
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
-
orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
- new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+ try:
+ new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ new_diagram = np.empty(shape = [0, 2])
if self.mode == "scalar":
ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
@@ -511,12 +536,11 @@ class Entropy(BaseEstimator, TransformerMixin):
max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
for k in range(min_idx, max_idx):
ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
- if self.normalized:
- ent = ent / np.linalg.norm(ent, ord=1)
- Xfit.append(np.reshape(ent,[1,-1]))
-
- Xfit = np.concatenate(Xfit, 0)
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+ Xfit = np.concatenate(Xfit, axis=0)
return Xfit
def __call__(self, diag):
@@ -577,7 +601,13 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
diagram, num_pts_in_diag = X[i], X[i].shape[0]
pers = 0.5 * (diagram[:,1]-diagram[:,0])
min_pers = np.minimum(pers,np.transpose(pers))
- distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+        # Works fine with sklearn 1.0; older versions throw a ValueError exception
+ try:
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ distances = np.empty(shape = [0, 0])
vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
dim = min(len(vect), thresh)
Xfit[i, :dim] = vect[:dim]
@@ -704,18 +734,19 @@ class Atol(BaseEstimator, TransformerMixin):
>>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
>>> c = np.array([[3, 2, -1], [1, 2, -1]])
>>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
- >>> atol_vectoriser.fit(X=[a, b, c]).centers
- array([[ 2. , 0.66666667, 3.33333333],
- [ 2.6 , 2.8 , -0.4 ]])
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers # doctest: +SKIP
+ >>> # array([[ 2. , 0.66666667, 3.33333333],
+ >>> # [ 2.6 , 2.8 , -0.4 ]])
>>> atol_vectoriser(a)
- array([1.18168665, 0.42375966])
+ >>> # array([1.18168665, 0.42375966]) # doctest: +SKIP
>>> atol_vectoriser(c)
- array([0.02062512, 1.25157463])
- >>> atol_vectoriser.transform(X=[a, b, c])
- array([[1.18168665, 0.42375966],
- [0.29861028, 1.06330156],
- [0.02062512, 1.25157463]])
+ >>> # array([0.02062512, 1.25157463]) # doctest: +SKIP
+ >>> atol_vectoriser.transform(X=[a, b, c]) # doctest: +SKIP
+ >>> # array([[1.18168665, 0.42375966],
+ >>> # [0.29861028, 1.06330156],
+ >>> # [0.02062512, 1.25157463]])
"""
+    # Note: the example above must be kept up to date with the test called test_atol_doc
def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
"""
Constructor for the Atol measure vectorisation class.
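Editor's note: as a sketch of the factored-out behaviour, leaving sample_range at its NaN default lets fit() infer it from the diagrams through _automatic_sample_range (the diagram values below are hypothetical):

```python
import numpy as np
from gudhi.representations import Landscape

diags = [np.array([[0.0, 1.0], [0.5, 2.0]])]
ls = Landscape(num_landscapes=2, resolution=10)  # sample_range defaults to [nan, nan]
ls.fit(diags)
# Filled from the minimum birth and maximum death over the diagrams.
print(ls.sample_range)  # approximately [0.0, 2.0]
```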
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 000323af..006a24ed 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -44,7 +44,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
- Simplex_tree() nogil
+ Simplex_tree_interface_full_featured() nogil
double simplex_filtration(vector[int] simplex) nogil
void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
void initialize_filtration() nogil
@@ -78,7 +78,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
- void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil except +
vector[pair[int, pair[double, double]]] get_persistence() nogil
vector[int] betti_numbers() nogil
vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
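Editor's note: the `except +` annotations let C++ exceptions thrown by compute_persistence propagate as Python exceptions, which is presumably how the 46337 coefficient-field bound documented in this merge surfaces to the user. A hedged sketch:

```python
import gudhi

st = gudhi.SimplexTree()
st.insert([0, 1, 2])
# With `except +`, an out-of-range coefficient field (max is 46337)
# should raise a Python exception instead of crashing the interpreter.
try:
    st.compute_persistence(homology_coeff_field=46349)
except Exception as e:
    print("raised:", e)
```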
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index d7991417..c3720936 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -9,9 +9,8 @@
from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
-import numpy
-from numpy import array as np_array
-cimport simplex_tree
+import numpy as np
+cimport gudhi.simplex_tree
__author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
@@ -412,7 +411,7 @@ cdef class SimplexTree:
"""This function retrieves good values for extended persistence, and separate the diagrams into the Ordinary,
Relative, Extended+ and Extended- subdiagrams.
- :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11.
+ :param homology_coeff_field: The homology coefficient field. Must be a prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value (i.e., the absolute value of the difference between the
persistence diagram point coordinates) to take into account (strictly greater than min_persistence).
@@ -449,7 +448,7 @@ cdef class SimplexTree:
"""This function computes and returns the persistence of the simplicial complex.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number. Default value is 11.
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -472,7 +471,7 @@ cdef class SimplexTree:
when you do not want the list :func:`persistence` returns.
:param homology_coeff_field: The homology coefficient field. Must be a
- prime number. Default value is 11.
+ prime number. Default value is 11. Max is 46337.
:type homology_coeff_field: int
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
@@ -542,7 +541,11 @@ cdef class SimplexTree:
function to be launched first.
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
- return np_array(self.pcohptr.intervals_in_dimension(dimension))
+ piid = np.array(self.pcohptr.intervals_in_dimension(dimension))
+ # Workaround https://github.com/GUDHI/gudhi-devel/issues/507
+ if len(piid) == 0:
+ return np.empty(shape = [0, 2])
+ return piid
def persistence_pairs(self):
"""This function returns a list of persistence birth and death simplices pairs.
@@ -583,8 +586,8 @@ cdef class SimplexTree:
"""
assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
gen = self.pcohptr.lower_star_generators()
- normal = [np_array(d).reshape(-1,2) for d in gen.first]
- infinite = [np_array(d) for d in gen.second]
+ normal = [np.array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np.array(d) for d in gen.second]
return (normal, infinite)
def flag_persistence_generators(self):
@@ -602,19 +605,19 @@ cdef class SimplexTree:
assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
gen = self.pcohptr.flag_generators()
if len(gen.first) == 0:
- normal0 = numpy.empty((0,3))
+ normal0 = np.empty((0,3))
normals = []
else:
l = iter(gen.first)
- normal0 = np_array(next(l)).reshape(-1,3)
- normals = [np_array(d).reshape(-1,4) for d in l]
+ normal0 = np.array(next(l)).reshape(-1,3)
+ normals = [np.array(d).reshape(-1,4) for d in l]
if len(gen.second) == 0:
- infinite0 = numpy.empty(0)
+ infinite0 = np.empty(0)
infinites = []
else:
l = iter(gen.second)
- infinite0 = np_array(next(l))
- infinites = [np_array(d).reshape(-1,2) for d in l]
+ infinite0 = np.array(next(l))
+ infinites = [np.array(d).reshape(-1,2) for d in l]
return (normal0, normals, infinite0, infinites)
def collapse_edges(self, nb_iterations = 1):
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index a9d1cdff..dc18806e 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -9,6 +9,7 @@
import numpy as np
import scipy.spatial.distance as sc
+import warnings
try:
import ot
@@ -70,6 +71,7 @@ def _perstot_autodiff(X, order, internal_p):
'''
return _dist_to_diag(X, internal_p).norms.lp(order)
+
def _perstot(X, order, internal_p, enable_autodiff):
'''
:param X: (n x 2) numpy.array (points of a given diagram).
@@ -79,6 +81,9 @@ def _perstot(X, order, internal_p, enable_autodiff):
transparent to automatic differentiation.
:type enable_autodiff: bool
:returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+
+ .. note::
+ Can be +inf if the diagram has an essential part (points with infinite coordinates).
'''
if enable_autodiff:
import eagerpy as ep
@@ -88,32 +93,163 @@ def _perstot(X, order, internal_p, enable_autodiff):
return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
-def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False):
+def _get_essential_parts(a):
'''
- :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
- (i.e. with infinite coordinate).
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
- a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
- the j-th point in Y, with the convention (-1) represents the diagonal.
- :param order: exponent for Wasserstein; Default value is 1.
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
- Default value is `np.inf`.
- :param enable_autodiff: If X and Y are torch.tensor or tensorflow.Tensor, make the computation
+ :param a: (n x 2) numpy.array (point of a diagram)
+ :returns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite
+ coordinates that can occur in a diagram, namely:
+ type0 : (-inf, finite)
+ type1 : (finite, +inf)
+ type2 : (-inf, +inf)
+ type3 : (-inf, -inf)
+ type4 : (+inf, +inf)
+ .. note::
+ For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x.
+ Note also that points with (+inf, -inf) are not handled (points (x,y) in dgm satisfy by assumption (y >= x)).
+
+ Finally, we consider that points with coordinates (-inf,-inf) and (+inf, +inf) belong to the diagonal.
+ '''
+ if len(a):
+ first_coord_finite = np.isfinite(a[:,0])
+ second_coord_finite = np.isfinite(a[:,1])
+ first_coord_infinite_positive = (a[:,0] == np.inf)
+ second_coord_infinite_positive = (a[:,1] == np.inf)
+ first_coord_infinite_negative = (a[:,0] == -np.inf)
+ second_coord_infinite_negative = (a[:,1] == -np.inf)
+
+ ess_first_type = np.where(second_coord_finite & first_coord_infinite_negative)[0] # coord (-inf, x)
+ ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0] # coord (x, +inf)
+ ess_third_type = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0] # coord (-inf, +inf)
+
+ ess_fourth_type = np.where(first_coord_infinite_negative & second_coord_infinite_negative)[0] # coord (-inf, -inf)
+ ess_fifth_type = np.where(first_coord_infinite_positive & second_coord_infinite_positive)[0] # coord (+inf, +inf)
+ return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type
+ else:
+ return [], [], [], [], []
+
+
+def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis):
+ '''
+ :param X: (n x 2) numpy.array (dgm points)
+ :param Y: (n x 2) numpy.array (dgm points)
+ :param idX: indices to consider for this one dimensional OT problem (in X)
+ :param idY: indices to consider for this one dimensional OT problem (in Y)
+ :param order: exponent for Wasserstein distance computation
+    :param axis: must be 0 or 1; corresponds to the coordinate that is finite.
+ :returns: cost (float) and match for points with *one* infinite coordinate.
+
+ .. note::
+        Assume idX, idY come from a call to _handle_essential_parts, and thus have the same length.
+ '''
+ u = X[idX, axis]
+ v = Y[idY, axis]
+
+ cost = np.sum(np.abs(np.sort(u) - np.sort(v))**(order)) # OT cost in 1D
+
+ sortidX = idX[np.argsort(u)]
+ sortidY = idY[np.argsort(v)]
+ # We return [i,j] sorted per value
+ match = list(zip(sortidX, sortidY))
+
+ return cost, match
+
+
+def _handle_essential_parts(X, Y, order):
+ '''
+ :param X: (n x 2) numpy array, first diagram.
+ :param Y: (n x 2) numpy array, second diagram.
+    :param order: Wasserstein order for cost computation.
+ :returns: cost and matching due to essential parts. If cost is +inf, matching will be set to None.
+ '''
+ ess_parts_X = _get_essential_parts(X)
+ ess_parts_Y = _get_essential_parts(Y)
+
+ # Treats the case of infinite cost (cardinalities of essential parts differ).
+    for u, v in list(zip(ess_parts_X, ess_parts_Y))[:3]:  # ignore (-inf,-inf) and (+inf,+inf) points, as they belong to the diagonal
+ if len(u) != len(v):
+ return np.inf, None
+
+ # Now we know each essential part has the same number of points in both diagrams.
+    # Handle type 0 and type 1 essential parts (those with one finite coordinate)
+ c1, m1 = _cost_and_match_essential_parts(X, Y, ess_parts_X[0], ess_parts_Y[0], axis=1, order=order)
+ c2, m2 = _cost_and_match_essential_parts(X, Y, ess_parts_X[1], ess_parts_Y[1], axis=0, order=order)
+
+ c = c1 + c2
+ m = m1 + m2
+
+    # Handle points with coordinates (-inf,+inf): they are all at the same location, so we just align them
+ m += list(zip(ess_parts_X[2], ess_parts_Y[2]))
+
+    # Handle (-inf,-inf) and (+inf,+inf) points: considered as belonging to the diagonal, so matched to (-1) with cost 0.
+ for z in ess_parts_X[3:]:
+ m += [(u, -1) for u in z] # points in X are matched to -1
+ for z in ess_parts_Y[3:]:
+        m += [(-1, v) for v in z]  # -1 is matched to points in Y
+
+ return c, np.array(m)
+
+
+def _finite_part(X):
+ '''
+ :param X: (n x 2) numpy array encoding a persistence diagram.
+ :returns: The finite part of a diagram `X` (points with finite coordinates).
+ '''
+ return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
+
+
+def _warn_infty(matching):
+ '''
+ Handle essential parts with different cardinalities. Warn the user about cost being infinite and (if
+ `matching=True`) about the returned matching being `None`.
+ '''
+ if matching:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf, and the returned matching is None.')
+ return np.inf, None
+ else:
+ warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf.')
+ return np.inf
+
+
+def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
+ keep_essential_parts=True):
+ '''
+    Compute the Wasserstein distance between persistence diagrams using the Python Optimal Transport backend.
+    Diagrams can contain points with infinite coordinates (essential parts).
+ Points with (-inf,-inf) and (+inf,+inf) coordinates are considered as belonging to the diagonal.
+ If the distance between two diagrams is +inf (which happens if the cardinalities of essential
+ parts differ) and optimal matching is required, it will be set to ``None``.
+
+ :param X: The first diagram.
+ :type X: n x 2 numpy.array
+ :param Y: The second diagram.
+ :type Y: m x 2 numpy.array
+ :param matching: if ``True``, computes and returns the optimal matching between X and Y, encoded as
+ a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
+ the j-th point in Y, with the convention that (-1) represents the diagonal.
+ :param order: Wasserstein exponent q (1 <= q < infinity).
+ :type order: float
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
+ :type internal_p: float
+ :param enable_autodiff: If X and Y are ``torch.tensor`` or ``tensorflow.Tensor``, make the computation
transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
- with `matching=True`.
+ with ``matching=True`` and with ``keep_essential_parts=True``.
- .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+ .. note:: This considers the function defined on the coordinates of the off-diagonal finite points of X and Y
and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
:type enable_autodiff: bool
- :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+ :param keep_essential_parts: If ``False``, only considers the finite points in the diagrams.
+ Otherwise, include essential parts in cost and matching computation.
+ :type keep_essential_parts: bool
+ :returns: The Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
respect to the internal_p-norm as ground metric.
If matching is set to True, also returns the optimal matching between X and Y.
+ If cost is +inf, any matching is optimal and thus it returns `None` instead.
'''
+
+ # First step: handle empty diagrams
n = len(X)
m = len(Y)
- # handle empty diagrams
if n == 0:
if m == 0:
if not matching:
@@ -122,16 +258,45 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
else:
return 0., np.array([])
else:
- if not matching:
- return _perstot(Y, order, internal_p, enable_autodiff)
+ cost = _perstot(Y, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
else:
- return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)])
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[-1, j] for j in range(m)])
elif m == 0:
- if not matching:
- return _perstot(X, order, internal_p, enable_autodiff)
+ cost = _perstot(X, order, internal_p, enable_autodiff)
+ if cost == np.inf:
+ return _warn_infty(matching)
else:
- return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
+ if not matching:
+ return cost
+ else:
+ return cost, np.array([[i, -1] for i in range(n)])
+
+ # Check essential part and enable autodiff together
+ if enable_autodiff and keep_essential_parts:
+        warnings.warn('''enable_autodiff=True and keep_essential_parts=True are not compatible with each other.
+ keep_essential_parts is set to False: only points with finite coordinates are considered
+ in the following.
+ ''')
+ keep_essential_parts = False
+
+ # Second step: handle essential parts if needed.
+ if keep_essential_parts:
+ essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
+ if (essential_cost == np.inf):
+ return _warn_infty(matching) # Tells the user that cost is infty and matching (if True) is None.
+ # avoid computing transport cost between the finite parts if essential parts
+ # cardinalities do not match (saves time)
+ else:
+ essential_cost = 0
+ essential_matching = None
+
+ # Now the standard pipeline for finite parts
if enable_autodiff:
import eagerpy as ep
@@ -139,6 +304,12 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
Y_orig = ep.astensor(Y)
X = X_orig.numpy()
Y = Y_orig.numpy()
+
+ # Extract finite points of the diagrams.
+ X, Y = _finite_part(X), _finite_part(Y)
+ n = len(X)
+ m = len(Y)
+
M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
a[-1] = m
@@ -154,7 +325,10 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
# Now we turn to -1 points encoding the diagonal
match[:,0][match[:,0] >= n] = -1
match[:,1][match[:,1] >= m] = -1
- return ot_cost ** (1./order) , match
+ # Finally incorporate the essential part matching
+ if essential_matching is not None:
+ match = np.concatenate([match, essential_matching]) if essential_matching.size else match
+ return (ot_cost + essential_cost) ** (1./order) , match
if enable_autodiff:
P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
@@ -173,9 +347,9 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
return ep.concatenate(dists).norms.lp(order).raw
# We can also concatenate the 3 vectors to compute just one norm.
- # Comptuation of the otcost using the ot.emd2 library.
+    # Computation of the ot cost using the ot.emd2 library.
# Note: it is the Wasserstein distance to the power q.
# The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
ot_cost = ot.emd2(a, b, M, numItermax=2000000)
- return ot_cost ** (1./order)
+ return (ot_cost + essential_cost) ** (1./order)
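Editor's note: a small end-to-end sketch of the new essential-part handling, with values chosen by hand:

```python
import numpy as np
from gudhi.wasserstein import wasserstein_distance

X = np.array([[0., 1.], [0., np.inf]])
Y = np.array([[0., 2.], [1., np.inf]])
# Finite parts: matching (0, 1) to (0, 2) costs 1 under the L^inf ground metric.
# Essential parts of type (x, +inf): the 1D problem |0 - 1| costs 1.
cost, match = wasserstein_distance(X, Y, matching=True, order=1.)
print(cost)   # 2.0 = (1 + 1) ** (1/order)
print(match)  # includes the pair matching the two essential points
```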