Diffstat (limited to 'src/python/gudhi')
-rw-r--r--  src/python/gudhi/alpha_complex.pyx                 |  27
-rw-r--r--  src/python/gudhi/bottleneck.cc                     |  50
-rw-r--r--  src/python/gudhi/bottleneck.pyx                    |  48
-rw-r--r--  src/python/gudhi/cubical_complex.pyx               | 133
-rw-r--r--  src/python/gudhi/hera.cc                           |  56
-rw-r--r--  src/python/gudhi/nerve_gic.pyx                     |  41
-rw-r--r--  src/python/gudhi/off_reader.pyx                    |  12
-rw-r--r--  src/python/gudhi/periodic_cubical_complex.pyx      | 124
-rw-r--r--  src/python/gudhi/persistence_graphical_tools.py    | 136
-rw-r--r--  src/python/gudhi/point_cloud/__init__.py           |   0
-rw-r--r--  src/python/gudhi/point_cloud/dtm.py                |  70
-rw-r--r--  src/python/gudhi/point_cloud/knn.py                | 328
-rw-r--r--  src/python/gudhi/point_cloud/timedelay.py          |  94
-rw-r--r--  src/python/gudhi/representations/kernel_methods.py | 183
-rw-r--r--  src/python/gudhi/representations/metrics.py        | 385
-rw-r--r--  src/python/gudhi/representations/preprocessing.py  |  60
-rw-r--r--  src/python/gudhi/representations/vector_methods.py |  84
-rw-r--r--  src/python/gudhi/rips_complex.pyx                  |  17
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                  |  81
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                  | 316
-rw-r--r--  src/python/gudhi/wasserstein.py                    |  97
-rw-r--r--  src/python/gudhi/wasserstein/__init__.py           |   1
-rw-r--r--  src/python/gudhi/wasserstein/barycenter.py         | 146
-rw-r--r--  src/python/gudhi/wasserstein/wasserstein.py        | 181
-rw-r--r--  src/python/gudhi/weighted_rips_complex.py          |  59
25 files changed, 2136 insertions(+), 593 deletions(-)
diff --git a/src/python/gudhi/alpha_complex.pyx b/src/python/gudhi/alpha_complex.pyx
index fff3e920..d75e374a 100644
--- a/src/python/gudhi/alpha_complex.pyx
+++ b/src/python/gudhi/alpha_complex.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2016 Inria
@@ -7,6 +9,7 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
@@ -24,11 +27,11 @@ __license__ = "GPL v3"
cdef extern from "Alpha_complex_interface.h" namespace "Gudhi":
cdef cppclass Alpha_complex_interface "Gudhi::alpha_complex::Alpha_complex_interface":
- Alpha_complex_interface(vector[vector[double]] points) except +
+ Alpha_complex_interface(vector[vector[double]] points) nogil except +
# bool from_file is a workaround for cython to find the correct signature
- Alpha_complex_interface(string off_file, bool from_file) except +
- vector[double] get_point(int vertex) except +
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) except +
+ Alpha_complex_interface(string off_file, bool from_file) nogil except +
+ vector[double] get_point(int vertex) nogil except +
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, double max_alpha_square) nogil except +
# AlphaComplex python interface
cdef class AlphaComplex:
@@ -67,16 +70,20 @@ cdef class AlphaComplex:
# The real cython constructor
def __cinit__(self, points = None, off_file = ''):
+ cdef vector[vector[double]] pts
if off_file:
if os.path.isfile(off_file):
- self.thisptr = new Alpha_complex_interface(off_file.encode('utf-8'), True)
+ self.thisptr = new Alpha_complex_interface(
+ off_file.encode('utf-8'), True)
else:
print("file " + off_file + " not found.")
else:
if points is None:
# Empty Alpha construction
points=[]
- self.thisptr = new Alpha_complex_interface(points)
+ pts = points
+ with nogil:
+ self.thisptr = new Alpha_complex_interface(pts)
def __dealloc__(self):
@@ -109,6 +116,8 @@ cdef class AlphaComplex:
:rtype: SimplexTree
"""
stree = SimplexTree()
+ cdef double mas = max_alpha_square
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, max_alpha_square)
+ with nogil:
+ self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, mas)
return stree
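For illustration, a minimal sketch of the Python-level call pattern this hunk optimizes; the public API is unchanged and the point data is made up:

    import gudhi

    # Build an alpha complex from an in-memory point cloud.
    points = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
    alpha = gudhi.AlphaComplex(points=points)

    # With this patch, the C++ work in the constructor and in
    # create_simplex_tree() runs with the GIL released, so other
    # Python threads can make progress in the meantime.
    stree = alpha.create_simplex_tree(max_alpha_square=2.0)
    print(stree.num_simplices())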
diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc
new file mode 100644
index 00000000..732cb9a8
--- /dev/null
+++ b/src/python/gudhi/bottleneck.cc
@@ -0,0 +1,50 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Bottleneck.h>
+
+#include <pybind11_diagram_utils.h>
+
+double bottleneck(Dgm d1, Dgm d2, double epsilon)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, epsilon);
+}
+
+PYBIND11_MODULE(bottleneck, m) {
+ m.attr("__license__") = "GPL v3";
+ m.def("bottleneck_distance", &bottleneck,
+ py::arg("diagram_1"), py::arg("diagram_2"),
+ py::arg("e") = (std::numeric_limits<double>::min)(),
+ R"pbdoc(
+ Compute the bottleneck distance between two persistence diagrams.
+
+ :param diagram_1: The first diagram.
+ :type diagram_1: numpy array of shape (m,2)
+ :param diagram_2: The second diagram.
+ :type diagram_2: numpy array of shape (n,2)
+ :param e: If `e` is 0, this uses an expensive algorithm to compute the
+ exact distance.
+ If `e` is not 0, it asks for an additive `e`-approximation, and
+ currently also allows a small multiplicative error (the last 2 or 3
+ bits of the mantissa may be wrong). This version of the algorithm takes
+ advantage of the limited precision of `double` and is usually a lot
+ faster to compute, whatever the value of `e`.
+ Thus, by default, `e` is the smallest positive double.
+ :type e: float
+ :rtype: float
+ :returns: the bottleneck distance.
+ )pbdoc");
+}
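For reference, a sketch of calling the new pybind11 module through the usual gudhi.bottleneck_distance entry point; the diagrams are made up:

    import numpy as np
    import gudhi

    diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
    diag2 = np.array([[2.8, 4.45], [9.5, 14.1]])

    # Default e (smallest positive double): fast approximate algorithm.
    print(gudhi.bottleneck_distance(diag1, diag2))
    # e=0: exact but expensive algorithm.
    print(gudhi.bottleneck_distance(diag1, diag2, 0))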
diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx
deleted file mode 100644
index af011e88..00000000
--- a/src/python/gudhi/bottleneck.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Vincent Rouvreau
-#
-# Copyright (C) 2016 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-from cython cimport numeric
-from libcpp.vector cimport vector
-from libcpp.utility cimport pair
-import os
-
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "GPL v3"
-
-cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram":
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double)
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]])
-
-def bottleneck_distance(diagram_1, diagram_2, e=None):
- """This function returns the point corresponding to a given vertex.
-
- :param diagram_1: The first diagram.
- :type diagram_1: vector[pair[double, double]]
- :param diagram_2: The second diagram.
- :type diagram_2: vector[pair[double, double]]
- :param e: If `e` is 0, this uses an expensive algorithm to compute the
- exact distance.
- If `e` is not 0, it asks for an additive `e`-approximation, and
- currently also allows a small multiplicative error (the last 2 or 3
- bits of the mantissa may be wrong). This version of the algorithm takes
- advantage of the limited precision of `double` and is usually a lot
- faster to compute, whatever the value of `e`.
-
- Thus, by default, `e` is the smallest positive double.
- :type e: float
- :rtype: float
- :returns: the bottleneck distance.
- """
- if e is None:
- # Default value is the smallest double value (not 0, 0 is for exact version)
- return bottleneck(diagram_1, diagram_2)
- else:
- # Can be 0 for exact version
- return bottleneck(diagram_1, diagram_2, e)
diff --git a/src/python/gudhi/cubical_complex.pyx b/src/python/gudhi/cubical_complex.pyx
index cbeda014..ca979eda 100644
--- a/src/python/gudhi/cubical_complex.pyx
+++ b/src/python/gudhi/cubical_complex.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2016 Inria
@@ -7,12 +9,15 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import errno
import os
+import sys
import numpy as np
@@ -30,7 +35,9 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<>>":
Cubical_complex_persistence_interface(Bitmap_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
+ void compute_persistence(int homology_coeff_field, double min_persistence)
+ vector[pair[int, pair[double, double]]] get_persistence()
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs()
vector[int] betti_numbers()
vector[int] persistent_betti_numbers(double from_value, double to_value)
vector[pair[double,double]] intervals_in_dimension(int dimension)
@@ -87,10 +94,12 @@ cdef class CubicalComplex:
if os.path.isfile(perseus_file):
self.thisptr = new Bitmap_cubical_complex_base_interface(perseus_file.encode('utf-8'))
else:
- print("file " + perseus_file + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ perseus_file)
else:
print("CubicalComplex can be constructed from dimensions and "
- "top_dimensional_cells or from a Perseus-style file name.")
+ "top_dimensional_cells or from a Perseus-style file name.",
+ file=sys.stderr)
def __dealloc__(self):
if self.thisptr != NULL:
@@ -122,8 +131,31 @@ cdef class CubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+ equivalent to :func:`persistence` when you do not want the list
+ :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
+ self.pcohptr.compute_persistence(homology_coeff_field, min_persistence)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number
@@ -136,30 +168,74 @@ cdef class CubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
- if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+ internal data structure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 1].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell).
+ """
+
+ assert self.pcohptr != NULL, "compute_persistence() must be called before cofaces_of_persistence_pairs()"
+
+ cdef vector[vector[int]] persistence_result
+ output = [[],[]]
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` function to be
launched first.
:note: betti_numbers function always returns [1, 0, 0, ...] as infinity
filtration cubes are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -174,13 +250,11 @@ cdef class CubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -191,13 +265,8 @@ cdef class CubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` function to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np.array(self.pcohptr.intervals_in_dimension(dimension))
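A sketch of the new two-step workflow on an illustrative 2x3 bitmap:

    import gudhi

    cc = gudhi.CubicalComplex(dimensions=[2, 3],
                              top_dimensional_cells=[1, 2, 3, 4, 5, 6])

    # compute_persistence() replaces calling persistence() purely for its
    # side effects; the accessors below now assert it has been called.
    cc.compute_persistence(homology_coeff_field=2)
    print(cc.betti_numbers())
    print(cc.persistence_intervals_in_dimension(0))

    # Indices refer to the input list of top-dimensional cells.
    regular_pairs, essential_pairs = cc.cofaces_of_persistence_pairs()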
diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc
new file mode 100644
index 00000000..ea80a9a8
--- /dev/null
+++ b/src/python/gudhi/hera.cc
@@ -0,0 +1,56 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <wasserstein.h> // Hera
+
+#include <pybind11_diagram_utils.h>
+
+double wasserstein_distance(
+ Dgm d1, Dgm d2,
+ double wasserstein_power, double internal_p,
+ double delta)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ hera::AuctionParams<double> params;
+ params.wasserstein_power = wasserstein_power;
+ // hera encodes infinity as -1...
+ if(std::isinf(internal_p)) internal_p = hera::get_infinity<double>();
+ params.internal_p = internal_p;
+ params.delta = delta;
+ // The extra parameters are purposely not exposed for now.
+ return hera::wasserstein_dist(diag1, diag2, params);
+}
+
+PYBIND11_MODULE(hera, m) {
+ m.def("wasserstein_distance", &wasserstein_distance,
+ py::arg("X"), py::arg("Y"),
+ py::arg("order") = 1,
+ py::arg("internal_p") = std::numeric_limits<double>::infinity(),
+ py::arg("delta") = .01,
+ R"pbdoc(
+ Compute the Wasserstein distance between two diagrams.
+ Points at infinity are supported.
+
+ Parameters:
+ X (n x 2 numpy array): First diagram
+ Y (n x 2 numpy array): Second diagram
+ order (float): Wasserstein exponent W_q
+ internal_p (float): Internal Minkowski norm L^p in R^2
+ delta (float): Relative error 1+delta
+
+ Returns:
+ float: Approximate Wasserstein distance W_q(X,Y)
+ )pbdoc");
+}
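A sketch of the exposed function with made-up diagrams; internal_p defaults to infinity and delta to 0.01 as declared above:

    import numpy as np
    from gudhi.hera import wasserstein_distance

    X = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]])
    Y = np.array([[2.8, 4.45], [9.5, 14.1]])

    # Approximate 1-Wasserstein distance, ground metric L^inf on R^2.
    print(wasserstein_distance(X, Y, order=1, internal_p=np.inf, delta=0.01))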
diff --git a/src/python/gudhi/nerve_gic.pyx b/src/python/gudhi/nerve_gic.pyx
index 382e71c5..9c89b239 100644
--- a/src/python/gudhi/nerve_gic.pyx
+++ b/src/python/gudhi/nerve_gic.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2018 Inria
@@ -7,11 +9,13 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import errno
import os
from libc.stdint cimport intptr_t
@@ -96,7 +100,8 @@ cdef class CoverComplex:
return self.thisptr != NULL
def set_point_cloud_from_range(self, cloud):
- """ Reads and stores the input point cloud from a vector stored in memory.
+ """ Reads and stores the input point cloud from a vector stored in
+ memory.
:param cloud: Input vector containing the point cloud.
:type cloud: vector[vector[double]]
@@ -104,7 +109,8 @@ cdef class CoverComplex:
return self.thisptr.set_point_cloud_from_range(cloud)
def set_distances_from_range(self, distance_matrix):
- """ Reads and stores the input distance matrix from a vector stored in memory.
+ """ Reads and stores the input distance matrix from a vector stored in
+ memory.
:param distance_matrix: Input vector containing the distance matrix.
:type distance_matrix: vector[vector[double]]
@@ -163,7 +169,8 @@ cdef class CoverComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisptr.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr)
+ self.thisptr.create_simplex_tree(
+ <Simplex_tree_interface_full_featured*>stree_int_ptr)
return stree
def find_simplices(self):
@@ -182,12 +189,12 @@ cdef class CoverComplex:
if os.path.isfile(off_file):
return self.thisptr.read_point_cloud(off_file.encode('utf-8'))
else:
- print("file " + off_file + " not found.")
- return False
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ off_file)
def set_automatic_resolution(self):
"""Computes the optimal length of intervals (i.e. the smallest interval
- length avoiding discretization artifacts—see :cite:`Carriere17c`) for a
+ length avoiding discretization artifacts - see :cite:`Carriere17c`) for a
functional cover.
:rtype: double
@@ -214,7 +221,8 @@ cdef class CoverComplex:
if os.path.isfile(color_file_name):
self.thisptr.set_color_from_file(color_file_name.encode('utf-8'))
else:
- print("file " + color_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ color_file_name)
def set_color_from_range(self, color):
"""Computes the function used to color the nodes of the simplicial
@@ -235,7 +243,8 @@ cdef class CoverComplex:
if os.path.isfile(cover_file_name):
self.thisptr.set_cover_from_file(cover_file_name.encode('utf-8'))
else:
- print("file " + cover_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ cover_file_name)
def set_cover_from_function(self):
"""Creates a cover C from the preimages of the function f.
@@ -268,7 +277,8 @@ cdef class CoverComplex:
if os.path.isfile(func_file_name):
self.thisptr.set_function_from_file(func_file_name.encode('utf-8'))
else:
- print("file " + func_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ func_file_name)
def set_function_from_range(self, function):
"""Creates the function f from a vector stored in memory.
@@ -288,7 +298,7 @@ cdef class CoverComplex:
def set_graph_from_automatic_rips(self, N=100):
"""Creates a graph G from a Rips complex whose threshold value is
- automatically tuned with subsampling—see.
+ automatically tuned with subsampling - see :cite:`Carriere17c`.
:param N: Number of subsampling iteration (the default reasonable value
is 100, but there is no guarantee on how to choose it).
@@ -302,14 +312,15 @@ cdef class CoverComplex:
"""Creates a graph G from a file containing the edges.
:param graph_file_name: Name of the input graph file. The graph file
- contains one edge per line, each edge being represented by the IDs of
- its two nodes.
+ contains one edge per line, each edge being represented by the IDs
+ of its two nodes.
:type graph_file_name: string
"""
if os.path.isfile(graph_file_name):
self.thisptr.set_graph_from_file(graph_file_name.encode('utf-8'))
else:
- print("file " + graph_file_name + " not found.")
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ graph_file_name)
def set_graph_from_OFF(self):
"""Creates a graph G from the triangulation given by the input OFF
diff --git a/src/python/gudhi/off_reader.pyx b/src/python/gudhi/off_reader.pyx
index 7e6d9d80..a3200704 100644
--- a/src/python/gudhi/off_reader.pyx
+++ b/src/python/gudhi/off_reader.pyx
@@ -1,5 +1,7 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ -
+# which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full
+# license details.
# Author(s): Vincent Rouvreau
#
# Copyright (C) 2016 Inria
@@ -7,9 +9,11 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.string cimport string
+import errno
import os
__author__ = "Vincent Rouvreau"
@@ -32,6 +36,6 @@ def read_points_from_off_file(off_file=''):
if os.path.isfile(off_file):
return read_points_from_OFF_file(off_file.encode('utf-8'))
else:
- print("file " + off_file + " not found.")
- return []
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
+ off_file)
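Same convention change for the OFF reader; a sketch with an illustrative file name:

    import gudhi

    try:
        points = gudhi.read_points_from_off_file(off_file="no_such_file.off")
    except FileNotFoundError as err:
        print(err)  # previously: printed a message and returned []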
diff --git a/src/python/gudhi/periodic_cubical_complex.pyx b/src/python/gudhi/periodic_cubical_complex.pyx
index 37f76201..06309772 100644
--- a/src/python/gudhi/periodic_cubical_complex.pyx
+++ b/src/python/gudhi/periodic_cubical_complex.pyx
@@ -7,11 +7,13 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from __future__ import print_function
from cython cimport numeric
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from libcpp.string cimport string
from libcpp cimport bool
+import sys
import os
import numpy as np
@@ -30,7 +32,9 @@ cdef extern from "Cubical_complex_interface.h" namespace "Gudhi":
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Periodic_cubical_complex_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Cubical_complex::Cubical_complex_interface<Gudhi::cubical_complex::Bitmap_cubical_complex_periodic_boundary_conditions_base<double>>>":
Periodic_cubical_complex_persistence_interface(Periodic_cubical_complex_base_interface * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
+ void compute_persistence(int homology_coeff_field, double min_persistence)
+ vector[pair[int, pair[double, double]]] get_persistence()
+ vector[vector[int]] cofaces_of_cubical_persistence_pairs()
vector[int] betti_numbers()
vector[int] persistent_betti_numbers(double from_value, double to_value)
vector[pair[double,double]] intervals_in_dimension(int dimension)
@@ -95,12 +99,12 @@ cdef class PeriodicCubicalComplex:
if os.path.isfile(perseus_file):
self.thisptr = new Periodic_cubical_complex_base_interface(perseus_file.encode('utf-8'))
else:
- print("file " + perseus_file + " not found.")
+ print("file " + perseus_file + " not found.", file=sys.stderr)
else:
print("CubicalComplex can be constructed from dimensions, "
"top_dimensional_cells and periodic_dimensions, or from "
"top_dimensional_cells and periodic_dimensions or from "
- "a Perseus-style file name.")
+ "a Perseus-style file name.", file=sys.stderr)
def __dealloc__(self):
if self.thisptr != NULL:
@@ -132,8 +136,31 @@ cdef class PeriodicCubicalComplex:
"""
return self.thisptr.dimension()
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function computes the persistence of the complex, so it can be
+ accessed through :func:`persistent_betti_numbers`,
+ :func:`persistence_intervals_in_dimension`, etc. This function is
+ equivalent to :func:`persistence` when you do not want the list
+ :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: Nothing.
+ """
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ assert self.__is_defined()
+ self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True)
+ self.pcohptr.compute_persistence(homology_coeff_field, min_persistence)
+
def persistence(self, homology_coeff_field=11, min_persistence=0):
- """This function returns the persistence of the complex.
+ """This function computes and returns the persistence of the complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number
@@ -146,30 +173,74 @@ cdef class PeriodicCubicalComplex:
:returns: list of pairs(dimension, pair(birth, death)) -- the
persistence of the complex.
"""
+ self.compute_persistence(homology_coeff_field, min_persistence)
+ return self.pcohptr.get_persistence()
+
+ def cofaces_of_persistence_pairs(self):
+ """A persistence interval is described by a pair of cells, one that creates the
+ feature and one that kills it. The filtration values of those 2 cells give coordinates
+ for a point in a persistence diagram, or a bar in a barcode. Structurally, in the
+ cubical complexes provided here, the filtration value of any cell is the minimum of the
+ filtration values of the maximal cells that contain it. Connecting persistence diagram
+ coordinates to the corresponding value in the input (i.e. the filtration values of
+ the top-dimensional cells) is useful for differentiation purposes.
+
+ This function returns a list of pairs of top-dimensional cells corresponding to
+ the persistence birth and death cells of the filtration. The cells are represented by
+ their indices in the input list of top-dimensional cells (and not their indices in the
+ internal data structure that includes non-maximal cells). Note that when two adjacent
+ top-dimensional cells have the same filtration value, we arbitrarily return one of the two
+ when calling the function on one of their common faces.
+
+ :returns: The top-dimensional cells/cofaces of the positive and negative cells,
+ together with the corresponding homological dimension, in two lists of numpy arrays of integers.
+ The first list contains the regular persistence pairs, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 2].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell,
+ index of negative top-dimensional cell).
+ The second list contains the essential features, grouped by dimension.
+ It contains numpy arrays of shape [number_of_persistence_points, 1].
+ The indices of the arrays in the list correspond to the homological dimensions, and the
+ integers of each row in each array correspond to: (index of positive top-dimensional cell).
+ """
+ cdef vector[vector[int]] persistence_result
if self.pcohptr != NULL:
- del self.pcohptr
- if self.thisptr != NULL:
- self.pcohptr = new Periodic_cubical_complex_persistence_interface(self.thisptr, True)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ output = [[],[]]
+ persistence_result = self.pcohptr.cofaces_of_cubical_persistence_pairs()
+ pr = np.array(persistence_result)
+
+ ess_ind = np.argwhere(pr[:,2] == -1)[:,0]
+ ess = pr[ess_ind]
+ max_h = max(ess[:,0])+1
+ for h in range(max_h):
+ hidxs = np.argwhere(ess[:,0] == h)[:,0]
+ output[1].append(ess[hidxs][:,1])
+
+ reg_ind = np.setdiff1d(np.array(range(len(pr))), ess_ind)
+ reg = pr[reg_ind]
+ max_h = max(reg[:,0])+1
+ for h in range(max_h):
+ hidxs = np.argwhere(reg[:,0] == h)[:,0]
+ output[0].append(reg[hidxs][:,1:])
+ else:
+ print("cofaces_of_persistence_pairs function requires persistence function"
+ " to be launched first.")
+ return output
def betti_numbers(self):
"""This function returns the Betti numbers of the complex.
:returns: list of int -- The Betti numbers ([B0, B1, ..., Bn]).
- :note: betti_numbers function requires persistence function to be
+ :note: betti_numbers function requires :func:`compute_persistence` function to be
launched first.
- :note: betti_numbers function always returns [1, 0, 0, ...] as infinity
+ :note: This function always returns the Betti numbers of a torus, as cubes
with an infinite filtration value are not removed from the complex.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the complex.
@@ -184,13 +255,11 @@ cdef class PeriodicCubicalComplex:
:returns: list of int -- The persistent Betti numbers ([B0, B1, ...,
Bn]).
- :note: persistent_betti_numbers function requires persistence
+ :note: persistent_betti_numbers function requires :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the complex in a
@@ -201,13 +270,8 @@ cdef class PeriodicCubicalComplex:
:returns: The persistence intervals.
:rtype: numpy array of dimension 2
- :note: intervals_in_dim function requires persistence function to be
+ :note: intervals_in_dim function requires :func:`compute_persistence` function to be
launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np.array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np.array(self.pcohptr.intervals_in_dimension(dimension))
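A sketch of the periodic variant, mirroring the cubical example above; the data and periodicity flags are illustrative:

    import gudhi

    pcc = gudhi.PeriodicCubicalComplex(dimensions=[2, 3],
                                       top_dimensional_cells=[1, 2, 3, 4, 5, 6],
                                       periodic_dimensions=[True, False])
    pcc.compute_persistence(homology_coeff_field=2)
    print(pcc.betti_numbers())  # see the note above on infinite cubes
    print(pcc.persistence_intervals_in_dimension(0))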
diff --git a/src/python/gudhi/persistence_graphical_tools.py b/src/python/gudhi/persistence_graphical_tools.py
index 246280de..6a74a6ca 100644
--- a/src/python/gudhi/persistence_graphical_tools.py
+++ b/src/python/gudhi/persistence_graphical_tools.py
@@ -5,6 +5,7 @@
# Copyright (C) 2016 Inria
#
# Modification(s):
+# - 2020/02 Theo Lacombe: Added more options for improved rendering and more flexibility.
# - YYYY/MM Author: Description of the modification
from os import path
@@ -14,7 +15,7 @@ import numpy as np
from gudhi.reader_utils import read_persistence_intervals_in_dimension
from gudhi.reader_utils import read_persistence_intervals_grouped_by_dimension
-__author__ = "Vincent Rouvreau, Bertrand Michel"
+__author__ = "Vincent Rouvreau, Bertrand Michel, Theo Lacombe"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
@@ -43,6 +44,19 @@ def __min_birth_max_death(persistence, band=0.0):
max_death += band
return (min_birth, max_death)
+
+def _array_handler(a):
+ '''
+ :param a: if array, assumes it is a (n x 2) np.array and returns a
+ persistence-compatible list (padding with 0), so that the
+ plot can be performed seamlessly.
+ '''
+ if isinstance(a[0][1], np.float64) or isinstance(a[0][1], float):
+ return [[0, x] for x in a]
+ else:
+ return a
+
+
def plot_persistence_barcode(
persistence=[],
persistence_file="",
@@ -52,14 +66,17 @@ def plot_persistence_barcode(
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
):
"""This function plots the persistence bar code from persistence values list
- or from a :doc:`persistence file <fileformats>`.
+ or a np.array of shape (N x 2) (representing a diagram
+ in a single homology dimension),
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: barcode transparency value (0.0 transparent through 1.0
@@ -81,11 +98,16 @@ def plot_persistence_barcode(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Fontsize to use for the title and axis labels.
+ :type fontsize: int
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
if persistence_file != "":
if path.isfile(persistence_file):
@@ -101,6 +123,8 @@ def plot_persistence_barcode(
print("file " + persistence_file + " not found.")
return None
+ persistence = _array_handler(persistence)
+
if max_barcodes != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_barcodes
@@ -163,7 +187,7 @@ def plot_persistence_barcode(
loc="lower right",
)
- axes.set_title("Persistence barcode")
+ axes.set_title("Persistence barcode", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
axes.axis([axis_start, infinity, 0, ind])
@@ -183,14 +207,17 @@ def plot_persistence_diagram(
inf_delta=0.1,
legend=False,
colormap=None,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=True
):
"""This function plots the persistence diagram from persistence values
- list or from a :doc:`persistence file <fileformats>`.
+ list, a np.array of shape (N x 2) representing a diagram in a single
+ homology dimension, or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>` style name
+ :param persistence: Persistence intervals values list. Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death)) or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_ file style name
(reset persistence if both are set).
:type persistence_file: string
:param alpha: plot transparency value (0.0 transparent through 1.0
@@ -214,11 +241,18 @@ def plot_persistence_diagram(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Fontsize to use for the title and axis labels.
+ :type fontsize: int
+ :param greyblock: whether to plot a grey patch on the lower half plane for nicer rendering. Default True.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
if persistence_file != "":
if path.isfile(persistence_file):
@@ -234,6 +268,8 @@ def plot_persistence_diagram(
print("file " + persistence_file + " not found.")
return None
+ persistence = _array_handler(persistence)
+
if max_plots != 1000:
print("Deprecated parameter. It has been replaced by max_intervals")
max_intervals = max_plots
@@ -256,19 +292,18 @@ def plot_persistence_diagram(
# Replace infinity values with max_death + delta for diagram to be more
# readable
infinity = max_death + delta
+ axis_end = max_death + delta / 2
axis_start = min_birth - delta
- # line display of equation : birth = death
- x = np.linspace(axis_start, infinity, 1000)
- # infinity line and text
- axes.plot(x, x, color="k", linewidth=1.0)
- axes.plot(x, [infinity] * len(x), linewidth=1.0, color="k", alpha=alpha)
- axes.text(axis_start, infinity, r"$\infty$", color="k", alpha=alpha)
# bootstrap band
if band > 0.0:
+ x = np.linspace(axis_start, infinity, 1000)
axes.fill_between(x, x, x + band, alpha=alpha, facecolor="red")
-
+ # lower diag patch
+ if greyblock:
+ axes.add_patch(mpatches.Polygon([[axis_start, axis_start], [axis_end, axis_start], [axis_end, axis_end]], fill=True, color='lightgrey'))
# Draw points in loop
+ pts_at_infty = False # Records presence of pts at infty
for interval in reversed(persistence):
if float(interval[1][1]) != float("inf"):
# Finite death case
@@ -279,10 +314,23 @@ def plot_persistence_diagram(
color=colormap[interval[0]],
)
else:
+ pts_at_infty = True
# Infinite death case for diagram to be nicer
axes.scatter(
interval[1][0], infinity, alpha=alpha, color=colormap[interval[0]]
)
+ if pts_at_infty:
+ # infinity line and text
+ axes.plot([axis_start, axis_end], [axis_start, axis_end], linewidth=1.0, color="k")
+ axes.plot([axis_start, axis_end], [infinity, infinity], linewidth=1.0, color="k", alpha=alpha)
+ # Infinity label
+ yt = axes.get_yticks()
+ yt = yt[np.where(yt < axis_end)] # to avoid plotting tick labels higher than infinity
+ yt = np.append(yt, infinity)
+ ytl = ["%.3f" % e for e in yt] # to avoid float precision error
+ ytl[-1] = r'$+\infty$'
+ axes.set_yticks(yt)
+ axes.set_yticklabels(ytl)
if legend:
dimensions = list(set(item[0] for item in persistence))
@@ -293,11 +341,11 @@ def plot_persistence_diagram(
]
)
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence diagram", fontsize=fontsize)
# Ends plot on infinity value and starts a little bit before min_birth
- axes.axis([axis_start, infinity, axis_start, infinity + delta])
- axes.set_title("Persistence diagram")
+ axes.axis([axis_start, axis_end, axis_start, infinity + delta/2])
return axes
except ImportError:
@@ -313,18 +361,26 @@ def plot_persistence_density(
dimension=None,
cmap=None,
legend=False,
- axes=None
+ axes=None,
+ fontsize=16,
+ greyblock=False
):
"""This function plots the persistence density from persistence
- values list or from a :doc:`persistence file <fileformats>`. Be
- aware that this function does not distinguish the dimension, it is
+ values list, a np.array of shape (N x 2) representing a diagram
+ in a single homology dimension,
+ or from a `persistence diagram <fileformats.html#persistence-diagram>`_ file.
+ Be aware that this function does not distinguish the dimension; it is
up to you to select the required one. This function also does not handle
degenerate data sets (scipy correlation matrix inversion can fail).
- :param persistence: Persistence intervals values list grouped by dimension.
- :type persistence: list of tuples(dimension, tuple(birth, death)).
- :param persistence_file: A :doc:`persistence file <fileformats>`
- style name (reset persistence if both are set).
+ :Requires: `SciPy <installation.html#scipy>`_
+
+ :param persistence: Persistence intervals values list.
+ Can be grouped by dimension or not.
+ :type persistence: an array of (dimension, array of (birth, death))
+ or an array of (birth, death).
+ :param persistence_file: A `persistence diagram <fileformats.html#persistence-diagram>`_
+ file style name (reset persistence if both are set).
:type persistence_file: string
:param nbins: Evaluate a gaussian kde on a regular grid of nbins x
nbins over data extents (default is 300)
@@ -355,11 +411,20 @@ def plot_persistence_density(
:param axes: A matplotlib-like subplot axes. If None, the plot is drawn on
a new set of axes.
:type axes: `matplotlib.axes.Axes`
+ :param fontsize: Fontsize to use for the title and axis labels.
+ :type fontsize: int
+ :param greyblock: whether to plot a grey patch on the lower half plane
+ for nicer rendering. Default False.
+ :type greyblock: boolean
:returns: (`matplotlib.axes.Axes`): The axes on which the plot was drawn.
"""
try:
import matplotlib.pyplot as plt
+ import matplotlib.patches as mpatches
from scipy.stats import kde
+ from matplotlib import rc
+ plt.rc('text', usetex=True)
+ plt.rc('font', family='serif')
if persistence_file != "":
if dimension is None:
@@ -374,6 +439,7 @@ def plot_persistence_density(
return None
if len(persistence) > 0:
+ persistence = _array_handler(persistence)
persistence_dim = np.array(
[
(dim_interval[1][0], dim_interval[1][1])
@@ -418,12 +484,16 @@ def plot_persistence_density(
# Make the plot
img = axes.pcolormesh(xi, yi, zi.reshape(xi.shape), cmap=cmap)
+ if greyblock:
+ axes.add_patch(mpatches.Polygon([[birth.min(), birth.min()], [death.max(), birth.min()], [death.max(), death.max()]], fill=True, color='lightgrey'))
+
if legend:
plt.colorbar(img, ax=axes)
- axes.set_xlabel("Birth")
- axes.set_ylabel("Death")
- axes.set_title("Persistence density")
+ axes.set_xlabel("Birth", fontsize=fontsize)
+ axes.set_ylabel("Death", fontsize=fontsize)
+ axes.set_title("Persistence density", fontsize=fontsize)
+
return axes
except ImportError:
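Thanks to _array_handler, the plotting helpers now also accept a bare (n x 2) array for a single homology dimension. A sketch, assuming matplotlib (and, with the usetex setting above, a LaTeX installation) is available; the values are made up:

    import numpy as np
    import gudhi

    dgm = np.array([[0.0, 1.0], [0.2, 0.7], [0.5, float("inf")]])
    axes = gudhi.plot_persistence_diagram(dgm, fontsize=12, greyblock=True)
    axes.figure.savefig("diagram.png")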
diff --git a/src/python/gudhi/point_cloud/__init__.py b/src/python/gudhi/point_cloud/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/python/gudhi/point_cloud/__init__.py
diff --git a/src/python/gudhi/point_cloud/dtm.py b/src/python/gudhi/point_cloud/dtm.py
new file mode 100644
index 00000000..13e16d24
--- /dev/null
+++ b/src/python/gudhi/point_cloud/dtm.py
@@ -0,0 +1,70 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from .knn import KNearestNeighbors
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class DistanceToMeasure:
+ """
+ Class to compute the distance to the empirical measure defined by a point set, as introduced in :cite:`dtm`.
+ """
+
+ def __init__(self, k, q=2, **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ q (float): order used to compute the distance to measure. Defaults to 2.
+ kwargs: same parameters as :class:`~gudhi.point_cloud.knn.KNearestNeighbors`, except that
+ metric="neighbors" means that :func:`transform` expects an array with the distances
+ to the k nearest neighbors.
+ """
+ self.k = k
+ self.q = q
+ self.params = kwargs
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for mass points.
+ """
+ if self.params.setdefault("metric", "euclidean") != "neighbors":
+ self.knn = KNearestNeighbors(
+ self.k, return_index=False, return_distance=True, sort_results=False, **self.params
+ )
+ self.knn.fit(X)
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed",
+ or distances to the k nearest neighbors if metric is "neighbors" (if the array has more
+ than k columns, the remaining ones are ignored).
+
+ Returns:
+ numpy.array: a 1-d array with, for each point of X, its distance to the measure defined
+ by the argument of :func:`fit`.
+ """
+ if self.params["metric"] == "neighbors":
+ distances = X[:, : self.k]
+ else:
+ distances = self.knn.transform(X)
+ distances = distances ** self.q
+ dtm = distances.sum(-1) / self.k
+ dtm = dtm ** (1.0 / self.q)
+ # We compute too many powers, 1/p in knn then q in dtm, 1/q in dtm then q or some log in the caller.
+ # Add option to skip the final root?
+ return dtm
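A sketch of the new class on random illustrative points:

    import numpy as np
    from gudhi.point_cloud.dtm import DistanceToMeasure

    pts = np.random.default_rng(0).random((100, 2))
    dtm = DistanceToMeasure(k=10)    # Euclidean metric by default
    values = dtm.fit_transform(pts)  # one DTM value per input point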
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
new file mode 100644
index 00000000..86008bc3
--- /dev/null
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -0,0 +1,328 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Marc Glisse
+#
+# Copyright (C) 2020 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy
+
+# TODO: https://github.com/facebookresearch/faiss
+
+__author__ = "Marc Glisse"
+__copyright__ = "Copyright (C) 2020 Inria"
+__license__ = "MIT"
+
+
+class KNearestNeighbors:
+ """
+ Class wrapping several implementations for computing the k nearest neighbors in a point set.
+
+ :Requires: `PyKeOps <installation.html#pykeops>`_, `SciPy <installation.html#scipy>`_,
+ `Scikit-learn <installation.html#scikit-learn>`_, and/or `Hnswlib <installation.html#hnswlib>`_
+ depending on the selected `implementation`.
+ """
+
+ def __init__(self, k, return_index=True, return_distance=False, metric="euclidean", **kwargs):
+ """
+ Args:
+ k (int): number of neighbors (possibly including the point itself).
+ return_index (bool): if True, return the index of each neighbor.
+ return_distance (bool): if True, return the distance to each neighbor.
+ implementation (str): choice of the library that does the real work.
+
+ * 'keops' for a brute-force, CUDA implementation through pykeops. Useful when the dimension becomes large (10+) but the number of points remains low (less than a million). Only "minkowski" and its aliases are supported.
+ * 'ckdtree' for scipy's cKDTree. Only "minkowski" and its aliases are supported.
+ * 'sklearn' for scikit-learn's NearestNeighbors. Note that this provides in particular an option algorithm="brute".
+ * 'hnsw' for hnswlib.Index. It can be very fast but does not provide guarantees. Only supports "euclidean" for now.
+ * None will try to select a sensible one (scipy if possible, scikit-learn otherwise).
+ metric (str): see `sklearn.neighbors.NearestNeighbors`.
+ eps (float): relative error when computing nearest neighbors with the cKDTree.
+ p (float): norm L^p on input points (including numpy.inf) if metric is "minkowski". Defaults to 2.
+ n_jobs (int): number of jobs to schedule for parallel processing of nearest neighbors on the CPU.
+ If -1 is given all processors are used. Default: 1.
+ sort_results (bool): if True, then distances and indices of each point are
+ sorted on return, so that the first column contains the closest points.
+ Otherwise, neighbors are returned in an arbitrary order. Defaults to True.
+ enable_autodiff (bool): if the input is a torch.tensor, jax.numpy.ndarray or tensorflow.Tensor, this
+ instructs the function to compute distances in a way that works with automatic differentiation.
+ This is experimental, not supported for all metrics, and requires the package EagerPy.
+ Defaults to False.
+ kwargs: additional parameters are forwarded to the backends.
+ """
+ self.k = k
+ self.return_index = return_index
+ self.return_distance = return_distance
+ self.metric = metric
+ self.params = kwargs
+ # canonicalize
+ if metric == "euclidean":
+ self.params["p"] = 2
+ self.metric = "minkowski"
+ elif metric == "manhattan":
+ self.params["p"] = 1
+ self.metric = "minkowski"
+ elif metric == "chebyshev":
+ self.params["p"] = numpy.inf
+ self.metric = "minkowski"
+ elif metric == "minkowski":
+ self.params["p"] = kwargs.get("p", 2)
+ if self.params.get("implementation") in {"keops", "ckdtree"}:
+ assert self.metric == "minkowski"
+ if self.params.get("implementation") == "hnsw":
+ assert self.metric == "minkowski" and self.params["p"] == 2
+ if not self.params.get("implementation"):
+ if self.metric == "minkowski":
+ self.params["implementation"] = "ckdtree"
+ else:
+ self.params["implementation"] = "sklearn"
+ if not return_distance:
+ self.params["enable_autodiff"] = False
+
+ def fit_transform(self, X, y=None):
+ return self.fit(X).transform(X)
+
+ def fit(self, X, y=None):
+ """
+ Args:
+ X (numpy.array): coordinates for reference points.
+ """
+ self.ref_points = X
+ if self.params.get("enable_autodiff", False):
+ import eagerpy as ep
+
+ X = ep.astensor(X)
+ if self.params["implementation"] != "keops" or not isinstance(X, ep.PyTorchTensor):
+ # I don't know a clever way to reuse a GPU tensor from tensorflow in pytorch
+ # without copying to/from the CPU.
+ X = X.numpy()
+ if self.params["implementation"] == "ckdtree":
+ # sklearn could handle this, but it is much slower
+ from scipy.spatial import cKDTree
+
+ self.kdtree = cKDTree(X)
+
+ if self.params["implementation"] == "sklearn" and self.metric != "precomputed":
+ # FIXME: sklearn badly handles "precomputed"
+ from sklearn.neighbors import NearestNeighbors
+
+ nargs = {
+ k: v for k, v in self.params.items() if k in {"p", "n_jobs", "metric_params", "algorithm", "leaf_size"}
+ }
+ self.nn = NearestNeighbors(self.k, metric=self.metric, **nargs)
+ self.nn.fit(X)
+
+ if self.params["implementation"] == "hnsw":
+ import hnswlib
+
+ self.graph = hnswlib.Index("l2", len(X[0])) # Actually returns squared distances
+ self.graph.init_index(
+ len(X), **{k: v for k, v in self.params.items() if k in {"ef_construction", "M", "random_seed"}}
+ )
+ n = self.params.get("num_threads")
+ if n is None:
+ n = self.params.get("n_jobs", 1)
+ self.params["num_threads"] = n
+ self.graph.add_items(X, num_threads=n)
+
+ return self
+
+ def transform(self, X):
+ """
+ Args:
+ X (numpy.array): coordinates for query points, or distance matrix if metric is "precomputed".
+
+ Returns:
+ numpy.array: if return_index, an array of shape (len(X), k) with the indices (in the argument
+ of :func:`fit`) of the k nearest neighbors to the points of X. If return_distance, an array of the
+ same shape with the distances to those neighbors. If both, a tuple with the two arrays, in this order.
+ """
+ if self.params.get("enable_autodiff", False):
+ # pykeops does not support autodiff for kmin yet, but when it does in the future,
+ # we may want a special path.
+ import eagerpy as ep
+
+ save_return_index = self.return_index
+ self.return_index = True
+ self.return_distance = False
+ self.params["enable_autodiff"] = False
+ try:
+ newX = ep.astensor(X)
+ if self.params["implementation"] != "keops" or (
+ not isinstance(newX, ep.PyTorchTensor) and not isinstance(newX, ep.NumPyTensor)
+ ):
+ newX = newX.numpy()
+ else:
+ newX = newX.raw
+ neighbors = self.transform(newX)
+ finally:
+ self.return_index = save_return_index
+ self.return_distance = True
+ self.params["enable_autodiff"] = True
+ # We can implement more later as needed
+ assert self.metric == "minkowski"
+ p = self.params["p"]
+ Y = ep.astensor(self.ref_points)
+ neighbor_pts = Y[
+ neighbors,
+ ]
+ diff = neighbor_pts - X[:, None, :]
+ if isinstance(diff, ep.PyTorchTensor):
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ distances = ep.astensor(diff.raw.norm(p, -1))
+ else:
+ distances = diff.norms.lp(p, -1)
+ if self.return_index:
+ return neighbors, distances.raw
+ else:
+ return distances.raw
+
+ metric = self.metric
+ k = self.k
+
+ if metric == "precomputed":
+ # scikit-learn could handle that, but they insist on calling fit() with an unused square array, which is too unnatural.
+ if self.return_index:
+ n_jobs = self.params.get("n_jobs", 1)
+ # Supposedly numpy can be compiled with OpenMP and handle this, but nobody does that?!
+ if n_jobs == 1:
+ neighbors = numpy.argpartition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results", True):
+ X = numpy.take_along_axis(X, neighbors, axis=-1)
+ ngb_order = numpy.argsort(X, axis=-1)
+ neighbors = numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+ else:
+ ngb_order = neighbors
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, ngb_order, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ if self.params.get("sort_results", True):
+
+ def func(M):
+ neighbors = numpy.argpartition(M, k - 1)[:, 0:k]
+ Y = numpy.take_along_axis(M, neighbors, axis=-1)
+ ngb_order = numpy.argsort(Y, axis=-1)
+ return numpy.take_along_axis(neighbors, ngb_order, axis=-1)
+
+ else:
+
+ def func(M):
+ return numpy.argpartition(M, k - 1)[:, 0:k]
+
+ neighbors = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ if self.return_distance:
+ distances = numpy.take_along_axis(X, neighbors, axis=-1)
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ n_jobs = self.params.get("n_jobs", 1)
+ if n_jobs == 1:
+ distances = numpy.partition(X, k - 1)[:, 0:k]
+ if self.params.get("sort_results"):
+ # partition is not guaranteed to sort the lower half, although it often does
+ distances.sort(axis=-1)
+ else:
+ from joblib import Parallel, delayed, effective_n_jobs
+ from sklearn.utils import gen_even_slices
+
+ if self.params.get("sort_results"):
+
+ def func(M):
+ # Not partitioning in place, so as not to modify the user's array
+ r = numpy.partition(M, k - 1)[:, 0:k]
+ r.sort(axis=-1)
+ return r
+
+ else:
+ def func(M):
+ return numpy.partition(M, k - 1)[:, 0:k]
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
+ distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
+ return distances
+ return None
+
+ if self.params["implementation"] == "hnsw":
+ ef = self.params.get("ef")
+ if ef is not None:
+ self.graph.set_ef(ef)
+ neighbors, distances = self.graph.knn_query(X, k, num_threads=self.params["num_threads"])
+ # The k nearest neighbors are always sorted. I couldn't find it in the doc, but the code calls searchKnn,
+ # which returns a priority_queue, and then fills the return array backwards with top/pop on the queue.
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, numpy.sqrt(distances)
+ else:
+ return neighbors
+ if self.return_distance:
+ return numpy.sqrt(distances)
+ return None
+
+ if self.params["implementation"] == "keops":
+ import torch
+ from pykeops.torch import LazyTensor
+
+ # 'float64' is slow except on super expensive GPUs. Allow it with some param?
+ XX = torch.as_tensor(X, dtype=torch.float32)
+ if X is self.ref_points:
+ YY = XX
+ else:
+ YY = torch.as_tensor(self.ref_points, dtype=torch.float32)
+ p = self.params["p"]
+ if p == numpy.inf:
+ # Requires pykeops 1.4 or later
+ mat = (LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs().max(-1)
+ elif p == 2: # Any even integer?
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])) ** p).sum(-1)
+ else:
+ mat = ((LazyTensor(XX[:, None, :]) - LazyTensor(YY[None, :, :])).abs() ** p).sum(-1)
+
+ if self.return_index:
+ if self.return_distance:
+ distances, neighbors = mat.Kmin_argKmin(k, dim=1)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return neighbors, distances
+ else:
+ neighbors = mat.argKmin(k, dim=1)
+ return neighbors
+ if self.return_distance:
+ distances = mat.Kmin(k, dim=1)
+ if p != numpy.inf:
+ distances = distances ** (1.0 / p)
+ return distances
+ return None
+
+ if self.params["implementation"] == "ckdtree":
+ qargs = {key: val for key, val in self.params.items() if key in {"p", "eps", "n_jobs"}}
+ distances, neighbors = self.kdtree.query(X, k=self.k, **qargs)
+ if self.return_index:
+ if self.return_distance:
+ return neighbors, distances
+ else:
+ return neighbors
+ if self.return_distance:
+ return distances
+ return None
+
+ assert self.params["implementation"] == "sklearn"
+ if self.return_distance:
+ distances, neighbors = self.nn.kneighbors(X, return_distance=True)
+ if self.return_index:
+ return neighbors, distances
+ else:
+ return distances
+ if self.return_index:
+ neighbors = self.nn.kneighbors(X, return_distance=False)
+ return neighbors
+ return None
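The "precomputed" branch above relies on numpy.argpartition plus numpy.take_along_axis to extract the k smallest entries per row of a distance matrix. A minimal self-contained sketch of that trick (plain NumPy, helper name hypothetical):

    import numpy as np

    def knn_from_distance_matrix(D, k):
        # Indices of the k smallest entries per row (not yet sorted among themselves).
        neighbors = np.argpartition(D, k - 1)[:, 0:k]
        # Gather the matching distances, then sort each row of k candidates.
        dist = np.take_along_axis(D, neighbors, axis=-1)
        order = np.argsort(dist, axis=-1)
        return (np.take_along_axis(neighbors, order, axis=-1),
                np.take_along_axis(dist, order, axis=-1))

    D = np.array([[0.0, 2.0, 1.0], [2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])
    idx, d = knn_from_distance_matrix(D, k=2)  # idx[0] is [0, 2], d[0] is [0.0, 1.0]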
diff --git a/src/python/gudhi/point_cloud/timedelay.py b/src/python/gudhi/point_cloud/timedelay.py
new file mode 100644
index 00000000..5292e752
--- /dev/null
+++ b/src/python/gudhi/point_cloud/timedelay.py
@@ -0,0 +1,94 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Martin Royer, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+
+
+class TimeDelayEmbedding:
+ """Point cloud transformation class. Embeds time-series data in the R^d according to
+ `Takens' Embedding Theorem <https://en.wikipedia.org/wiki/Takens%27s_theorem>`_ and obtains the
+ coordinates of each point.
+
+ Parameters
+ ----------
+ dim : int, optional (default=3)
+ Dimension `d` of the embedding space R^d.
+ delay : int, optional (default=1)
+ Time delay between two consecutive coordinates of an embedded point.
+ skip : int, optional (default=1)
+ Stride between the starting indices of two consecutive embedded points.
+
+ Example
+ -------
+
+ Given delay=3 and skip=2, the point cloud obtained by embedding
+ a scalar time series into R^3 is as follows::
+
+ time-series = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ point cloud = [[1, 4, 7],
+ [3, 6, 9]]
+
+ Given delay=1 and skip=1, the point cloud obtained by embedding
+ a 2D vector time series into R^4 is as follows::
+
+ time-series = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
+ point cloud = [[0, 1, 2, 3],
+ [2, 3, 4, 5],
+ [4, 5, 6, 7],
+ [6, 7, 8, 9]]
+ """
+
+ def __init__(self, dim=3, delay=1, skip=1):
+ self._dim = dim
+ self._delay = delay
+ self._skip = skip
+
+ def __call__(self, ts):
+ """Transform method for single time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[float] or Iterable[Iterable[float]]
+ A single time series, with scalar or vector values.
+
+ Returns
+ -------
+ point cloud : n x dim numpy array
+ Point cloud obtained by embedding the time series.
+ """
+ return self._transform(np.array(ts))
+
+ def fit(self, ts, y=None):
+ return self
+
+ def _transform(self, ts):
+ """Guts of transform method."""
+ if ts.ndim == 1:
+ repeat = self._dim
+ else:
+ assert self._dim % ts.shape[1] == 0
+ repeat = self._dim // ts.shape[1]
+ end = len(ts) - self._delay * (repeat - 1)
+ short = np.arange(0, end, self._skip)
+ vertical = np.arange(0, repeat * self._delay, self._delay)
+ return ts[np.add.outer(short, vertical)].reshape(len(short), -1)
+
+ def transform(self, ts):
+ """Transform method for multiple time-series data.
+
+ Parameters
+ ----------
+ ts : Iterable[Iterable[float]] or Iterable[Iterable[Iterable[float]]]
+ Multiple time series, each with scalar or vector values.
+
+ Returns
+ -------
+ point clouds : list of n x dim numpy arrays
+ Point clouds obtained by embedding each time series.
+ """
+ return [self._transform(np.array(s)) for s in ts]
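A quick usage sketch reproducing the first docstring example (assuming the module is importable as gudhi.point_cloud.timedelay):

    from gudhi.point_cloud.timedelay import TimeDelayEmbedding

    tde = TimeDelayEmbedding(dim=3, delay=3, skip=2)
    # Embeds the scalar series 1..10 into R^3: rows [1, 4, 7] and [3, 6, 9].
    print(tde(list(range(1, 11))))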
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
index bfc83aff..596f4f07 100644
--- a/src/python/gudhi/representations/kernel_methods.py
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -9,13 +9,83 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.metrics import pairwise_distances
-from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+from sklearn.metrics import pairwise_distances, pairwise_kernels
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance
+from .preprocessing import Padding
#############################################
# Kernel methods ############################
#############################################
+def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ weight: weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence weighted Gaussian kernel value between persistence diagrams.
+ """
+ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))])
+ ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))])
+ if kernel_approx is not None:
+ approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0)
+ approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0)
+ return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T)
+ else:
+ W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth))
+ return np.sum(np.multiply(W, E))
+
+def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence scale space kernel value between persistence diagrams.
+ """
+ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0)
+ DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0)
+ weight_pss = lambda x: 1 if x[1] >= x[0] else -1
+ return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth)
+
+def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", **kwargs):
+ """
+ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only.
+ kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): kernel matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if kernel == "sliced_wasserstein":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"])
+ elif kernel == "persistence_fisher":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"])
+ elif kernel == "persistence_scale_space":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs))
+ elif kernel == "persistence_weighted_gaussian":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs))
+ else:
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(kernel, X, Y, **kwargs))
+
class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -29,7 +99,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
"""
self.bandwidth = bandwidth
- self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+ self.num_directions = num_directions
def fit(self, X, y=None):
"""
@@ -39,7 +109,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sw_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -52,7 +122,20 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
"""
- return np.exp(-self.sw_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein kernel value.
+ """
+ return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) / self.bandwidth)
class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
"""
@@ -78,10 +161,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
- if self.kernel_approx is not None:
- self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -94,31 +174,20 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
"""
- Xp = list(X)
- Xfit = np.zeros((len(Xp), len(self.diagrams_)))
- if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
- if self.kernel_approx is not None:
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
- else:
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
- Xfit[j,i] = Xfit[i,j]
- else:
- ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
- if self.kernel_approx is not None:
- approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
- else:
- for i in range(len(Xp)):
- for j in range(len(self.diagrams_)):
- W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
-
- return Xfit
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceWeightedGaussianKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence weighted Gaussian kernel value.
+ """
+ return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth)
class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
"""
@@ -132,7 +201,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
- self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
def fit(self, X, y=None):
"""
@@ -142,11 +211,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- for i in range(len(self.diagrams_)):
- op_D = self.diagrams_[i][:,[1,0]]
- self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
- self.pwg_.fit(X)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -159,11 +224,20 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
"""
- Xp = list(X)
- for i in range(len(Xp)):
- op_X = Xp[i][:,[1,0]]
- Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
- return self.pwg_.transform(Xp)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceScaleSpaceKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence scale space kernel value.
+ """
+ return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
"""
@@ -179,7 +253,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
self.bandwidth = bandwidth
- self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+ self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx
def fit(self, X, y=None):
"""
@@ -189,7 +263,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.pf_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -202,5 +276,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
"""
- return np.exp(-self.pf_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher kernel value.
+ """
+ return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) / self.bandwidth_fisher)
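With the __call__ methods added above, each kernel can be evaluated on a single pair of diagrams without going through fit/transform. A short sketch (constructor signature assumed from the docstrings):

    import numpy as np
    from gudhi.representations.kernel_methods import SlicedWassersteinKernel

    D1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    D2 = np.array([[0.1, 1.2]])

    swk = SlicedWassersteinKernel(bandwidth=1.0, num_directions=10)
    K = swk.fit([D1]).transform([D2])  # 1 x 1 kernel matrix
    v = swk(D1, D2)                    # kernel value for a single pair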
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
index 5f9ec6ab..8a32f7e9 100644
--- a/src/python/gudhi/representations/metrics.py
+++ b/src/python/gudhi/representations/metrics.py
@@ -10,17 +10,168 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import pairwise_distances
-try:
- from .. import bottleneck_distance
- USE_GUDHI = True
-except ImportError:
- USE_GUDHI = False
- print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+from gudhi.hera import wasserstein_distance as hera_wasserstein_distance
+from .preprocessing import Padding
#############################################
# Metrics ###################################
#############################################
+def _sliced_wasserstein_distance(D1, D2, num_directions):
+ """
+ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ float: the sliced Wasserstein distance between persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ approx1 = np.matmul(D1, lines)
+ approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines)
+ approx2 = np.matmul(D2, lines)
+ approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines)
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _compute_persistence_diagram_projections(X, num_directions):
+ """
+ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): list of persistence diagrams.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X]
+ return XX
+
+def _sliced_wasserstein_distance_on_projections(D1, D2):
+ """
+ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections.
+ D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections.
+
+ Returns:
+ float: the sliced Wasserstein distance between the projected persistence diagrams.
+ """
+ lim1, lim2 = len(D1) // 2, len(D2) // 2
+ approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:]
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions.
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence Fisher distance between persistence diagrams.
+ """
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections1 = np.matmul(D1, projection)
+ diagonal_projections2 = np.matmul(D2, projection)
+ if kernel_approx is not None:
+ approx1 = kernel_approx.transform(D1)
+ approx_diagonal1 = kernel_approx.transform(diagonal_projections1)
+ approx2 = kernel_approx.transform(D2)
+ approx_diagonal2 = kernel_approx.transform(diagonal_projections2)
+ Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0)
+ U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0)
+ U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+
+def _sklearn_wrapper(metric, X, Y, **kwargs):
+ """
+ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments.
+ """
+ if Y is None:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], X[int(b[0])], **kwargs)
+ else:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], Y[int(b[0])], **kwargs)
+ return flat_metric
+
+PAIRWISE_DISTANCE_FUNCTIONS = {
+ "wasserstein": hera_wasserstein_distance,
+ "hera_wasserstein": hera_wasserstein_distance,
+ "persistence_fisher": _persistence_fisher_distance,
+}
+
+def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs):
+ """
+ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only.
+ metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): distance matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if metric == "bottleneck":
+ try:
+ from .. import bottleneck_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs))
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
+ elif metric == "pot_wasserstein":
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs))
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
+ elif metric == "sliced_wasserstein":
+ Xproj = _compute_persistence_diagram_projections(X, **kwargs)
+ Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs)
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj))
+ elif isinstance(metric, str):
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs))
+ else:
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs))
+
class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -33,8 +184,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
"""
self.num_directions = num_directions
- thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
- self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
def fit(self, X, y=None):
"""
@@ -45,9 +194,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- diag_proj = (1./2) * np.ones((2,2))
- self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
return self
def transform(self, X):
@@ -60,31 +206,26 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
"""
- Xfit = np.zeros((len(X), len(self.approx_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.approx_)):
- for j in range(i+1, len(self.approx_)):
- A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
- Xfit[j,i] = Xfit[i,j]
- else:
- diag_proj = (1./2) * np.ones((2,2))
- approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
- for i in range(len(approx)):
- for j in range(len(self.approx_)):
- A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions)
- return Xfit
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein distance.
+ """
+ return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)
class BottleneckDistance(BaseEstimator, TransformerMixin):
"""
- This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+
+ :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
"""
def __init__(self, epsilon=None):
"""
@@ -116,34 +257,26 @@ class BottleneckDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
"""
- num_diag1 = len(X)
-
- #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- if X is self.diagrams_:
- matrix = np.zeros((num_diag1, num_diag1))
-
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(i+1, num_diag1):
- matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
- matrix[j,i] = matrix[i,j]
- else:
- print("Gudhi built without CGAL: returning a null matrix")
-
- else:
- num_diag2 = len(self.diagrams_)
- matrix = np.zeros((num_diag1, num_diag2))
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon)
+ return Xfit
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(num_diag2):
- matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
- else:
- print("Gudhi built without CGAL: returning a null matrix")
+ def __call__(self, diag1, diag2):
+ """
+ Apply BottleneckDistance on a single pair of persistence diagrams and output the result.
- Xfit = matrix
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
- return Xfit
+ Returns:
+ float: bottleneck distance.
+ """
+ try:
+ from .. import bottleneck_distance
+ return bottleneck_distance(diag1, diag2, e=self.epsilon)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
"""
@@ -168,11 +301,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- projection = (1./2) * np.ones((2,2))
- self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
return self
def transform(self, X):
@@ -185,60 +313,83 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
"""
- Xfit = np.zeros((len(X), len(self.diagrams_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
- else:
- Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher distance.
+ """
+ return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+class WassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams.
+ """
+ def __init__(self, order=2, internal_p=2, mode="pot", delta=0.01):
+ """
+ Constructor for the WassersteinDistance class.
+
+ Parameters:
+ order (int): exponent of the Wasserstein distance (default 2); see :func:`gudhi.wasserstein.wasserstein_distance`.
+ internal_p (int): ground metric on the (upper-half) plane, i.e. the norm l_p in R^2 (default 2, the Euclidean norm); see :func:`gudhi.wasserstein.wasserstein_distance`.
+ mode (str): method used to compute the Wasserstein distance, either "pot" or "hera" (default "pot").
+ delta (float): relative error 1+delta; used only if mode == "hera" (default 0.01).
+ """
+ self.order, self.internal_p, self.mode = order, internal_p, mode
+ self.metric = "pot_wasserstein" if mode == "pot" else "hera_wasserstein"
+ self.delta = delta
+
+ def fit(self, X, y=None):
+ """
+ Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances.
+ """
+ if self.metric == "hera_wasserstein":
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta)
else:
- projection = (1./2) * np.ones((2,2))
- diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
- for i in range(len(X)):
- for j in range(len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- else:
- Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False)
return Xfit
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply WassersteinDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: Wasserstein distance.
+ """
+ if self.metric == "hera_wasserstein":
+ return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta)
+ else:
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
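A usage sketch for the new WassersteinDistance class; mode="hera" sidesteps the optional POT dependency:

    import numpy as np
    from gudhi.representations.metrics import WassersteinDistance

    D1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    D2 = np.array([[0.1, 1.2]])

    wd = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.01)
    M = wd.fit([D1, D2]).transform([D1])  # 1 x 2 distance matrix
    d = wd(D1, D2)                        # distance for a single pair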
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a39b00e4..a8545349 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -54,6 +54,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
Xfit.append(new_diag)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BirthPersistenceTransform on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Clamping(BaseEstimator, TransformerMixin):
"""
This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
@@ -142,6 +154,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin):
Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
return Xfit
+ def __call__(self, diag):
+ """
+ Apply DiagramScaler on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Padding(BaseEstimator, TransformerMixin):
"""
This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
@@ -186,6 +210,18 @@ class Padding(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Padding on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: padded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class ProminentPoints(BaseEstimator, TransformerMixin):
"""
This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
@@ -259,6 +295,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply ProminentPoints on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: thresholded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class DiagramSelector(BaseEstimator, TransformerMixin):
"""
This is a class for extracting finite or essential points in persistence diagrams.
@@ -303,3 +351,15 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
else:
Xfit = X
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply DiagramSelector on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: extracted persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
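All the __call__ additions in this file follow one pattern: obj(diag) is shorthand for obj.fit_transform([diag])[0]. A sketch with BirthPersistenceTransform (assuming a no-argument constructor):

    import numpy as np
    from gudhi.representations.preprocessing import BirthPersistenceTransform

    diag = np.array([[0.0, 1.0], [0.5, 2.0]])
    bpt = BirthPersistenceTransform()
    # Same as bpt.fit_transform([diag])[0]; maps (birth, death) to (birth, persistence).
    print(bpt(diag))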
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index fe26dbe2..46fee086 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -81,6 +81,18 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply PersistenceImage on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**): output persistence image.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
@@ -170,6 +182,18 @@ class Landscape(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Landscape on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
@@ -248,6 +272,18 @@ class Silhouette(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Silhouette on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output persistence silhouette.
+ """
+ return self.fit_transform([diag])[0,:]
+
class BettiCurve(BaseEstimator, TransformerMixin):
"""
This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
@@ -308,6 +344,18 @@ class BettiCurve(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BettiCurve on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output Betti curve.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
@@ -378,6 +426,18 @@ class Entropy(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Entropy on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ return self.fit_transform([diag])[0,:]
+
class TopologicalVector(BaseEstimator, TransformerMixin):
"""
This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated to a persistence diagram is the sorted vector of a slight modification of the pairwise distances between the persistence diagram points. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details.
@@ -431,6 +491,18 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply TopologicalVector on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output topological vector.
+ """
+ return self.fit_transform([diag])[0,:]
+
class ComplexPolynomial(BaseEstimator, TransformerMixin):
"""
This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
@@ -490,3 +562,15 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin):
coeff = np.array(coeff[::-1])[1:]
Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply ComplexPolynomial on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output complex vector of coefficients.
+ """
+ return self.fit_transform([diag])[0,:]
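The vectorizers get the same single-diagram shorthand, returning fit_transform([diag])[0,:]. A sketch with BettiCurve (the parameter names resolution and sample_range are assumptions, not shown in this diff):

    import numpy as np
    from gudhi.representations.vector_methods import BettiCurve

    diag = np.array([[0.0, 1.0], [0.5, 2.0]])
    bc = BettiCurve(resolution=10, sample_range=[0.0, 2.0])  # parameter names assumed
    print(bc(diag))  # 10 evenly spaced samples of the Betti curve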
diff --git a/src/python/gudhi/rips_complex.pyx b/src/python/gudhi/rips_complex.pyx
index deb8057a..72e82c79 100644
--- a/src/python/gudhi/rips_complex.pyx
+++ b/src/python/gudhi/rips_complex.pyx
@@ -23,12 +23,12 @@ __license__ = "MIT"
cdef extern from "Rips_complex_interface.h" namespace "Gudhi":
cdef cppclass Rips_complex_interface "Gudhi::rips_complex::Rips_complex_interface":
- Rips_complex_interface()
- void init_points(vector[vector[double]] values, double threshold)
- void init_matrix(vector[vector[double]] values, double threshold)
- void init_points_sparse(vector[vector[double]] values, double threshold, double sparse)
- void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse)
- void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) except +
+ Rips_complex_interface() nogil
+ void init_points(vector[vector[double]] values, double threshold) nogil
+ void init_matrix(vector[vector[double]] values, double threshold) nogil
+ void init_points_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void init_matrix_sparse(vector[vector[double]] values, double threshold, double sparse) nogil
+ void create_simplex_tree(Simplex_tree_interface_full_featured* simplex_tree, int dim_max) nogil except +
# RipsComplex python interface
cdef class RipsComplex:
@@ -97,6 +97,7 @@ cdef class RipsComplex:
"""
stree = SimplexTree()
cdef intptr_t stree_int_ptr=stree.thisptr
- self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr,
- max_dimension)
+ cdef int maxdim = max_dimension
+ with nogil:
+ self.thisref.create_simplex_tree(<Simplex_tree_interface_full_featured*>stree_int_ptr, maxdim)
return stree
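The point of the added nogil qualifiers is that create_simplex_tree can now release the GIL while the C++ side builds the complex. A speculative sketch of what this enables (assuming a build with these changes): several complexes constructed concurrently from Python threads.

    from concurrent.futures import ThreadPoolExecutor
    import gudhi

    clouds = [[[0, 0], [1, 0], [0, 1]], [[0, 0], [2, 0], [0, 2]]]

    def build(points):
        # Runs mostly in C++ with the GIL released
        rips = gudhi.RipsComplex(points=points, max_edge_length=3.0)
        return rips.create_simplex_tree(max_dimension=2)

    with ThreadPoolExecutor() as pool:
        trees = list(pool.map(build, clouds))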
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 96d14079..e748ac40 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -21,35 +21,60 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_options_full_featured:
pass
+ cdef cppclass Simplex_tree_simplex_handle "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Simplex_handle":
+ pass
+
+ cdef cppclass Simplex_tree_simplices_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Complex_simplex_iterator":
+ Simplex_tree_simplices_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_simplices_iterator operator++() nogil
+ bint operator!=(Simplex_tree_simplices_iterator) nogil
+
+ cdef cppclass Simplex_tree_skeleton_iterator "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>::Skeleton_simplex_iterator":
+ Simplex_tree_skeleton_iterator() nogil
+ Simplex_tree_simplex_handle& operator*() nogil
+ Simplex_tree_skeleton_iterator operator++() nogil
+ bint operator!=(Simplex_tree_skeleton_iterator) nogil
+
+
cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>":
- Simplex_tree()
- double simplex_filtration(vector[int] simplex)
- void assign_simplex_filtration(vector[int] simplex, double filtration)
- void initialize_filtration()
- int num_vertices()
- int num_simplices()
- void set_dimension(int dimension)
- int dimension()
- int upper_bound_dimension()
- bool find_simplex(vector[int] simplex)
- bool insert_simplex_and_subfaces(vector[int] simplex,
- double filtration)
- vector[pair[vector[int], double]] get_filtration()
- vector[pair[vector[int], double]] get_skeleton(int dimension)
- vector[pair[vector[int], double]] get_star(vector[int] simplex)
- vector[pair[vector[int], double]] get_cofaces(vector[int] simplex,
- int dimension)
- void expansion(int max_dim) except +
- void remove_maximal_simplex(vector[int] simplex)
- bool prune_above_filtration(double filtration)
- bool make_filtration_non_decreasing()
+ Simplex_tree() nogil
+ double simplex_filtration(vector[int] simplex) nogil
+ void assign_simplex_filtration(vector[int] simplex, double filtration) nogil
+ void initialize_filtration() nogil
+ int num_vertices() nogil
+ int num_simplices() nogil
+ void set_dimension(int dimension) nogil
+ int dimension() nogil
+ int upper_bound_dimension() nogil
+ bool find_simplex(vector[int] simplex) nogil
+ bool insert(vector[int] simplex, double filtration) nogil
+ vector[pair[vector[int], double]] get_star(vector[int] simplex) nogil
+ vector[pair[vector[int], double]] get_cofaces(vector[int] simplex, int dimension) nogil
+ void expansion(int max_dim) nogil except +
+ void remove_maximal_simplex(vector[int] simplex) nogil
+ bool prune_above_filtration(double filtration) nogil
+ bool make_filtration_non_decreasing() nogil
+ void compute_extended_filtration() nogil
+ vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
+ # Iterators over Simplex tree
+ pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil
+ Simplex_tree_simplices_iterator get_simplices_iterator_end() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_begin() nogil
+ vector[Simplex_tree_simplex_handle].const_iterator get_filtration_iterator_end() nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_begin(int dimension) nogil
+ Simplex_tree_skeleton_iterator get_skeleton_iterator_end(int dimension) nogil
cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
cdef cppclass Simplex_tree_persistence_interface "Gudhi::Persistent_cohomology_interface<Gudhi::Simplex_tree<Gudhi::Simplex_tree_options_full_featured>>":
- Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max)
- vector[pair[int, pair[double, double]]] get_persistence(int homology_coeff_field, double min_persistence)
- vector[int] betti_numbers()
- vector[int] persistent_betti_numbers(double from_value, double to_value)
- vector[pair[double,double]] intervals_in_dimension(int dimension)
- void write_output_diagram(string diagram_file_name)
- vector[pair[vector[int], vector[int]]] persistence_pairs()
+ Simplex_tree_persistence_interface(Simplex_tree_interface_full_featured * st, bool persistence_dim_max) nogil
+ void compute_persistence(int homology_coeff_field, double min_persistence) nogil
+ vector[pair[int, pair[double, double]]] get_persistence() nogil
+ vector[int] betti_numbers() nogil
+ vector[int] persistent_betti_numbers(double from_value, double to_value) nogil
+ vector[pair[double,double]] intervals_in_dimension(int dimension) nogil
+ void write_output_diagram(string diagram_file_name) nogil except +
+ vector[pair[vector[int], vector[int]]] persistence_pairs() nogil
+ pair[vector[vector[int]], vector[vector[int]]] lower_star_generators() nogil
+ pair[vector[vector[int]], vector[vector[int]]] flag_generators() nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index b18627c4..9cb24221 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -7,7 +7,9 @@
# Modification(s):
# - YYYY/MM Author: Description of the modification
+from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
+import numpy
from numpy import array as np_array
cimport simplex_tree
@@ -31,7 +33,7 @@ cdef class SimplexTree:
cdef public intptr_t thisptr
# Get the pointer casted as it should be
- cdef Simplex_tree_interface_full_featured* get_ptr(self):
+ cdef Simplex_tree_interface_full_featured* get_ptr(self) nogil:
return <Simplex_tree_interface_full_featured*>(self.thisptr)
cdef Simplex_tree_persistence_interface * pcohptr
@@ -89,7 +91,7 @@ cdef class SimplexTree:
(with more :meth:`assign_filtration` or
:meth:`make_filtration_non_decreasing` for instance) before calling
any function that relies on the filtration property, like
- :meth:`initialize_filtration`.
+ :meth:`persistence`.
"""
self.get_ptr().assign_simplex_filtration(simplex, filtration)
@@ -97,17 +99,10 @@ cdef class SimplexTree:
"""This function initializes and sorts the simplicial complex
filtration vector.
- .. note::
-
- This function must be launched before
- :func:`persistence()<gudhi.SimplexTree.persistence>`,
- :func:`betti_numbers()<gudhi.SimplexTree.betti_numbers>`,
- :func:`persistent_betti_numbers()<gudhi.SimplexTree.persistent_betti_numbers>`,
- or :func:`get_filtration()<gudhi.SimplexTree.get_filtration>`
- after :func:`inserting<gudhi.SimplexTree.insert>` or
- :func:`removing<gudhi.SimplexTree.remove_maximal_simplex>`
- simplices.
+ .. deprecated:: 3.2.0
"""
+ import warnings
+ warnings.warn("Since Gudhi 3.2, calling SimplexTree.initialize_filtration is unnecessary.", DeprecationWarning)
self.get_ptr().initialize_filtration()
def num_vertices(self):
@@ -138,9 +133,9 @@ cdef class SimplexTree:
This function is not constant time because it can recompute
dimension if required (can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
methods).
"""
return self.get_ptr().dimension()
@@ -165,9 +160,9 @@ cdef class SimplexTree:
This function must be used with caution because it disables
dimension recomputation when required
(this recomputation can be triggered by
- :func:`remove_maximal_simplex()<gudhi.SimplexTree.remove_maximal_simplex>`
+ :func:`remove_maximal_simplex`
or
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
).
"""
self.get_ptr().set_dimension(<int>dimension)
@@ -181,10 +176,7 @@ cdef class SimplexTree:
:returns: true if the simplex was found, false otherwise.
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().find_simplex(csimplex)
+ return self.get_ptr().find_simplex(simplex)
def insert(self, simplex, filtration=0.0):
"""This function inserts the given N-simplex and its subfaces with the
@@ -201,28 +193,36 @@ cdef class SimplexTree:
otherwise (whatever its original filtration value).
:rtype: bool
"""
- cdef vector[int] csimplex
- for i in simplex:
- csimplex.push_back(i)
- return self.get_ptr().insert_simplex_and_subfaces(csimplex,
- <double>filtration)
+ return self.get_ptr().insert(simplex, <double>filtration)
- def get_filtration(self):
- """This function returns a list of all simplices with their given
+ def get_simplices(self):
+ """This function returns a generator with simplices and their given
filtration values.
+ :returns: The simplices.
+ :rtype: generator with tuples(simplex, filtration)
+ """
+ cdef Simplex_tree_simplices_iterator it = self.get_ptr().get_simplices_iterator_begin()
+ cdef Simplex_tree_simplices_iterator end = self.get_ptr().get_simplices_iterator_end()
+ cdef Simplex_tree_simplex_handle sh = dereference(it)
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
+
+ def get_filtration(self):
+ """This function returns a generator with simplices and their given
+ filtration values sorted by increasing filtration values.
+
:returns: The simplices sorted by increasing filtration values.
- :rtype: list of tuples(simplex, filtration)
+ :rtype: generator with tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] filtration \
- = self.get_ptr().get_filtration()
- ct = []
- for filtered_complex in filtration:
- v = []
- for vertex in filtered_complex.first:
- v.append(vertex)
- ct.append((v, filtered_complex.second))
- return ct
+ cdef vector[Simplex_tree_simplex_handle].const_iterator it = self.get_ptr().get_filtration_iterator_begin()
+ cdef vector[Simplex_tree_simplex_handle].const_iterator end = self.get_ptr().get_filtration_iterator_end()
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
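Note the behavioral change here: get_filtration (and the new get_simplices) now return generators instead of lists, so callers that relied on list semantics need an explicit conversion. A minimal sketch:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1, 2], filtration=1.0)
    for simplex, filt in st.get_filtration():    # lazy iteration
        print(simplex, filt)
    filtration_list = list(st.get_filtration())  # old list behavior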
def get_skeleton(self, dimension):
"""This function returns the (simplices of the) skeleton of a maximum
@@ -233,15 +233,12 @@ cdef class SimplexTree:
:returns: The (simplices of the) skeleton of a maximum dimension.
:rtype: list of tuples(simplex, filtration)
"""
- cdef vector[pair[vector[int], double]] skeleton \
- = self.get_ptr().get_skeleton(<int>dimension)
- ct = []
- for filtered_simplex in skeleton:
- v = []
- for vertex in filtered_simplex.first:
- v.append(vertex)
- ct.append((v, filtered_simplex.second))
- return ct
+ cdef Simplex_tree_skeleton_iterator it = self.get_ptr().get_skeleton_iterator_begin(dimension)
+ cdef Simplex_tree_skeleton_iterator end = self.get_ptr().get_skeleton_iterator_end(dimension)
+
+ while it != end:
+ yield self.get_ptr().get_simplex_and_filtration(dereference(it))
+ preincrement(it)
def get_star(self, simplex):
"""This function returns the star of a given N-simplex.
@@ -298,17 +295,12 @@ cdef class SimplexTree:
.. note::
- Be aware that removing is shifting data in a flat_map
- (:func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>` to be done).
-
- .. note::
-
The dimension of the simplicial complex may be lower after calling
remove_maximal_simplex than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
method will return the old value, which
remains a valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
to recompute the exact dimension.
"""
self.get_ptr().remove_maximal_simplex(simplex)
@@ -324,24 +316,14 @@ cdef class SimplexTree:
.. note::
- Some simplex tree functions require the filtration to be valid.
- prune_above_filtration function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
-
- .. note::
-
Note that the dimension of the simplicial complex may be lower
after calling
- :func:`prune_above_filtration()<gudhi.SimplexTree.prune_above_filtration>`
+ :func:`prune_above_filtration`
than it was before. However,
- :func:`upper_bound_dimension()<gudhi.SimplexTree.upper_bound_dimension>`
+ :func:`upper_bound_dimension`
will return the old value, which remains a
valid upper bound. If you care, you can call
- :func:`dimension()<gudhi.SimplexTree.dimension>`
+ :func:`dimension`
method to recompute the exact dimension.
"""
return self.get_ptr().prune_above_filtration(filtration)
@@ -363,7 +345,9 @@ cdef class SimplexTree:
:param max_dim: The maximal dimension.
:type max_dim: int.
"""
- self.get_ptr().expansion(max_dim)
+ cdef int maxdim = max_dim
+ with nogil:
+ self.get_ptr().expansion(maxdim)
def make_filtration_non_decreasing(self):
"""This function ensures that each simplex has a higher filtration
@@ -372,22 +356,63 @@ cdef class SimplexTree:
:returns: True if any filtration value was modified,
False if the filtration was already non-decreasing.
:rtype: bool
+ """
+ return self.get_ptr().make_filtration_non_decreasing()
+ def extend_filtration(self):
+ """ Extend filtration for computing extended persistence. This function only uses the
+ filtration values at the 0-dimensional simplices, and computes the extended persistence
+ diagram induced by the lower-star filtration computed with these values.
.. note::
- Some simplex tree functions require the filtration to be valid.
- make_filtration_non_decreasing function is not launching
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- but returns the filtration modification
- information. If the complex has changed , please call
- :func:`initialize_filtration()<gudhi.SimplexTree.initialize_filtration>`
- to recompute it.
+ Note that after calling this function, the filtration
+ values are actually modified within the Simplex_tree.
+ The function :func:`extended_persistence`
+ retrieves the original values.
+
+ .. note::
+
+ Note that this code creates an extra vertex internally, so you should make sure that
+ the Simplex_tree does not contain a vertex with the largest possible value (i.e., 4294967295).
"""
- return self.get_ptr().make_filtration_non_decreasing()
+ self.get_ptr().compute_extended_filtration()
+
+ def extended_persistence(self, homology_coeff_field=11, min_persistence=0):
+ """This function retrieves good values for extended persistence, and separate the diagrams
+ into the Ordinary, Relative, Extended+ and Extended- subdiagrams.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11.
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value (i.e., the absolute value of the difference between the persistence diagram point coordinates) to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :returns: A list of four persistence diagrams in the format described in :func:`persistence`. The first one is Ordinary, the second one is Relative, the third one is Extended+ and the fourth one is Extended-. See https://link.springer.com/article/10.1007/s10208-008-9027-z and/or section 2.2 in https://link.springer.com/article/10.1007/s10208-017-9370-z for a description of these subtypes.
+
+ .. note::
+
+ This function should be called only if :func:`extend_filtration` has been called first!
+
+ .. note::
+
+ The coordinates of the persistence diagram points might be a little different than the
+ original filtration values due to the internal transformation (scaling to [-2,-1]) that is
+ performed on these values during the computation of extended persistence.
+ """
+ cdef vector[pair[int, pair[double, double]]] persistence_result
+ if self.pcohptr != NULL:
+ del self.pcohptr
+ self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), False)
+ self.pcohptr.compute_persistence(homology_coeff_field, -1.)
+ persistence_result = self.pcohptr.get_persistence()
+ return self.get_ptr().compute_extended_persistence_subdiagrams(persistence_result, min_persistence)
+
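A sketch of the intended workflow for these two functions, with illustrative filtration values:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1], filtration=2.0)
    st.insert([1, 2], filtration=3.0)
    st.extend_filtration()  # must be called before extended_persistence()
    dgms = st.extended_persistence(min_persistence=1e-5)
    # dgms holds the [Ordinary, Relative, Extended+, Extended-] subdiagrams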
def persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
- """This function returns the persistence of the simplicial complex.
+ """This function computes and returns the persistence of the simplicial complex.
:param homology_coeff_field: The homology coefficient field. Must be a
prime number. Default value is 11.
@@ -395,7 +420,7 @@ cdef class SimplexTree:
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
0.0.
- Sets min_persistence to -1.0 to see all values.
+ Set min_persistence to -1.0 to see all values.
:type min_persistence: float.
:param persistence_dim_max: If true, the persistent homology for the
maximal dimension in the complex is computed. If false, it is
@@ -404,13 +429,36 @@ cdef class SimplexTree:
:returns: The persistence of the simplicial complex.
:rtype: list of pairs(dimension, pair(birth, death))
"""
+ self.compute_persistence(homology_coeff_field, min_persistence, persistence_dim_max)
+ return self.pcohptr.get_persistence()
+
+ def compute_persistence(self, homology_coeff_field=11, min_persistence=0, persistence_dim_max = False):
+ """This function computes the persistence of the simplicial complex, so it can be accessed through
+ :func:`persistent_betti_numbers`, :func:`persistence_pairs`, etc. This function is equivalent to :func:`persistence`
+ when you do not want the list :func:`persistence` returns.
+
+ :param homology_coeff_field: The homology coefficient field. Must be a
+ prime number. Default value is 11.
+ :type homology_coeff_field: int.
+ :param min_persistence: The minimum persistence value to take into
+ account (strictly greater than min_persistence). Default value is
+ 0.0.
+ Set min_persistence to -1.0 to see all values.
+ :type min_persistence: float.
+ :param persistence_dim_max: If true, the persistent homology for the
+ maximal dimension in the complex is computed. If false, it is
+ ignored. Default is false.
+ :type persistence_dim_max: bool
+ :returns: Nothing.
+ """
if self.pcohptr != NULL:
del self.pcohptr
- self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), persistence_dim_max)
- cdef vector[pair[int, pair[double, double]]] persistence_result
- if self.pcohptr != NULL:
- persistence_result = self.pcohptr.get_persistence(homology_coeff_field, min_persistence)
- return persistence_result
+ cdef bool pdm = persistence_dim_max
+ cdef int coef = homology_coeff_field
+ cdef double minp = min_persistence
+ with nogil:
+ self.pcohptr = new Simplex_tree_persistence_interface(self.get_ptr(), pdm)
+ self.pcohptr.compute_persistence(coef, minp)
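A minimal sketch of the new split between computing and retrieving persistence:

    import gudhi

    st = gudhi.SimplexTree()
    st.insert([0, 1, 2], filtration=1.0)
    st.compute_persistence()  # computes, but builds no list
    print(st.betti_numbers())
    print(st.persistence_intervals_in_dimension(0))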
def betti_numbers(self):
"""This function returns the Betti numbers of the simplicial complex.
@@ -419,16 +467,11 @@ cdef class SimplexTree:
:rtype: list of int
:note: betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] bn_result
- if self.pcohptr != NULL:
- bn_result = self.pcohptr.betti_numbers()
- else:
- print("betti_numbers function requires persistence function"
- " to be launched first.")
- return bn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before betti_numbers()"
+ return self.pcohptr.betti_numbers()
def persistent_betti_numbers(self, from_value, to_value):
"""This function returns the persistent Betti numbers of the
@@ -445,16 +488,11 @@ cdef class SimplexTree:
:rtype: list of int
:note: persistent_betti_numbers function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[int] pbn_result
- if self.pcohptr != NULL:
- pbn_result = self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
- else:
- print("persistent_betti_numbers function requires persistence function"
- " to be launched first.")
- return pbn_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistent_betti_numbers()"
+ return self.pcohptr.persistent_betti_numbers(<double>from_value, <double>to_value)
def persistence_intervals_in_dimension(self, dimension):
"""This function returns the persistence intervals of the simplicial
@@ -466,16 +504,11 @@ cdef class SimplexTree:
:rtype: numpy array of dimension 2
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[double,double]] intervals_result
- if self.pcohptr != NULL:
- intervals_result = self.pcohptr.intervals_in_dimension(dimension)
- else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
- return np_array(intervals_result)
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_intervals_in_dimension()"
+ return np_array(self.pcohptr.intervals_in_dimension(dimension))
def persistence_pairs(self):
"""This function returns a list of persistence birth and death simplices pairs.
@@ -484,33 +517,68 @@ cdef class SimplexTree:
:rtype: list of pair of list of int
:note: persistence_pairs function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- cdef vector[pair[vector[int],vector[int]]] persistence_pairs_result
- if self.pcohptr != NULL:
- persistence_pairs_result = self.pcohptr.persistence_pairs()
- else:
- print("persistence_pairs function requires persistence function"
- " to be launched first.")
- return persistence_pairs_result
+ assert self.pcohptr != NULL, "compute_persistence() must be called before persistence_pairs()"
+ return self.pcohptr.persistence_pairs()
- def write_persistence_diagram(self, persistence_file=''):
+ def write_persistence_diagram(self, persistence_file):
"""This function writes the persistence intervals of the simplicial
complex in a user given file name.
- :param persistence_file: The specific dimension.
+ :param persistence_file: Name of the file.
:type persistence_file: string.
:note: intervals_in_dim function requires
- :func:`persistence()<gudhi.SimplexTree.persistence>`
+ :func:`compute_persistence`
function to be launched first.
"""
- if self.pcohptr != NULL:
- if persistence_file != '':
- self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
- else:
- print("persistence_file must be specified")
+ assert self.pcohptr != NULL, "compute_persistence() must be called before write_persistence_diagram()"
+ self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
+
+ def lower_star_persistence_generators(self):
+ """Assuming this is a lower-star filtration, this function returns the persistence pairs,
+ where each simplex is replaced with the vertex that gave it its filtration value.
+
+ :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity,
+ and second the essential features, grouped by dimension, with one vertex each
+ :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]]
+
+ :note: lower_star_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.lower_star_generators()
+ normal = [np_array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np_array(d) for d in gen.second]
+ return (normal, infinite)
+
+ def flag_persistence_generators(self):
+ """Assuming this is a flag complex, this function returns the persistence pairs,
+ where each simplex is replaced with the vertices of the edges that gave it its filtration value.
+
+ :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death;
+ then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity;
+ then the connected components, with one vertex each;
+ finally the other essential features, grouped by dimension, with 2 vertices for birth.
+ :rtype: Tuple[numpy.array[int] of shape (n,3), List[numpy.array[int] of shape (m,4)], numpy.array[int] of shape (l,), List[numpy.array[int] of shape (k,2)]]
+
+ :note: flag_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.flag_generators()
+ if len(gen.first) == 0:
+ normal0 = numpy.empty((0,3))
+ normals = []
+ else:
+ l = iter(gen.first)
+ normal0 = np_array(next(l)).reshape(-1,3)
+ normals = [np_array(d).reshape(-1,4) for d in l]
+ if len(gen.second) == 0:
+ infinite0 = numpy.empty(0)
+ infinites = []
else:
- print("intervals_in_dim function requires persistence function"
- " to be launched first.")
+ l = iter(gen.second)
+ infinite0 = np_array(next(l))
+ infinites = [np_array(d).reshape(-1,2) for d in l]
+ return (normal0, normals, infinite0, infinites)
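A hedged sketch of how the two new generator accessors might be used; output shapes follow the docstrings above:

    import gudhi

    st = gudhi.RipsComplex(points=[[0, 0], [1, 0], [0, 1]]).create_simplex_tree(max_dimension=2)
    st.persistence()  # required before calling the accessors
    normal0, normals, infinite0, infinites = st.flag_persistence_generators()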
diff --git a/src/python/gudhi/wasserstein.py b/src/python/gudhi/wasserstein.py
deleted file mode 100644
index db5ddff2..00000000
--- a/src/python/gudhi/wasserstein.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Theo Lacombe
-#
-# Copyright (C) 2019 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-import numpy as np
-import scipy.spatial.distance as sc
-try:
- import ot
-except ImportError:
- print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
-
-def _proj_on_diag(X):
- '''
- :param X: (n x 2) array encoding the points of a persistent diagram.
- :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
- '''
- Z = (X[:,0] + X[:,1]) / 2.
- return np.array([Z , Z]).T
-
-
-def _build_dist_matrix(X, Y, order=2., internal_p=2.):
- '''
- :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param internal_p: Ground metric (i.e. norm l_p).
- :param order: exponent for the Wasserstein metric.
- :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
- For 1 <= i <= n, 1 <= j <= m, C[i,j] encodes the distance between X[i] and Y[j], while C[i, m+1] (resp. C[n+1, j]) encodes the distance (to the p) between X[i] (resp Y[j]) and its orthogonal proj onto the diagonal.
- note also that C[n+1, m+1] = 0 (it costs nothing to move from the diagonal to the diagonal).
- '''
- Xdiag = _proj_on_diag(X)
- Ydiag = _proj_on_diag(Y)
- if np.isinf(internal_p):
- C = sc.cdist(X,Y, metric='chebyshev')**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
- else:
- C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
- Cxd = np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order
- Cdy = np.linalg.norm(Y - Ydiag, ord=internal_p, axis=1)**order
- Cf = np.hstack((C, Cxd[:,None]))
- Cdy = np.append(Cdy, 0)
-
- Cf = np.vstack((Cf, Cdy[None,:]))
-
- return Cf
-
-
-def _perstot(X, order, internal_p):
- '''
- :param X: (n x 2) numpy.array (points of a given diagram).
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (Euclidean norm).
- :param order: exponent for Wasserstein. Default value is 2.
- :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
- '''
- Xdiag = _proj_on_diag(X)
- return (np.sum(np.linalg.norm(X - Xdiag, ord=internal_p, axis=1)**order))**(1./order)
-
-
-def wasserstein_distance(X, Y, order=2., internal_p=2.):
- '''
- :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
- :param Y: (m x 2) numpy.array encoding the second diagram.
- :param internal_p: Ground metric on the (upper-half) plane (i.e. norm l_p in R^2); Default value is 2 (euclidean norm).
- :param order: exponent for Wasserstein; Default value is 2.
- :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric.
- :rtype: float
- '''
- n = len(X)
- m = len(Y)
-
- # handle empty diagrams
- if X.size == 0:
- if Y.size == 0:
- return 0.
- else:
- return _perstot(Y, order, internal_p)
- elif Y.size == 0:
- return _perstot(X, order, internal_p)
-
- M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
- a = np.full(n+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- a[-1] = a[-1] * m # normalized so that we have a probability measure, required by POT
- b = np.full(m+1, 1. / (n + m) ) # weight vector of the input diagram. Uniform here.
- b[-1] = b[-1] * n # so that we have a probability measure, required by POT
-
- # Comptuation of the otcost using the ot.emd2 library.
- # Note: it is the Wasserstein distance to the power q.
- # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
- ot_cost = (n+m) * ot.emd2(a, b, M, numItermax=2000000)
-
- return ot_cost ** (1./order)
diff --git a/src/python/gudhi/wasserstein/__init__.py b/src/python/gudhi/wasserstein/__init__.py
new file mode 100644
index 00000000..ed225ba4
--- /dev/null
+++ b/src/python/gudhi/wasserstein/__init__.py
@@ -0,0 +1 @@
+from .wasserstein import wasserstein_distance
diff --git a/src/python/gudhi/wasserstein/barycenter.py b/src/python/gudhi/wasserstein/barycenter.py
new file mode 100644
index 00000000..d67bcde7
--- /dev/null
+++ b/src/python/gudhi/wasserstein/barycenter.py
@@ -0,0 +1,146 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+
+import ot
+import numpy as np
+import scipy.spatial.distance as sc
+
+from gudhi.wasserstein import wasserstein_distance
+
+
+def _mean(x, m):
+ '''
+ :param x: a list of 2D points off the diagonal, x_0 ... x_{k-1}
+ :param m: total number of points taken into account; that is, we have (m-k) copies of the diagonal
+ :returns: the weighted mean of x with (m-k) copies of the diagonal
+ '''
+ k = len(x)
+ if k > 0:
+ w = np.mean(x, axis=0)
+ w_delta = (w[0] + w[1]) / 2 * np.ones(2)
+ return (k * w + (m-k) * w_delta) / m
+ else:
+ return np.array([0, 0])
+
+
+def lagrangian_barycenter(pdiagset, init=None, verbose=False):
+ '''
+ :param pdiagset: a list of ``numpy.array`` of shape `(n x 2)` (`n` can vary), encoding a set of persistence
+ diagrams with only finite coordinates.
+ :param init: The initial value for barycenter estimate.
+ If ``None``, init is made on a random diagram from the dataset.
+ Otherwise, it can be an ``int`` (then initialization is made on ``pdiagset[init]``)
+ or a `(n x 2)` ``numpy.array`` encoding a persistence diagram with `n` points.
+ :type init: ``int``, or (n x 2) ``np.array``
+ :param verbose: if ``True``, returns additional information about the barycenter.
+ :type verbose: boolean
+ :returns: If not verbose (default), a ``numpy.array`` encoding the barycenter estimate of pdiagset
+ (local minimum of the energy function).
+ If ``pdiagset`` is empty, returns ``None``.
+ If verbose, returns a couple ``(Y, log)`` where ``Y`` is the barycenter estimate,
+ and ``log`` is a ``dict`` that contains additional information:
+
+ - `"groupings"`, a list of list of pairs ``(i,j)``. Namely, ``G[k] = [...(i, j)...]``, where ``(i,j)`` indicates that `pdiagset[k][i]`` is matched to ``Y[j]`` if ``i = -1`` or ``j = -1``, it means they represent the diagonal.
+
+ - `"energy"`, ``float`` representing the Frechet energy value obtained. It is the mean of squared distances of observations to the output.
+
+ - `"nb_iter"`, ``int`` number of iterations performed before convergence of the algorithm.
+ '''
+ X = pdiagset # to shorten notations, not a copy
+ m = len(X) # number of diagrams we are averaging
+ if m == 0:
+ print("Warning: computing barycenter of empty diag set. Returns None")
+ return None
+
+ # store the number of off-diagonal points for each of the X_i
+ nb_off_diag = np.array([len(X_i) for X_i in X])
+ # Initialisation of barycenter
+ if init is None:
+ i0 = np.random.randint(m) # Index of first state for the barycenter
+ Y = X[i0].copy()
+ else:
+ if type(init)==int:
+ Y = X[init].copy()
+ else:
+ Y = init.copy()
+
+ nb_iter = 0
+
+ converged = False # stopping criterion
+ while not converged:
+ nb_iter += 1
+ K = len(Y) # current nb of points in Y (some might be on diagonal)
+ G = np.full((K, m), -1, dtype=int) # will store, for each j, the index of the
+ # point matched in each other diagram
+ # (might be the diagonal).
+ # That is, G[j, i] = k <=> y_j is matched to
+ # x_k in the i-th diagram X[i].
+ updated_points = np.zeros((K, 2)) # will store the new positions of
+ # the points of Y.
+ # If points disappear, they are thrown
+ # to [0, 0] by default.
+ new_created_points = [] # will store potential new points.
+
+ # Step 1 : compute optimal matching (Y, X_i) for each X_i
+ # and create new points in Y if needed
+ for i in range(m):
+ _, indices = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+ for y_j, x_i_j in indices:
+ if y_j >= 0: # we matched an off diagonal point to x_i_j...
+ if x_i_j >= 0: # ...which is also an off-diagonal point.
+ G[y_j, i] = x_i_j
+ else: # ...which is a diagonal point
+ G[y_j, i] = -1 # -1 stands for the diagonal (mask)
+ else: # We matched a diagonal point to x_i_j...
+ if x_i_j >= 0: # which is a off-diag point !
+ # need to create new point in Y
+ new_y = _mean(np.array([X[i][x_i_j]]), m)
+ # Average this point with (m-1) copies of Delta
+ new_created_points.append(new_y)
+
+ # Step 2 : Update current point position thanks to groupings computed
+ to_delete = []
+ for j in range(K):
+ matched_points = [X[i][G[j, i]] for i in range(m) if G[j, i] > -1]
+ new_y_j = _mean(matched_points, m)
+ if not np.array_equal(new_y_j, np.array([0,0])):
+ updated_points[j] = new_y_j
+ else: # this point is no longer of any use.
+ to_delete.append(j)
+ # we remove the point to be deleted now.
+ updated_points = np.delete(updated_points, to_delete, axis=0)
+
+ # we cannot converge if there have been new created points.
+ if new_created_points:
+ Y = np.concatenate((updated_points, new_created_points))
+ else:
+ # Step 3 : we check convergence
+ if np.array_equal(updated_points, Y):
+ converged = True
+ Y = updated_points
+
+
+ if verbose:
+ groupings = []
+ energy = 0
+ log = {}
+ n_y = len(Y)
+ for i in range(m):
+ cost, edges = wasserstein_distance(Y, X[i], matching=True, order=2., internal_p=2.)
+ groupings.append(edges)
+ energy += cost
+ log["groupings"] = groupings
+ energy = energy/m
+ log["energy"] = energy
+ log["nb_iter"] = nb_iter
+
+ return Y, log
+ else:
+ return Y
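A usage sketch for lagrangian_barycenter; the diagram values below are purely illustrative:

    import numpy as np
    from gudhi.wasserstein.barycenter import lagrangian_barycenter

    dgms = [np.array([[0.0, 1.0]]),
            np.array([[0.1, 1.2], [2.0, 2.5]])]
    Y, log = lagrangian_barycenter(dgms, init=0, verbose=True)
    print(Y, log["energy"], log["nb_iter"])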
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
new file mode 100644
index 00000000..89ecab1c
--- /dev/null
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -0,0 +1,181 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Theo Lacombe
+#
+# Copyright (C) 2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+import scipy.spatial.distance as sc
+
+try:
+ import ot
+except ImportError:
+ print("POT (Python Optimal Transport) package is not installed. Try to run $ conda install -c conda-forge pot ; or $ pip install POT")
+
+
+# Currently unused, but Théo says it is likely to be used again.
+def _proj_on_diag(X):
+ '''
+ :param X: (n x 2) array encoding the points of a persistent diagram.
+ :returns: (n x 2) array encoding the (respective orthogonal) projections of the points onto the diagonal
+ '''
+ Z = (X[:,0] + X[:,1]) / 2.
+ return np.array([Z , Z]).T
+
+
+def _dist_to_diag(X, internal_p):
+ '''
+ :param X: (n x 2) array encoding the points of a persistent diagram.
+ :param internal_p: Ground metric (i.e. norm L^p).
+ :returns: (n) array encoding the (respective orthogonal) distances of the points to the diagonal
+
+ .. note::
+ Assumes that the points are above the diagonal.
+ '''
+ return (X[:, 1] - X[:, 0]) * 2 ** (1.0 / internal_p - 1)
+
+
+def _build_dist_matrix(X, Y, order, internal_p):
+ '''
+ :param X: (n x 2) numpy.array encoding the (points of the) first diagram.
+ :param Y: (m x 2) numpy.array encoding the second diagram.
+ :param order: exponent for the Wasserstein metric.
+ :param internal_p: Ground metric (i.e. norm L^p).
+ :returns: (n+1) x (m+1) np.array encoding the cost matrix C.
+ For 0 <= i < n, 0 <= j < m, C[i,j] encodes the distance between X[i] and Y[j],
+ while C[i, m] (resp. C[n, j]) encodes the distance (to the p) between X[i] (resp Y[j])
+ and its orthogonal projection onto the diagonal.
+ note also that C[n, m] = 0 (it costs nothing to move from the diagonal to the diagonal).
+ '''
+ Cxd = _dist_to_diag(X, internal_p)**order
+ Cdy = _dist_to_diag(Y, internal_p)**order
+ if np.isinf(internal_p):
+ C = sc.cdist(X,Y, metric='chebyshev')**order
+ else:
+ C = sc.cdist(X,Y, metric='minkowski', p=internal_p)**order
+ Cf = np.hstack((C, Cxd[:,None]))
+ Cdy = np.append(Cdy, 0)
+
+ Cf = np.vstack((Cf, Cdy[None,:]))
+
+ return Cf
+
+
+def _perstot_autodiff(X, order, internal_p):
+ '''
+ Version of _perstot that works on eagerpy tensors.
+ '''
+ return _dist_to_diag(X, internal_p).norms.lp(order)
+
+def _perstot(X, order, internal_p, enable_autodiff):
+ '''
+ :param X: (n x 2) numpy.array (points of a given diagram).
+ :param order: exponent for Wasserstein. Default value is 2.
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X is torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation.
+ :type enable_autodiff: bool
+ :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+ '''
+ if enable_autodiff:
+ import eagerpy as ep
+
+ return _perstot_autodiff(ep.astensor(X), order, internal_p).raw
+ else:
+ return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
+
+
+def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_autodiff=False):
+ '''
+ :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
+ (i.e. with infinite coordinate).
+ :param Y: (m x 2) numpy.array encoding the second diagram.
+ :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
+ a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to
+ the j-th point in Y, with the convention (-1) represents the diagonal.
+ :param order: exponent for Wasserstein; Default value is 2.
+ :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
+ Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
+ with `matching=True`.
+
+ .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+ and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
+ :type enable_autodiff: bool
+ :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
+ respect to the internal_p-norm as ground metric.
+ If matching is set to True, also returns the optimal matching between X and Y.
+ '''
+ n = len(X)
+ m = len(Y)
+
+ # handle empty diagrams
+ if n == 0:
+ if m == 0:
+ if not matching:
+ # What if enable_autodiff?
+ return 0.
+ else:
+ return 0., np.array([])
+ else:
+ if not matching:
+ return _perstot(Y, order, internal_p, enable_autodiff)
+ else:
+ return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)])
+ elif m == 0:
+ if not matching:
+ return _perstot(X, order, internal_p, enable_autodiff)
+ else:
+ return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
+
+ if enable_autodiff:
+ import eagerpy as ep
+
+ X_orig = ep.astensor(X)
+ Y_orig = ep.astensor(Y)
+ X = X_orig.numpy()
+ Y = Y_orig.numpy()
+ M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
+ a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
+ a[-1] = m
+ b = np.ones(m+1) # weight vector of the input diagram. Uniform here.
+ b[-1] = n
+
+ if matching:
+ assert not enable_autodiff, "matching and enable_autodiff are currently incompatible"
+ P = ot.emd(a=a,b=b,M=M, numItermax=2000000)
+ ot_cost = np.sum(np.multiply(P,M))
+ P[-1, -1] = 0 # Remove matching corresponding to the diagonal
+ match = np.argwhere(P)
+ # Now we turn to -1 points encoding the diagonal
+ match[:,0][match[:,0] >= n] = -1
+ match[:,1][match[:,1] >= m] = -1
+ return ot_cost ** (1./order) , match
+
+ if enable_autodiff:
+ P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
+ pairs_X_Y = np.argwhere(P[:-1, :-1])
+ pairs_X_diag = np.nonzero(P[:-1, -1])
+ pairs_Y_diag = np.nonzero(P[-1, :-1])
+ dists = []
+ # empty arrays are not handled properly by the helpers, so we avoid calling them
+ if len(pairs_X_Y):
+ dists.append((Y_orig[pairs_X_Y[:, 1]] - X_orig[pairs_X_Y[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order))
+ if len(pairs_X_diag):
+ dists.append(_perstot_autodiff(X_orig[pairs_X_diag], order, internal_p))
+ if len(pairs_Y_diag):
+ dists.append(_perstot_autodiff(Y_orig[pairs_Y_diag], order, internal_p))
+ dists = [dist.reshape(1) for dist in dists]
+ return ep.concatenate(dists).norms.lp(order).raw
+ # We can also concatenate the 3 vectors to compute just one norm.
+
+ # Computation of the OT cost using the ot.emd2 function.
+ # Note: it is the Wasserstein distance to the power q.
+ # The default numItermax=100000 is not sufficient for some examples with 5000 points; what is a good value?
+ ot_cost = ot.emd2(a, b, M, numItermax=2000000)
+
+ return ot_cost ** (1./order)
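A minimal sketch of the new matching=True mode, mirroring the signature introduced above:

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    X = np.array([[0.0, 1.0], [0.0, 2.0]])
    Y = np.array([[0.0, 1.1]])
    cost, match = wasserstein_distance(X, Y, matching=True, order=2., internal_p=2.)
    # each row of match is a pair (i, j); -1 encodes the diagonal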
diff --git a/src/python/gudhi/weighted_rips_complex.py b/src/python/gudhi/weighted_rips_complex.py
new file mode 100644
index 00000000..0541572b
--- /dev/null
+++ b/src/python/gudhi/weighted_rips_complex.py
@@ -0,0 +1,59 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Raphaël Tinarrage, Yuichi Ike, Masatoshi Takenouchi
+#
+# Copyright (C) 2020 Inria, Copyright (C) 2020 Fujitsu Laboratories Ltd.
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+from gudhi import SimplexTree
+
+class WeightedRipsComplex:
+ """
+ Class to generate a weighted Rips complex from a distance matrix and weights on vertices,
+ in the way described in :cite:`dtmfiltrations`.
+ Note that all filtration values are doubled compared to the definition in the paper,
+ for consistency with RipsComplex.
+ """
+ def __init__(self,
+ distance_matrix,
+ weights=None,
+ max_filtration=float('inf')):
+ """
+ Args:
+ distance_matrix (Sequence[Sequence[float]]): distance matrix (full square or lower triangular).
+ weights (Sequence[float]): (one half of) weight for each vertex.
+ max_filtration (float): specifies the maximal filtration value to be considered.
+ """
+ self.distance_matrix = distance_matrix
+ if weights is not None:
+ self.weights = weights
+ else:
+ self.weights = [0] * len(distance_matrix)
+ self.max_filtration = max_filtration
+
+ def create_simplex_tree(self, max_dimension):
+ """
+ Args:
+ max_dimension (int): graph expansion until this given dimension.
+ """
+ dist = self.distance_matrix
+ F = self.weights
+ num_pts = len(dist)
+
+ st = SimplexTree()
+
+ for i in range(num_pts):
+ if 2*F[i] <= self.max_filtration:
+ st.insert([i], 2*F[i])
+ for i in range(num_pts):
+ for j in range(i):
+ value = max(2*F[i], 2*F[j], dist[i][j] + F[i] + F[j])
+ # max is needed when F is not 1-Lipschitz
+ if value <= self.max_filtration:
+ st.insert([i,j], filtration=value)
+
+ st.expansion(max_dimension)
+ return st
+
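A usage sketch for WeightedRipsComplex; the point set and weights below are illustrative:

    import numpy as np
    from scipy.spatial.distance import cdist
    from gudhi.weighted_rips_complex import WeightedRipsComplex

    pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    wrips = WeightedRipsComplex(distance_matrix=cdist(pts, pts),
                                weights=[0.1, 0.2, 0.1],
                                max_filtration=5.0)
    st = wrips.create_simplex_tree(max_dimension=2)
    print(list(st.get_filtration()))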