Diffstat (limited to 'src/python/gudhi/representations')
-rw-r--r--  src/python/gudhi/representations/kernel_methods.py  | 200
-rw-r--r--  src/python/gudhi/representations/metrics.py         | 422
-rw-r--r--  src/python/gudhi/representations/preprocessing.py   | 117
-rw-r--r--  src/python/gudhi/representations/vector_methods.py  | 613
4 files changed, 1031 insertions, 321 deletions
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
index bfc83aff..23fd23c7 100644
--- a/src/python/gudhi/representations/kernel_methods.py
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -9,27 +9,100 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.metrics import pairwise_distances
-from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+from sklearn.metrics import pairwise_distances, pairwise_kernels
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, _pairwise, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance
+from .preprocessing import Padding
#############################################
# Kernel methods ############################
#############################################
+def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ weight: weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence weighted Gaussian kernel value between persistence diagrams.
+ """
+ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))])
+ ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))])
+ if kernel_approx is not None:
+ approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0)
+ approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0)
+ return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T)
+ else:
+ W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth))
+ return np.sum(np.multiply(W, E))
+
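A minimal usage sketch of the helper above, on two small made-up diagrams (weighting points by their persistence, death minus birth, emphasizes long-lived features):

    import numpy as np
    D1 = np.array([[0.0, 1.0], [0.5, 2.0]])
    D2 = np.array([[0.0, 1.5]])
    k_flat = _persistence_weighted_gaussian_kernel(D1, D2, bandwidth=1.)  # constant weight
    k_pers = _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: x[1] - x[0], bandwidth=1.)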
+def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding to each persistence diagram the mirror image of its points across the diagonal, counted with negative weight, and then convolving all points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence scale space kernel value between persistence diagrams.
+ """
+ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0)
+ DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0)
+ weight_pss = lambda x: 1 if x[1] >= x[0] else -1
+ return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth)
+
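A quick sketch of the reduction above: the scale space kernel is half the weighted Gaussian kernel evaluated on the diagonally symmetrized diagrams, so a direct call only needs the bandwidth (diagram values are made up):

    import numpy as np
    D1 = np.array([[0.0, 1.0]])
    D2 = np.array([[0.2, 0.8]])
    k = _persistence_scale_space_kernel(D1, D2, bandwidth=1.)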
+def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", n_jobs=None, **kwargs):
+ """
+ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only.
+ kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric.
+ n_jobs (int): number of jobs to use for the computation. This uses joblib.Parallel(prefer="threads"), so kernels that do not release the GIL may not scale unless run inside a `joblib.parallel_backend <https://joblib.readthedocs.io/en/latest/parallel.html#joblib.parallel_backend>`_ block.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): kernel matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None or Y is X else np.reshape(np.arange(len(Y)), [-1,1])
+ if kernel == "sliced_wasserstein":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"], n_jobs=n_jobs) / kwargs["bandwidth"])
+ elif kernel == "persistence_fisher":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"], n_jobs=n_jobs) / kwargs["bandwidth_fisher"])
+ elif kernel == "persistence_scale_space":
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs), n_jobs=n_jobs)
+ elif kernel == "persistence_weighted_gaussian":
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs), n_jobs=n_jobs)
+ else:
+ return _pairwise(pairwise_kernels, False, XX, YY, metric=_sklearn_wrapper(kernel, X, Y, **kwargs), n_jobs=n_jobs)
+
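A usage sketch with illustrative values: the string kernels read their parameters from **kwargs, so e.g. "sliced_wasserstein" requires both num_directions and bandwidth to be passed explicitly:

    import numpy as np
    diags = [np.array([[0.0, 1.0], [0.3, 0.7]]), np.array([[0.1, 0.4]])]
    K = pairwise_persistence_diagram_kernels(diags, kernel="sliced_wasserstein", num_directions=10, bandwidth=1.)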
class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
"""
- def __init__(self, num_directions=10, bandwidth=1.0):
+ def __init__(self, num_directions=10, bandwidth=1.0, n_jobs=None):
"""
Constructor for the SlicedWassersteinKernel class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel applied to the sliced Wasserstein distance (default 1.).
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth = bandwidth
- self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+ self.num_directions = num_directions
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -39,7 +112,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sw_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -52,13 +125,26 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
"""
- return np.exp(-self.sw_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinKernel to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein kernel value.
+ """
+ return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) / self.bandwidth)
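A usage sketch of the scikit-learn style API with made-up diagrams: fit() now simply stores the diagrams, and transform() delegates to pairwise_persistence_diagram_kernels:

    import numpy as np
    train = [np.array([[0.0, 1.0]]), np.array([[0.2, 0.5], [0.1, 0.9]])]
    test = [np.array([[0.0, 0.6]])]
    swk = SlicedWassersteinKernel(num_directions=10, bandwidth=1.)
    K = swk.fit(train).transform(test)  # kernel matrix between test and the stored diagrams
    k = swk(train[0], test[0])          # single pair, via the new __call__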
class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence weighted Gaussian kernel matrix from a list of persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
"""
- def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None):
+ def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceWeightedGaussianKernel class.
@@ -66,9 +152,11 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
weight (function): weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie lists or numpy arrays of the form [p_x,p_y].
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth, self.weight = bandwidth, weight
self.kernel_approx = kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -78,10 +166,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
- if self.kernel_approx is not None:
- self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -94,45 +179,36 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
"""
- Xp = list(X)
- Xfit = np.zeros((len(Xp), len(self.diagrams_)))
- if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
- if self.kernel_approx is not None:
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
- else:
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
- Xfit[j,i] = Xfit[i,j]
- else:
- ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
- if self.kernel_approx is not None:
- approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
- else:
- for i in range(len(Xp)):
- for j in range(len(self.diagrams_)):
- W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
-
- return Xfit
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceWeightedGaussianKernel to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence weighted Gaussian kernel value.
+ """
+ return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth)
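A usage sketch of the kernel_approx speed-up (parameter values are illustrative): RBFSampler approximates exp(-gamma*||x-y||^2), so gamma = 1/(2*bandwidth^2) matches the exact Gaussian used above:

    import numpy as np
    from sklearn.kernel_approximation import RBFSampler
    train = [np.array([[0.0, 1.0], [0.4, 0.6]]), np.array([[0.1, 0.3]])]
    approx = RBFSampler(gamma=0.5, n_components=100).fit(np.array([[0., 0.]]))  # gamma = 1/(2*1.**2)
    pwg = PersistenceWeightedGaussianKernel(bandwidth=1., weight=lambda x: x[1] - x[0], kernel_approx=approx)
    K = pwg.fit(train).transform(train)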
class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence scale space kernel matrix from a list of persistence diagrams. The persistence scale space kernel is computed by adding to each persistence diagram the mirror image of its points across the diagonal, counted with negative weight, and then convolving all points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
"""
- def __init__(self, bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceScaleSpaceKernel class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
- self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -142,11 +218,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- for i in range(len(self.diagrams_)):
- op_D = self.diagrams_[i][:,[1,0]]
- self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
- self.pwg_.fit(X)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -159,17 +231,26 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
"""
- Xp = list(X)
- for i in range(len(Xp)):
- op_X = Xp[i][:,[1,0]]
- Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
- return self.pwg_.transform(Xp)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceScaleSpaceKernel to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence scale space kernel value.
+ """
+ return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence Fisher kernel matrix from a list of persistence diagrams. The persistence Fisher kernel is computed by exponentiating the corresponding persistence Fisher distance with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
"""
- def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceFisherKernel class.
@@ -177,9 +258,11 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel applied to the persistence Fisher distance (default 1.).
bandwidth_fisher (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions by PersistenceFisherDistance class (default 1.).
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_kernels` for details.
"""
self.bandwidth = bandwidth
- self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+ self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -189,7 +272,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.pf_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -202,5 +285,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
"""
- return np.exp(-self.pf_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherKernel to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher kernel value.
+ """
+ return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) / self.bandwidth_fisher)
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
index 5f9ec6ab..142ddef1 100644
--- a/src/python/gudhi/representations/metrics.py
+++ b/src/python/gudhi/representations/metrics.py
@@ -10,31 +10,198 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import pairwise_distances
-try:
- from .. import bottleneck_distance
- USE_GUDHI = True
-except ImportError:
- USE_GUDHI = False
- print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+from gudhi.hera import wasserstein_distance as hera_wasserstein_distance
+from .preprocessing import Padding
+from joblib import Parallel, delayed
#############################################
# Metrics ###################################
#############################################
+def _sliced_wasserstein_distance(D1, D2, num_directions):
+ """
+ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ float: the sliced Wasserstein distance between persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ approx1 = np.matmul(D1, lines)
+ approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines)
+ approx2 = np.matmul(D2, lines)
+ approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines)
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _compute_persistence_diagram_projections(X, num_directions):
+ """
+ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): list of persistence diagrams.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X]
+ return XX
+
+def _sliced_wasserstein_distance_on_projections(D1, D2):
+ """
+ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections.
+ D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections.
+
+ Returns:
+ float: the sliced Wasserstein distance between the projected persistence diagrams.
+ """
+ lim1, lim2 = int(len(D1)/2), int(len(D2)/2)
+ approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:]
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
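A sketch of how these two helpers fit together (made-up diagrams): projecting once and reusing the projections gives the same value as the direct computation above:

    import numpy as np
    D1 = np.array([[0.0, 1.0], [0.2, 0.6]])
    D2 = np.array([[0.1, 0.5]])
    P1, P2 = _compute_persistence_diagram_projections([D1, D2], num_directions=10)
    d_proj = _sliced_wasserstein_distance_on_projections(P1, P2)
    d_direct = _sliced_wasserstein_distance(D1, D2, num_directions=10)  # same value as d_proj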
+def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions.
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence Fisher distance between persistence diagrams.
+ """
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections1 = np.matmul(D1, projection)
+ diagonal_projections2 = np.matmul(D2, projection)
+ if kernel_approx is not None:
+ approx1 = kernel_approx.transform(D1)
+ approx_diagonal1 = kernel_approx.transform(diagonal_projections1)
+ approx2 = kernel_approx.transform(D2)
+ approx_diagonal2 = kernel_approx.transform(diagonal_projections2)
+ Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0)
+ U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0)
+ U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+
+def _pairwise(fallback, skipdiag, X, Y, metric, n_jobs):
+ if Y is not None:
+ return fallback(X, Y, metric=metric, n_jobs=n_jobs)
+ triu = np.triu_indices(len(X), k=skipdiag)
+ tril = (triu[1], triu[0])
+ par = Parallel(n_jobs=n_jobs, prefer="threads")
+ d = par(delayed(metric)([triu[0][i]], [triu[1][i]]) for i in range(len(triu[0])))
+ m = np.empty((len(X), len(X)))
+ m[triu] = d
+ m[tril] = d
+ if skipdiag:
+ np.fill_diagonal(m, 0)
+ return m
+
+def _sklearn_wrapper(metric, X, Y, **kwargs):
+ """
+ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments.
+ """
+ if Y is None:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], X[int(b[0])], **kwargs)
+ else:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], Y[int(b[0])], **kwargs)
+ return flat_metric
+
+PAIRWISE_DISTANCE_FUNCTIONS = {
+ "wasserstein": hera_wasserstein_distance,
+ "hera_wasserstein": hera_wasserstein_distance,
+ "persistence_fisher": _persistence_fisher_distance,
+}
+
+def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", n_jobs=None, **kwargs):
+ """
+ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only.
+ metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera; note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays.
+ n_jobs (int): number of jobs to use for the computation. This uses joblib.Parallel(prefer="threads"), so metrics that do not release the GIL may not scale unless run inside a `joblib.parallel_backend <https://joblib.readthedocs.io/en/latest/parallel.html#joblib.parallel_backend>`_ block.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): distance matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None or Y is X else np.reshape(np.arange(len(Y)), [-1,1])
+ if metric == "bottleneck":
+ try:
+ from .. import bottleneck_distance
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs), n_jobs=n_jobs)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
+ elif metric == "pot_wasserstein":
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs), n_jobs=n_jobs)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
+ elif metric == "sliced_wasserstein":
+ Xproj = _compute_persistence_diagram_projections(X, **kwargs)
+ Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs)
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj), n_jobs=n_jobs)
+ elif type(metric) == str:
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs), n_jobs=n_jobs)
+ else:
+ return _pairwise(pairwise_distances, True, XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs), n_jobs=n_jobs)
+
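A usage sketch with illustrative values: string metrics dispatch to the helpers above, and extra keyword arguments are forwarded to the underlying distance function:

    import numpy as np
    diags = [np.array([[0.0, 1.0]]), np.array([[0.2, 0.5], [0.1, 0.9]])]
    D_sw = pairwise_persistence_diagram_distances(diags, metric="sliced_wasserstein", num_directions=10)
    D_w = pairwise_persistence_diagram_distances(diags, metric="wasserstein", order=1, internal_p=2)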
class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
"""
- def __init__(self, num_directions=10):
+ def __init__(self, num_directions=10, n_jobs=None):
"""
Constructor for the SlicedWassersteinDistance class.
Parameters:
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.num_directions = num_directions
- thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
- self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -45,9 +212,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- diag_proj = (1./2) * np.ones((2,2))
- self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
return self
def transform(self, X):
@@ -60,40 +224,37 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
"""
- Xfit = np.zeros((len(X), len(self.approx_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.approx_)):
- for j in range(i+1, len(self.approx_)):
- A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
- Xfit[j,i] = Xfit[i,j]
- else:
- diag_proj = (1./2) * np.ones((2,2))
- approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
- for i in range(len(approx)):
- for j in range(len(self.approx_)):
- A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions, n_jobs=self.n_jobs)
- return Xfit
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinDistance to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein distance.
+ """
+ return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)
class BottleneckDistance(BaseEstimator, TransformerMixin):
"""
- This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+
+ :Requires: `CGAL <installation.html#cgal>`_ :math:`\geq` 4.11.0
"""
- def __init__(self, epsilon=None):
+ def __init__(self, epsilon=None, n_jobs=None):
"""
Constructor for the BottleneckDistance class.
Parameters:
epsilon (double): absolute (additive) error tolerated on the distance (default is the smallest positive float), see :func:`gudhi.bottleneck_distance`.
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.epsilon = epsilon
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -116,48 +277,42 @@ class BottleneckDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
"""
- num_diag1 = len(X)
-
- #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- if X is self.diagrams_:
- matrix = np.zeros((num_diag1, num_diag1))
-
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(i+1, num_diag1):
- matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
- matrix[j,i] = matrix[i,j]
- else:
- print("Gudhi built without CGAL: returning a null matrix")
-
- else:
- num_diag2 = len(self.diagrams_)
- matrix = np.zeros((num_diag1, num_diag2))
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon, n_jobs=self.n_jobs)
+ return Xfit
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(num_diag2):
- matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
- else:
- print("Gudhi built without CGAL: returning a null matrix")
+ def __call__(self, diag1, diag2):
+ """
+ Apply BottleneckDistance to a single pair of persistence diagrams and return the result.
- Xfit = matrix
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
- return Xfit
+ Returns:
+ float: bottleneck distance.
+ """
+ try:
+ from .. import bottleneck_distance
+ return bottleneck_distance(diag1, diag2, e=self.epsilon)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
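A usage sketch (requires a GUDHI build with CGAL, as noted above; otherwise transform() and __call__ raise ImportError):

    import numpy as np
    diags = [np.array([[0.0, 1.0]]), np.array([[0.1, 0.4]])]
    bd = BottleneckDistance()
    M = bd.fit(diags).transform(diags)  # symmetric matrix with zero diagonal
    d = bd(diags[0], diags[1])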
class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
"""
- def __init__(self, bandwidth=1., kernel_approx=None):
+ def __init__(self, bandwidth=1., kernel_approx=None, n_jobs=None):
"""
Constructor for the PersistenceFisherDistance class.
Parameters:
bandwidth (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions (default 1.).
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximation classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
"""
self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+ self.n_jobs = n_jobs
def fit(self, X, y=None):
"""
@@ -168,11 +323,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- projection = (1./2) * np.ones((2,2))
- self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
return self
def transform(self, X):
@@ -185,60 +335,92 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
"""
- Xfit = np.zeros((len(X), len(self.diagrams_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
- else:
- Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx, n_jobs=self.n_jobs)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherDistance to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher distance.
+ """
+ return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+
+class WassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams.
+ """
+
+ def __init__(self, order=1, internal_p=np.inf, mode="hera", delta=0.01, n_jobs=None):
+ """
+ Constructor for the WassersteinDistance class.
+
+ Parameters:
+ order (int): exponent for Wasserstein, default value is 1., see :func:`gudhi.wasserstein.wasserstein_distance`.
+ internal_p (int): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is `np.inf`, see :func:`gudhi.wasserstein.wasserstein_distance`.
+ mode (str): method for computing Wasserstein distance. Either "pot" or "hera". Default set to "hera".
+ delta (float): relative error tolerance; the distance is computed up to a multiplicative factor of 1+delta. Used only if mode == "hera".
+ n_jobs (int): number of jobs to use for the computation. See :func:`pairwise_persistence_diagram_distances` for details.
+ """
+ self.order, self.internal_p, self.mode = order, internal_p, mode
+ if mode == "pot":
+ self.metric = "pot_wasserstein"
+ elif mode == "hera":
+ self.metric = "hera_wasserstein"
+ else:
+ raise NameError("Unknown mode. Current available values for mode are 'hera' and 'pot'")
+ self.delta = delta
+ self.n_jobs = n_jobs
+
+ def fit(self, X, y=None):
+ """
+ Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise Wasserstein distances.
+ """
+ if self.metric == "hera_wasserstein":
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta, n_jobs=self.n_jobs)
else:
- projection = (1./2) * np.ones((2,2))
- diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
- for i in range(len(X)):
- for j in range(len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- else:
- Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False, n_jobs=self.n_jobs)
return Xfit
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply WassersteinDistance to a single pair of persistence diagrams and return the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: Wasserstein distance.
+ """
+ if self.metric == "hera_wasserstein":
+ return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta)
+ else:
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
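A usage sketch with illustrative values: mode="hera" works out of the box, while mode="pot" additionally requires the POT package:

    import numpy as np
    diags = [np.array([[0.0, 1.0]]), np.array([[0.2, 0.7], [0.1, 0.3]])]
    wd = WassersteinDistance(order=1, internal_p=2, mode="hera", delta=0.01)
    M = wd.fit(diags).transform(diags)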
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a39b00e4..8722e162 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -1,10 +1,11 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Vincent Rouvreau
#
# Copyright (C) 2018-2019 Inria
#
# Modification(s):
+# - 2021/10 Vincent Rouvreau: Add DimensionSelector
# - YYYY/MM Author: Description of the modification
import numpy as np
@@ -54,6 +55,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
Xfit.append(new_diag)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BirthPersistenceTransform to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
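A usage sketch of the new single-diagram interface (made-up values):

    import numpy as np
    diag = np.array([[0.0, 1.0], [0.5, 2.0]])
    bpt = BirthPersistenceTransform()
    new_diag = bpt(diag)  # same as bpt.fit_transform([diag])[0]; rows become (birth, persistence)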
class Clamping(BaseEstimator, TransformerMixin):
"""
This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
@@ -63,7 +76,7 @@ class Clamping(BaseEstimator, TransformerMixin):
Constructor for the Clamping class.
Parameters:
- limit (double): clamping value (default np.inf).
+ limit (float): clamping value (default np.inf).
"""
self.minimum = minimum
self.maximum = maximum
@@ -142,6 +155,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin):
Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
return Xfit
+ def __call__(self, diag):
+ """
+ Apply DiagramScaler to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Padding(BaseEstimator, TransformerMixin):
"""
This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
@@ -186,6 +211,18 @@ class Padding(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Padding to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: padded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class ProminentPoints(BaseEstimator, TransformerMixin):
"""
This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
@@ -198,7 +235,7 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
use (bool): whether to use the class or not (default False).
location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
- threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ threshold (float): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
"""
self.num_pts = num_pts
self.threshold = threshold
@@ -259,6 +296,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply ProminentPoints to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: thresholded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class DiagramSelector(BaseEstimator, TransformerMixin):
"""
This is a class for extracting finite or essential points in persistence diagrams.
@@ -269,7 +318,7 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
Parameters:
use (bool): whether to use the class or not (default False).
- limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ limit (float): second coordinate value that is the criterion for being an essential point (default numpy.inf).
point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
"""
self.use, self.limit, self.point_type = use, limit, point_type
@@ -303,3 +352,63 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
else:
Xfit = X
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply DiagramSelector to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: extracted persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
+
+# Mermaid sequence diagram - https://mermaid-js.github.io/mermaid-live-editor/
+# sequenceDiagram
+# USER->>DimensionSelector: fit_transform(<br/>[[array( Hi(X0) ), array( Hj(X0) ), ...],<br/> [array( Hi(X1) ), array( Hj(X1) ), ...],<br/> ...])
+# DimensionSelector->>thread1: _transform([array( Hi(X0) ), array( Hj(X0) )], ...)
+# DimensionSelector->>thread2: _transform([array( Hi(X1) ), array( Hj(X1) )], ...)
+# Note right of DimensionSelector: ...
+# thread1->>DimensionSelector: array( Hn(X0) )
+# thread2->>DimensionSelector: array( Hn(X1) )
+# Note right of DimensionSelector: ...
+# DimensionSelector->>USER: [array( Hn(X0) ), <br/> array( Hn(X1) ), <br/> ...]
+
+class DimensionSelector(BaseEstimator, TransformerMixin):
+ """
+ This is a class for selecting the persistence diagrams of a specific dimension, given by its index in the input lists.
+ """
+
+ def __init__(self, index=0):
+ """
+ Constructor for the DimensionSelector class.
+
+ Parameters:
+ index (int): index of the dimension of the persistence diagrams to return. Default value is `0`.
+ """
+ self.index = index
+
+ def fit(self, X, Y=None):
+ """
+ Nothing to be done, but useful when included in a scikit-learn Pipeline.
+ """
+ return self
+
+ def transform(self, X, Y=None):
+ """
+ Select persistence diagrams by their dimension index.
+
+ Parameters:
+ X (list of list of tuple): List of list of persistence pairs, i.e.
+ `[[array( Hi(X0) ), array( Hj(X0) ), ...], [array( Hi(X1) ), array( Hj(X1) ), ...], ...]`
+
+ Returns:
+ list of tuple:
+ Persistence diagrams in a specific dimension, i.e. if `index` was set to `m` and `Hn` is at index `m` of
+ the input, it returns `[array( Hn(X0) ), array( Hn(X1) ), ...]`
+ """
+
+ return [persistence[self.index] for persistence in X]
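A usage sketch (made-up diagrams): selecting the H1 diagrams out of per-complex lists of persistence pairs, e.g. as the first step of a scikit-learn Pipeline:

    import numpy as np
    H0_X0, H1_X0 = np.array([[0.0, 1.0]]), np.array([[0.5, 2.0]])
    H0_X1, H1_X1 = np.array([[0.0, 0.3]]), np.array([[0.4, 1.1]])
    h1_diags = DimensionSelector(index=1).fit_transform([[H0_X0, H1_X0], [H0_X1, H1_X1]])
    # h1_diags == [H1_X0, H1_X1]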
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index fe26dbe2..ce74aee5 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -1,16 +1,25 @@
# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Mathieu Carrière
+# Author(s): Mathieu Carrière, Martin Royer, Gard Spreemann
#
-# Copyright (C) 2018-2019 Inria
+# Copyright (C) 2018-2020 Inria
#
# Modification(s):
-# - YYYY/MM Author: Description of the modification
+# - 2020/06 Martin: ATOL integration
+# - 2020/12 Gard: A more flexible Betti curve class capable of computing exact curves.
+# - 2021/11 Vincent Rouvreau: factorize _automatic_sample_range
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
-from sklearn.neighbors import DistanceMetric
+from sklearn.metrics import pairwise
+try:
+ # New location since 1.0
+ from sklearn.metrics import DistanceMetric
+except ImportError:
+ # Will be removed in 1.3
+ from sklearn.neighbors import DistanceMetric
from .preprocessing import DiagramScaler, BirthPersistenceTransform
@@ -44,10 +53,14 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
if np.isnan(np.array(self.im_range)).any():
- new_X = BirthPersistenceTransform().fit_transform(X)
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ try:
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
return self
def transform(self, X):
@@ -77,15 +90,73 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
Xfit.append(image.flatten()[np.newaxis,:])
- Xfit = np.concatenate(Xfit,0)
+ Xfit = np.concatenate(Xfit, 0)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply PersistenceImage to a single persistence diagram and return the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**): output persistence image.
+ """
+ return self.fit_transform([diag])[0,:]
+
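For illustration, a small usage sketch of the `__call__` shorthand above (the constructor parameters `bandwidth` and `resolution` come from the class definition, which is outside this hunk):

    import numpy as np
    from gudhi.representations.vector_methods import PersistenceImage

    diag = np.array([[0., 1.], [0.5, 2.]])
    pi = PersistenceImage(bandwidth=0.1, resolution=[5, 5])
    img = pi(diag)     # equivalent to pi.fit_transform([diag])[0, :]
    # img.shape == (25,), i.e. resolution[0] * resolution[1] pixels, as documented above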
+def _automatic_sample_range(sample_range, X):
+ """
+ Compute and return the sample range from the persistence diagrams when one of the sample_range values is numpy.nan.
+
+ Parameters:
+ sample_range (a numpy array of 2 float): minimum and maximum of all piecewise-linear function domains, of
+ the form [x_min, x_max].
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of 2 float: the sample range, with any numpy.nan entry replaced by a value computed from X.
+ """
+ nan_in_range = np.isnan(sample_range)
+ if nan_in_range.any():
+ try:
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X)
+ [mx,my] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]]
+ [Mx,My] = [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ return np.where(nan_in_range, np.array([mx, My]), sample_range)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ pass
+ return sample_range
+
+
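A worked sketch of the NaN-replacement behaviour on a single diagram:

    import numpy as np

    X = [np.array([[0., 1.], [2., 4.]])]
    _automatic_sample_range(np.array([np.nan, np.nan]), X)
    # -> array([0., 4.]): minimum birth and maximum death over X
    _automatic_sample_range(np.array([-1., np.nan]), X)
    # -> array([-1., 4.]): only the numpy.nan entry is replaced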
+def _trim_endpoints(x, are_endpoints_nan):
+ if are_endpoints_nan[0]:
+ x = x[1:]
+ if are_endpoints_nan[1]:
+ x = x[:-1]
+ return x
+
+
+def _grid_from_sample_range(self, X):
+ sample_range = np.array(self.sample_range)
+ self.nan_in_range = np.isnan(sample_range)
+ self.new_resolution = self.resolution
+ if not self.keep_endpoints:
+ self.new_resolution += self.nan_in_range.sum()
+ self.sample_range_fixed = _automatic_sample_range(sample_range, X)
+ self.grid_ = np.linspace(self.sample_range_fixed[0], self.sample_range_fixed[1], self.new_resolution)
+ if not self.keep_endpoints:
+ self.grid_ = _trim_endpoints(self.grid_, self.nan_in_range)
+
+
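To see the endpoint handling concretely: with `resolution=4`, the default `sample_range=[np.nan, np.nan]` and `keep_endpoints=False`, fitting on a diagram spanning [0, 4] first builds the 6-point grid [0, 0.8, 1.6, 2.4, 3.2, 4], then trims both NaN endpoints:

    import numpy as np
    from gudhi.representations.vector_methods import Landscape

    ls = Landscape(resolution=4).fit([np.array([[0., 4.]])])
    ls.grid_   # -> array([0.8, 1.6, 2.4, 3.2]); the extremities, where landscapes vanish, are dropped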
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the landscapes are computed.
"""
- def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Landscape class.
@@ -93,10 +164,10 @@ class Landscape(BaseEstimator, TransformerMixin):
num_landscapes (int): number of piecewise-linear functions to output (default 5).
resolution (int): number of samples for all piecewise-linear functions (default 100).
sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
- self.nan_in_range = np.isnan(np.array(self.sample_range))
- self.new_resolution = self.resolution + self.nan_in_range.sum()
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -106,10 +177,7 @@ class Landscape(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if self.nan_in_range.any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(self.nan_in_range, np.array([mx, My]), np.array(self.sample_range))
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -122,59 +190,47 @@ class Landscape(BaseEstimator, TransformerMixin):
Returns:
numpy array with shape (number of diagrams) x (number of samples = **num_landscapes** x **resolution**): output persistence landscapes.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.new_resolution)
- step_x = x_values[1] - x_values[0]
- for i in range(num_diag):
-
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
-
- ls = np.zeros([self.num_landscapes, self.new_resolution])
-
- events = []
- for j in range(self.new_resolution):
- events.append([])
-
- for j in range(num_pts_in_diag):
- [px,py] = diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
- mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.new_resolution)
-
- if min_idx < self.new_resolution and max_idx > 0:
-
- landscape_value = self.sample_range[0] + min_idx * step_x - px
- for k in range(min_idx, mid_idx):
- events[k].append(landscape_value)
- landscape_value += step_x
+ Xfit = []
+ x_values = self.grid_
+ for diag in X:
+ midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
+ tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
+ n_points = diag.shape[0]
+ # Complete the array with zeros to get the right number of landscapes
+ if self.num_landscapes > n_points:
+ tent_functions = np.concatenate(
+ [tent_functions, np.zeros((tent_functions.shape[0], self.num_landscapes-n_points))],
+ axis=1
+ )
+ tent_functions.partition(tent_functions.shape[1]-self.num_landscapes, axis=1)
+ landscapes = np.sort(tent_functions[:, -self.num_landscapes:], axis=1)[:, ::-1].T
- landscape_value = py - self.sample_range[0] - mid_idx * step_x
- for k in range(mid_idx, max_idx):
- events[k].append(landscape_value)
- landscape_value -= step_x
+ landscapes = np.sqrt(2) * np.ravel(landscapes)
+ Xfit.append(landscapes)
- for j in range(self.new_resolution):
- events[j].sort(reverse=True)
- for k in range( min(self.num_landscapes, len(events[j])) ):
- ls[k,j] = events[j][k]
+ return np.stack(Xfit, axis=0)
- if self.nan_in_range[0]:
- ls = ls[:,1:]
- if self.nan_in_range[1]:
- ls = ls[:,:-1]
- ls = np.sqrt(2)*np.reshape(ls,[1,-1])
- Xfit.append(ls)
+ def __call__(self, diag):
+ """
+ Apply Landscape on a single persistence diagram and output the result.
- Xfit = np.concatenate(Xfit,0)
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
- return Xfit
+ Returns:
+ numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape.
+ """
+ return self.fit_transform([diag])[0, :]
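The vectorised transform above uses the tent-function form of landscapes: a point (b, d) contributes the function max((d-b)/2 - |x - (b+d)/2|, 0), and the k-th landscape value at x is the k-th largest contribution. A minimal sketch of that inner computation:

    import numpy as np

    diag = np.array([[0., 2.], [1., 3.]])
    x = np.array([1.5])
    midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
    tents = np.maximum(heights[None, :] - np.abs(x[:, None] - midpoints[None, :]), 0)
    # tents == [[0.5, 0.5]]: both tents reach 0.5 at x = 1.5, so the first two
    # landscapes equal 0.5 there (before the final sqrt(2) scaling).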
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+
+ Attributes:
+ grid_ (1d array): The grid on which the silhouette is computed.
"""
- def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Silhouette class.
@@ -182,8 +238,10 @@ class Silhouette(BaseEstimator, TransformerMixin):
weight (function): weight function for the persistence diagram points (default constant function, ie lambda x: 1). This function must be defined on 2D points, ie on lists or numpy arrays of the form [p_x,p_y].
resolution (int): number of samples for the weighted average (default 100).
sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities (where the value is always 0). This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -193,10 +251,7 @@ class Silhouette(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ _grid_from_sample_range(self, X)
return self
def transform(self, X):
@@ -209,110 +264,200 @@ class Silhouette(BaseEstimator, TransformerMixin):
Returns:
numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
+ Xfit = []
+ x_values = self.grid_
- for i in range(num_diag):
+ for diag in X:
+ midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
+ weights = np.array([self.weight(pt) for pt in diag])
+ total_weight = np.sum(weights)
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ tent_functions = np.maximum(heights[None, :] - np.abs(x_values[:, None] - midpoints[None, :]), 0)
+ silhouette = np.sum(weights[None, :] / total_weight * tent_functions, axis=1)
+ Xfit.append(silhouette * np.sqrt(2))
- sh, weights = np.zeros(self.resolution), np.zeros(num_pts_in_diag)
- for j in range(num_pts_in_diag):
- weights[j] = self.weight(diagram[j,:])
- total_weight = np.sum(weights)
+ return np.stack(Xfit, axis=0)
- for j in range(num_pts_in_diag):
+ def __call__(self, diag):
+ """
+ Apply Silhouette on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output persistence silhouette.
+ """
+ return self.fit_transform([diag])[0,:]
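The silhouette is the weight-normalised average of the same tent functions, scaled by sqrt(2). A sketch mirroring the transform above:

    import numpy as np

    diag = np.array([[0., 2.], [1., 3.]])
    x = np.array([1.5])
    midpoints, heights = (diag[:, 0] + diag[:, 1]) / 2., (diag[:, 1] - diag[:, 0]) / 2.
    weights = diag[:, 1] - diag[:, 0]     # e.g. persistence weights, here w = [2, 2]
    tents = np.maximum(heights[None, :] - np.abs(x[:, None] - midpoints[None, :]), 0)
    silhouette = np.sqrt(2) * np.sum(weights[None, :] / weights.sum() * tents, axis=1)
    # -> array([0.70710678]) = sqrt(2) * 0.5, the weighted mean of the two tent values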
- [px,py] = diagram[j,:2]
- weight = weights[j] / total_weight
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- mid_idx = np.clip(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- if min_idx < self.resolution and max_idx > 0:
+class BettiCurve(BaseEstimator, TransformerMixin):
+ """
+ Compute Betti curves from persistence diagrams. There are several modes of operation: with a given resolution (with or without a sample_range), with a predefined grid, and with neither. With a predefined grid, the class computes the Betti numbers at those grid points. Without a predefined grid, if the resolution is set to None, the class can be fitted to a list of persistence diagrams and produces a grid made of (at least) the filtration values at which at least one of those diagrams changes Betti numbers; the Betti numbers are then computed at those grid points, and the resulting curve is exact on the whole real line. Otherwise, if a resolution is given, the Betti curve is obtained by sampling evenly, using either the given sample_range or a range computed from the persistence diagrams.
- silhouette_value = self.sample_range[0] + min_idx * step_x - px
- for k in range(min_idx, mid_idx):
- sh[k] += weight * silhouette_value
- silhouette_value += step_x
+ Examples
+ --------
+ If pd is a persistence diagram and xs is a nonempty grid of finite values such that xs[0] >= pd.min(), then the results of:
- silhouette_value = py - self.sample_range[0] - mid_idx * step_x
- for k in range(mid_idx, max_idx):
- sh[k] += weight * silhouette_value
- silhouette_value -= step_x
+ >>> bc = BettiCurve(predefined_grid=xs) # doctest: +SKIP
+ >>> result = bc(pd) # doctest: +SKIP
- Xfit.append(np.reshape(np.sqrt(2) * sh, [1,-1]))
+ and
- Xfit = np.concatenate(Xfit, 0)
+ >>> from scipy.interpolate import interp1d # doctest: +SKIP
+ >>> bc = BettiCurve(resolution=None, predefined_grid=None) # doctest: +SKIP
+ >>> bettis = bc.fit_transform([pd]) # doctest: +SKIP
+ >>> interp = interp1d(bc.grid_, bettis[0, :], kind="previous", fill_value="extrapolate") # doctest: +SKIP
+ >>> result = np.array(interp(xs), dtype=int) # doctest: +SKIP
- return Xfit
+ are the same.
-class BettiCurve(BaseEstimator, TransformerMixin):
+ Attributes
+ ----------
+ grid_ : 1d array
+ The grid on which the Betti numbers are computed. If predefined_grid was specified, `grid_` will always be that grid, independently of data. If not and resolution is None, the grid is fitted to capture all filtration values at which the Betti numbers change.
"""
- This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
- """
- def __init__(self, resolution=100, sample_range=[np.nan, np.nan]):
+
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan], predefined_grid=None, *, keep_endpoints=False):
"""
Constructor for the BettiCurve class.
Parameters:
- resolution (int): number of sample for the piecewise-constant function (default 100).
+ resolution (int): number of samples for the piecewise-constant function (default 100), or None for the exact curve.
sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ predefined_grid (1d array or None, default=None): Predefined filtration grid points at which to compute the Betti curves. Must be strictly ordered. Infinities are ok. If None (default), and resolution is given, the grid will be uniform from x_min to x_max in 'resolution' steps, otherwise a grid will be computed that captures all changes in Betti numbers in the provided data.
+ keep_endpoints (bool): when computing `sample_range` (fixed `resolution`, no `predefined_grid`), use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
- self.resolution, self.sample_range = resolution, sample_range
- def fit(self, X, y=None):
+ if (predefined_grid is not None) and (not isinstance(predefined_grid, np.ndarray)):
+ raise ValueError("Expected predefined_grid as array or None.")
+
+ self.predefined_grid = predefined_grid
+ self.resolution = resolution
+ self.sample_range = sample_range
+ self.keep_endpoints = keep_endpoints
+
+ def is_fitted(self):
+ return hasattr(self, "grid_")
+
+ def fit(self, X, y=None):
"""
- Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+ Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams. When no predefined grid is provided and resolution is set to None, compute a filtration grid that captures all changes in Betti numbers for all the given persistence diagrams.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
- y (n x 1 array): persistence diagram labels (unused).
+ X (list of 2d arrays): Persistence diagrams.
+ y (None): Ignored.
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+
+ if self.predefined_grid is None:
+ if self.resolution is None: # Flexible/exact version
+ events = np.unique(np.concatenate([pd.flatten() for pd in X] + [[-np.inf]], axis=0))
+ self.grid_ = np.array(events)
+ else:
+ _grid_from_sample_range(self, X)
+ else:
+ self.grid_ = self.predefined_grid # Get the predefined grid from user
+
return self
def transform(self, X):
"""
- Compute the Betti curve for each persistence diagram individually and concatenate the results.
+ Compute Betti curves.
Parameters:
- X (list of n x 2 numpy arrays): input persistence diagrams.
-
+ X (list of 2d arrays): Persistence diagrams.
+
Returns:
- numpy array with shape (number of diagrams) x (**resolution**): output Betti curves.
+ `len(X) x len(self.grid_)` array of ints: Betti numbers of the given persistence diagrams at the grid points given in `self.grid_`.
"""
- num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
- for i in range(num_diag):
+ if not self.is_fitted():
+ raise NotFittedError("Not fitted.")
- diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ if not X:
+ X = [np.zeros((0, 2))]
+
+ N = len(X)
- bc = np.zeros(self.resolution)
- for j in range(num_pts_in_diag):
- [px,py] = diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- for k in range(min_idx, max_idx):
- bc[k] += 1
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
- Xfit.append(np.reshape(bc,[1,-1]))
+ bettis = [[0] for i in range(0, N)]
+
+ i = 0
+ for x in self.grid_:
+ while i < len(sorting) and events[sorting[i]] <= x:
+ j = np.searchsorted(ends, sorting[i])
+ delta = 1 if sorting[i] - starts[j] < len(X[j]) else -1
+ bettis[j][-1] += delta
+ i += 1
+ for k in range(0, N):
+ bettis[k].append(bettis[k][-1])
+
+ return np.array(bettis, dtype=int)[:, 0:-1]
+
+ def fit_transform(self, X):
+ """
+ The result is the same as fit(X) followed by transform(X), but potentially faster.
+ """
+
+ if self.predefined_grid is None and self.resolution is None:
+ if not X:
+ X = [np.zeros((0, 2))]
+
+ N = len(X)
+
+ events = np.concatenate([pd.flatten(order="F") for pd in X], axis=0)
+ sorting = np.argsort(events)
+ offsets = np.zeros(1 + N, dtype=int)
+ for i in range(0, N):
+ offsets[i+1] = offsets[i] + 2*X[i].shape[0]
+ starts = offsets[0:N]
+ ends = offsets[1:N + 1] - 1
+
+ xs = [-np.inf]
+ bettis = [[0] for i in range(0, N)]
+
+ for i in sorting:
+ j = np.searchsorted(ends, i)
+ delta = 1 if i - starts[j] < len(X[j]) else -1
+ if events[i] == xs[-1]:
+ bettis[j][-1] += delta
+ else:
+ xs.append(events[i])
+ for k in range(0, j):
+ bettis[k].append(bettis[k][-1])
+ bettis[j].append(bettis[j][-1] + delta)
+ for k in range(j+1, N):
+ bettis[k].append(bettis[k][-1])
+
+ self.grid_ = np.array(xs)
+ return np.array(bettis, dtype=int)
+
+ else:
+ return self.fit(X).transform(X)
+
+ def __call__(self, diag):
+ """
+ Shorthand for transform on a single persistence diagram.
+ """
+ return self.fit_transform([diag])[0, :]
- Xfit = np.concatenate(Xfit, 0)
- return Xfit
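To make the exact mode concrete, a small worked example of the event sweep in `fit_transform` above:

    import numpy as np
    from gudhi.representations.vector_methods import BettiCurve

    bc = BettiCurve(resolution=None, predefined_grid=None)   # exact mode
    bettis = bc.fit_transform([np.array([[0., 1.], [0.5, 2.]])])
    bc.grid_   # -> array([-inf,  0. ,  0.5,  1. ,  2. ])
    bettis     # -> array([[0, 1, 2, 1, 0]]): the Betti number on each interval [grid_[i], grid_[i+1])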
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+
+ Attributes:
+ grid_ (1d array): In vector mode, the grid on which the entropy summary function is computed.
"""
- def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan], *, keep_endpoints=False):
"""
Constructor for the Entropy class.
@@ -321,8 +466,10 @@ class Entropy(BaseEstimator, TransformerMixin):
normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn evenly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ keep_endpoints (bool): when computing `sample_range`, use the exact extremities. This is mostly useful for plotting; the default is to use a slightly smaller range.
"""
self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+ self.keep_endpoints = keep_endpoints
def fit(self, X, y=None):
"""
@@ -332,10 +479,9 @@ class Entropy(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- if np.isnan(np.array(self.sample_range)).any():
- pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
- [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
- self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ if self.mode == "vector":
+ _grid_from_sample_range(self, X)
+ self.step_ = self.grid_[1] - self.grid_[0]
return self
def transform(self, X):
@@ -349,34 +495,41 @@ class Entropy(BaseEstimator, TransformerMixin):
numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
"""
num_diag, Xfit = len(X), []
- x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
- step_x = x_values[1] - x_values[0]
new_X = BirthPersistenceTransform().fit_transform(X)
for i in range(num_diag):
-
- orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
- new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
-
+ orig_diagram, new_diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
+
+ p = new_diagram[:,1]
+ p = p/np.sum(p)
if self.mode == "scalar":
- ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
+ ent = -np.dot(p, np.log(p))
Xfit.append(np.array([[ent]]))
-
else:
ent = np.zeros(self.resolution)
for j in range(num_pts_in_diag):
[px,py] = orig_diagram[j,:2]
- min_idx = np.clip(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- max_idx = np.clip(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0, self.resolution)
- for k in range(min_idx, max_idx):
- ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
- if self.normalized:
- ent = ent / np.linalg.norm(ent, ord=1)
- Xfit.append(np.reshape(ent,[1,-1]))
+ min_idx = np.clip(np.ceil((px - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
+ max_idx = np.clip(np.ceil((py - self.sample_range_fixed[0]) / self.step_).astype(int), 0, self.resolution)
+ ent[min_idx:max_idx] -= p[j] * np.log(p[j])
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, axis=0)
+ return Xfit
- Xfit = np.concatenate(Xfit, 0)
+ def __call__(self, diag):
+ """
+ Apply Entropy on a single persistence diagram and output the result.
- return Xfit
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ return self.fit_transform([diag])[0,:]
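A quick check of the scalar mode: with persistences [1, 3], the normalised vector is p = [0.25, 0.75] and the entropy is -sum(p * log(p)):

    import numpy as np
    from gudhi.representations.vector_methods import Entropy

    diag = np.array([[0., 1.], [0., 3.]])
    Entropy(mode="scalar")(diag)
    # -> array([0.56233514]) = -(0.25*np.log(0.25) + 0.75*np.log(0.75))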
class TopologicalVector(BaseEstimator, TransformerMixin):
"""
@@ -424,13 +577,31 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
diagram, num_pts_in_diag = X[i], X[i].shape[0]
pers = 0.5 * (diagram[:,1]-diagram[:,0])
min_pers = np.minimum(pers,np.transpose(pers))
- distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ # Works fine with sklearn 1.0, but a ValueError exception is thrown on older versions
+ try:
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ except ValueError:
+ # Empty persistence diagram case - https://github.com/GUDHI/gudhi-devel/issues/507
+ assert len(diagram) == 0
+ distances = np.empty(shape = [0, 0])
vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
dim = min(len(vect), thresh)
Xfit[i, :dim] = vect[:dim]
return Xfit
+ def __call__(self, diag):
+ """
+ Apply TopologicalVector on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output topological vector.
+ """
+ return self.fit_transform([diag])[0,:]
+
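As a usage sketch of the shorthand above (`threshold` is the constructor parameter documented as **threshold**, defined outside this hunk):

    import numpy as np
    from gudhi.representations.vector_methods import TopologicalVector

    diag = np.array([[0., 4.], [1., 2.]])
    vec = TopologicalVector(threshold=3)(diag)
    # vec.shape == (3,): the largest min(Chebyshev distance, half-persistence) values,
    # sorted in decreasing order and padded with zeros up to the threshold.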
class ComplexPolynomial(BaseEstimator, TransformerMixin):
"""
This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
@@ -490,3 +661,155 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin):
coeff = np.array(coeff[::-1])[1:]
Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply ComplexPolynomial on a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output complex vector of coefficients.
+ """
+ return self.fit_transform([diag])[0,:]
+
+def _lapl_contrast(measure, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ return np.exp(-pairwise.pairwise_distances(measure, Y=centers) / inertias)
+
+def _gaus_contrast(measure, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ return np.exp(-pairwise.pairwise_distances(measure, Y=centers, squared=True) / inertias**2)
+
+def _indicator_contrast(diags, centers, inertias):
+ """contrast function for vectorising `measure` in ATOL"""
+ robe_curve = np.clip(2-pairwise.pairwise_distances(diags, Y=centers)/inertias, 0, 1)
+ return robe_curve
+
+def _cloud_weighting(measure):
+ """automatic uniform weighting with mass 1 for `measure` in ATOL"""
+ return np.ones(shape=measure.shape[0])
+
+def _iidproba_weighting(measure):
+ """automatic uniform weighting with mass 1/N for `measure` in ATOL"""
+ return np.ones(shape=measure.shape[0]) / measure.shape[0]
+
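A sketch of how these helpers compose (mirroring `Atol.__call__` below): each point of the measure receives a weight, and the vectorisation sums weighted contrast values against each center:

    import numpy as np

    measure = np.array([[0., 1.], [2., 3.]])
    centers = np.array([[0., 0.], [3., 3.]])   # assumed already fitted
    inertias = np.array([1., 1.])
    w = _cloud_weighting(measure)              # [1., 1.]
    v = np.sum(w * _gaus_contrast(measure, centers, inertias).T, axis=1)
    # v[i] = sum_j w_j * exp(-||x_j - c_i||^2 / inertias[i]^2), one coordinate per center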
+class Atol(BaseEstimator, TransformerMixin):
+ """
+ This class vectorises measures (e.g. point clouds, persistence diagrams, etc.) after a quantisation step.
+
+ ATOL paper: :cite:`royer2019atol`
+
+ Example
+ --------
+ >>> from sklearn.cluster import KMeans
+ >>> from gudhi.representations.vector_methods import Atol
+ >>> import numpy as np
+ >>> a = np.array([[1, 2, 4], [1, 4, 0], [1, 0, 4]])
+ >>> b = np.array([[4, 2, 0], [4, 4, 0], [4, 0, 2]])
+ >>> c = np.array([[3, 2, -1], [1, 2, -1]])
+ >>> atol_vectoriser = Atol(quantiser=KMeans(n_clusters=2, random_state=202006))
+ >>> atol_vectoriser.fit(X=[a, b, c]).centers
+ array([[ 2.6 , 2.8 , -0.4 ],
+ [ 2. , 0.66666667, 3.33333333]])
+ >>> atol_vectoriser(a)
+ array([0.42375966, 1.18168665])
+ >>> atol_vectoriser(c)
+ array([1.25157463, 0.02062512])
+ >>> atol_vectoriser.transform(X=[a, b, c])
+ array([[0.42375966, 1.18168665],
+ [1.06330156, 0.29861028],
+ [1.25157463, 0.02062512]])
+ """
+ # Note the example above must be up to date with the one in tests called test_atol_doc
+ def __init__(self, quantiser, weighting_method="cloud", contrast="gaussian"):
+ """
+ Constructor for the Atol measure vectorisation class.
+
+ Parameters:
+ quantiser (Object): Object with a `fit` method (sklearn API consistent) and `cluster_centers_` and `n_clusters`
+ attributes, e.g. sklearn.cluster.KMeans. It will be fitted when the Atol object function `fit` is called.
+ weighting_method (string): constant generic function for weighting the measure points;
+ choose from {"cloud", "iidproba"}
+ (default: "cloud", i.e. the measure is seen as a point cloud).
+ This has no impact if weights are provided along with the measures in both `fit` and `transform`.
+ contrast (string): constant function for evaluating the proximity of a measure with respect to centers;
+ choose from {"gaussian", "laplacian", "indicator"}
+ (default: "gaussian" contrast function, see page 3 in the ATOL paper).
+ """
+ self.quantiser = quantiser
+ self.contrast = {
+ "gaussian": _gaus_contrast,
+ "laplacian": _lapl_contrast,
+ "indicator": _indicator_contrast,
+ }.get(contrast, _gaus_contrast)
+ self.weighting_method = {
+ "cloud" : _cloud_weighting,
+ "iidproba": _iidproba_weighting,
+ }.get(weighting_method, _cloud_weighting)
+
+ def fit(self, X, y=None, sample_weight=None):
+ """
+ Calibration step: fit centers to the sample measures and derive inertias between centers.
+
+ Parameters:
+ X (list N x d numpy arrays): input measures in R^d from which to learn center locations and inertias
+ (measures can have different N).
+ y: Ignored, present for API consistency by convention.
+ sample_weight (list of numpy arrays): weights for each measure point in X, optional.
+ If None, the object's weighting_method will be used.
+
+ Returns:
+ self
+ """
+ if not hasattr(self.quantiser, 'fit'):
+ raise TypeError("quantiser %s has no `fit` attribute." % (self.quantiser))
+ if sample_weight is None:
+ sample_weight = np.concatenate([self.weighting_method(measure) for measure in X])
+
+ measures_concat = np.concatenate(X)
+ self.quantiser.fit(X=measures_concat, sample_weight=sample_weight)
+ self.centers = self.quantiser.cluster_centers_
+ # Hack, but some people are unhappy if the order depends on the version of sklearn
+ self.centers = self.centers[np.lexsort(self.centers.T)]
+ if self.quantiser.n_clusters == 1:
+ dist_centers = pairwise.pairwise_distances(measures_concat)
+ np.fill_diagonal(dist_centers, 0)
+ self.inertias = np.array([np.max(dist_centers)/2])
+ else:
+ dist_centers = pairwise.pairwise_distances(self.centers)
+ dist_centers[dist_centers == 0] = np.inf
+ self.inertias = np.min(dist_centers, axis=0)/2
+ return self
+
+ def __call__(self, measure, sample_weight=None):
+ """
+ Apply measure vectorisation on a single measure.
+
+ Parameters:
+ measure (n x d numpy array): input measure in R^d.
+ sample_weight (numpy array): weights for each measure point, optional.
+ If None, the object's weighting_method will be used.
+
+ Returns:
+ numpy array in R^self.quantiser.n_clusters.
+ """
+ if sample_weight is None:
+ sample_weight = self.weighting_method(measure)
+ return np.sum(sample_weight * self.contrast(measure, self.centers, self.inertias.T).T, axis=1)
+
+ def transform(self, X, sample_weight=None):
+ """
+ Apply measure vectorisation on a list of measures.
+
+ Parameters:
+ X (list N x d numpy arrays): input measures in R^d (measures can have different N).
+ sample_weight (list of numpy arrays): weights for each measure point in X, optional.
+ If None, the object's weighting_method will be used.
+
+ Returns:
+ numpy array with shape (number of measures) x (self.quantiser.n_clusters).
+ """
+ if sample_weight is None:
+ sample_weight = [self.weighting_method(measure) for measure in X]
+ return np.stack([self(measure, sample_weight=weight) for measure, weight in zip(X, sample_weight)])