author    Marc Glisse <marc.glisse@inria.fr>  2019-11-15 22:45:15 +0100
committer Marc Glisse <marc.glisse@inria.fr>  2019-11-15 22:45:15 +0100
commit    908679b72c215d1914d8e3956126fa44367b937f (patch)
tree      48cda77d91f4c914ed8d26ea47adb2a9bb4eb7a6 /src/python/gudhi/representations
parent    b2d81dd8ee2ed7e1269eb16816f9af6794305046 (diff)
The big rename: sktda -> representations
Diffstat (limited to 'src/python/gudhi/representations')
-rw-r--r--  src/python/gudhi/representations/__init__.py       |   6
-rw-r--r--  src/python/gudhi/representations/kernel_methods.py | 206
-rw-r--r--  src/python/gudhi/representations/metrics.py        | 243
-rw-r--r--  src/python/gudhi/representations/preprocessing.py  | 305
-rw-r--r--  src/python/gudhi/representations/vector_methods.py | 485
5 files changed, 1245 insertions, 0 deletions
diff --git a/src/python/gudhi/representations/__init__.py b/src/python/gudhi/representations/__init__.py
new file mode 100644
index 00000000..f020248d
--- /dev/null
+++ b/src/python/gudhi/representations/__init__.py
@@ -0,0 +1,6 @@
+from .kernel_methods import *
+from .metrics import *
+from .preprocessing import *
+from .vector_methods import *
+
+__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods"]
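After this rename, the classes defined in the files below are exposed under gudhi.representations instead of gudhi.sktda. A minimal sketch of the new import path (the toy diagram and parameter values are illustrative assumptions, not part of the commit):

    import numpy as np
    from gudhi.representations import Landscape  # formerly gudhi.sktda

    diag = np.array([[0.0, 1.0], [0.5, 2.0]])    # one toy persistence diagram
    LS = Landscape(num_landscapes=2, resolution=10, sample_range=[0.0, 2.0])
    vec = LS.fit_transform([diag])               # shape (1, 2 * 10)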
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
new file mode 100644
index 00000000..c855d2be
--- /dev/null
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -0,0 +1,206 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.metrics import pairwise_distances
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+
+#############################################
+# Kernel methods ############################
+#############################################
+
+class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+ """
+ def __init__(self, num_directions=10, bandwidth=1.0):
+ """
+ Constructor for the SlicedWassersteinKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel applied to the sliced Wasserstein distance (default 1.).
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
+ """
+ self.bandwidth = bandwidth
+ self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+
+ def fit(self, X, y=None):
+ """
+ Fit the SlicedWassersteinKernel class on a list of persistence diagrams: an instance of the SlicedWassersteinDistance class is fitted on the diagrams and then stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.sw_.fit(X, y)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all sliced Wasserstein kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise sliced Wasserstein kernel values.
+ """
+ return np.exp(-self.sw_.transform(X)/self.bandwidth)
+
+class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence weighted Gaussian kernel matrix from a list of persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+ """
+ def __init__(self, bandwidth=1., weight=lambda x: 1, kernel_approx=None):
+ """
+ Constructor for the PersistenceWeightedGaussianKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
+ weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth, self.weight = bandwidth, weight
+ self.kernel_approx = kernel_approx
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceWeightedGaussianKernel class on a list of persistence diagrams: persistence diagrams are stored in **diagrams_** and the kernel approximation class (if not None) is applied on them.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = list(X)
+ self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
+ if self.kernel_approx is not None:
+ self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence weighted Gaussian kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise persistence weighted Gaussian kernel values.
+ """
+ Xp = list(X)
+ Xfit = np.zeros((len(Xp), len(self.diagrams_)))
+ if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
+ if self.kernel_approx is not None:
+ Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
+ else:
+ for i in range(len(self.diagrams_)):
+ for j in range(i+1, len(self.diagrams_)):
+ W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
+ Xfit[i,j] = np.sum(np.multiply(W, E))
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
+ if self.kernel_approx is not None:
+ approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
+ Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
+ else:
+ for i in range(len(Xp)):
+ for j in range(len(self.diagrams_)):
+ W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
+ Xfit[i,j] = np.sum(np.multiply(W, E))
+
+ return Xfit
+
+class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence scale space kernel matrix from a list of persistence diagrams. The persistence scale space kernel is computed by adding to each diagram the symmetric of each of its points with respect to the diagonal, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+ """
+ def __init__(self, bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceScaleSpaceKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceScaleSpaceKernel class on a list of persistence diagrams: the symmetrics of all points with respect to the diagonal are computed and appended to the diagrams, and an instance of the PersistenceWeightedGaussianKernel class is fitted on these augmented diagrams and stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = list(X)
+ for i in range(len(self.diagrams_)):
+ op_D = self.diagrams_[i][:,[1,0]]
+ self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
+ # fit on the diagrams augmented with their diagonal symmetrics, consistent with transform()
+ self.pwg_.fit(self.diagrams_)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence scale space kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise persistence scale space kernel values.
+ """
+ Xp = list(X)
+ for i in range(len(Xp)):
+ op_X = np.matmul(Xp[i], np.array([[0.,1.], [1.,0.]]))
+ Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
+ return self.pwg_.transform(Xp)
+
+class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence Fisher kernel matrix from a list of persistence diagrams. The persistence Fisher kernel is computed by exponentiating the corresponding persistence Fisher distance with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+ """
+ def __init__(self, bandwidth_fisher=1., bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceFisherKernel class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel applied to the persistence Fisher distance (default 1.).
+ bandwidth_fisher (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions by PersistenceFisherDistance class (default 1.).
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth = bandwidth
+ self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceFisherKernel class on a list of persistence diagrams: an instance of the PersistenceFisherDistance class is fitted on the diagrams and then stored.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.pf_.fit(X, y)
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence Fisher kernel values between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise persistence Fisher kernel values.
+ """
+ return np.exp(-self.pf_.transform(X)/self.bandwidth)
+
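For illustration, a minimal usage sketch of the kernel classes above; the diagrams and parameter values are toy assumptions:

    import numpy as np
    from gudhi.representations import SlicedWassersteinKernel

    # Two toy persistence diagrams, one (birth, death) pair per row.
    D1 = np.array([[0.0, 1.0], [0.2, 0.5]])
    D2 = np.array([[0.1, 1.2]])

    swk = SlicedWassersteinKernel(num_directions=10, bandwidth=1.0)
    swk.fit([D1, D2])             # fits the underlying SlicedWassersteinDistance
    K = swk.transform([D1, D2])   # (2, 2) matrix, K = exp(-SW distance / bandwidth)

The other kernels (PersistenceWeightedGaussianKernel, PersistenceScaleSpaceKernel, PersistenceFisherKernel) follow the same fit/transform pattern.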
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
new file mode 100644
index 00000000..c512cb82
--- /dev/null
+++ b/src/python/gudhi/representations/metrics.py
@@ -0,0 +1,243 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.metrics import pairwise_distances
+try:
+ from .. import bottleneck_distance
+ USE_GUDHI = True
+except ImportError:
+ USE_GUDHI = False
+ print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+
+#############################################
+# Metrics ###################################
+#############################################
+
+class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+ """
+ def __init__(self, num_directions=10):
+ """
+ Constructor for the SlicedWassersteinDistance class.
+
+ Parameters:
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
+ """
+ self.num_directions = num_directions
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
+ self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+
+ def fit(self, X, y=None):
+ """
+ Fit the SlicedWassersteinDistance class on a list of persistence diagrams: persistence diagrams are projected onto the different lines. The diagrams themselves, their projections and the projections of their diagonal images are then stored in **diagrams_**, **approx_** and **approx_diag_**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
+ diag_proj = (1./2) * np.ones((2,2))
+ self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
+ return self
+
+ def transform(self, X):
+ """
+ Compute all sliced Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise sliced Wasserstein distances.
+ """
+ Xfit = np.zeros((len(X), len(self.approx_)))
+ if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ for i in range(len(self.approx_)):
+ for j in range(i+1, len(self.approx_)):
+ A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
+ B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ Xfit[i,j] = np.mean(L1)
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ diag_proj = (1./2) * np.ones((2,2))
+ approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
+ approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
+ for i in range(len(approx)):
+ for j in range(len(self.approx_)):
+ A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
+ B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ Xfit[i,j] = np.mean(L1)
+
+ return Xfit
+
+class BottleneckDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the bottleneck distance matrix from a list of persistence diagrams.
+ """
+ def __init__(self, epsilon=1e-3):
+ """
+ Constructor for the BottleneckDistance class.
+
+ Parameters:
+ epsilon (double): approximation quality (default 1e-3).
+ """
+ self.epsilon = epsilon
+
+ def fit(self, X, y=None):
+ """
+ Fit the BottleneckDistance class on a list of persistence diagrams: persistence diagrams are stored in **diagrams_**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all bottleneck distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise bottleneck distances.
+ """
+ num_diag1 = len(X)
+
+ if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ matrix = np.zeros((num_diag1, num_diag1))
+
+ if USE_GUDHI:
+ for i in range(num_diag1):
+ for j in range(i+1, num_diag1):
+ matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
+ matrix[j,i] = matrix[i,j]
+ else:
+ print("Gudhi required---returning null matrix")
+
+ else:
+ num_diag2 = len(self.diagrams_)
+ matrix = np.zeros((num_diag1, num_diag2))
+
+ if USE_GUDHI:
+ for i in range(num_diag1):
+ for j in range(num_diag2):
+ matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
+ else:
+ print("Gudhi required---returning null matrix")
+
+ Xfit = matrix
+
+ return Xfit
+
+class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the persistence Fisher distance matrix from a list of persistence diagrams. The persistence Fisher distance is obtained by computing the Fisher information distance between the probability distributions obtained by convolving the persistence diagrams with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+ """
+ def __init__(self, bandwidth=1., kernel_approx=None):
+ """
+ Constructor for the PersistenceFisherDistance class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions (default 1.).
+ kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+ """
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceFisherDistance class on a list of persistence diagrams: persistence diagrams are stored in **diagrams_** and the kernel approximation class (if not None) is applied on them.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ projection = (1./2) * np.ones((2,2))
+ self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
+ if self.kernel_approx is not None:
+ self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
+ self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
+ return self
+
+ def transform(self, X):
+ """
+ Compute all persistence Fisher distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams fitted during fit()): matrix of pairwise persistence Fisher distances.
+ """
+ Xfit = np.zeros((len(X), len(self.diagrams_)))
+ if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
+ for i in range(len(self.diagrams_)):
+ for j in range(i+1, len(self.diagrams_)):
+ if self.kernel_approx is not None:
+ Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
+ U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
+ U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit[j,i] = Xfit[i,j]
+ else:
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
+ if self.kernel_approx is not None:
+ approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
+ approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
+ for i in range(len(X)):
+ for j in range(len(self.diagrams_)):
+ if self.kernel_approx is not None:
+ Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
+ U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
+ U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ return Xfit
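Again for illustration, a sketch of the distance classes with toy inputs; note that BottleneckDistance falls back to a null matrix when GUDHI was built without CGAL, as the import guard above shows:

    import numpy as np
    from gudhi.representations import SlicedWassersteinDistance, BottleneckDistance

    D1 = np.array([[0.0, 1.0], [0.2, 0.5]])
    D2 = np.array([[0.1, 1.2]])

    swd = SlicedWassersteinDistance(num_directions=20).fit([D1, D2])
    M = swd.transform([D1, D2])          # (2, 2) symmetric distance matrix

    bd = BottleneckDistance(epsilon=1e-3).fit([D1])
    row = bd.transform([D2])             # (1, 1): distance from D2 to the fitted D1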
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
new file mode 100644
index 00000000..83227ca1
--- /dev/null
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -0,0 +1,305 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.preprocessing import StandardScaler
+
+#############################################
+# Preprocessing #############################
+#############################################
+
+class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
+ """
+ This is a class for the affine transformation (x,y) -> (x,y-x) to be applied on persistence diagrams.
+ """
+ def __init__(self):
+ """
+ Constructor for BirthPersistenceTransform class.
+ """
+ return None
+
+ def fit(self, X, y=None):
+ """
+ Fit the BirthPersistenceTransform class on a list of persistence diagrams (this function actually does nothing but is useful when BirthPersistenceTransform is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Apply the BirthPersistenceTransform function on the persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy array): input persistence diagrams.
+
+ Returns:
+ list of n x 2 numpy array: transformed persistence diagrams.
+ """
+ Xfit = []
+ for diag in X:
+ #new_diag = np.empty(diag.shape)
+ #np.copyto(new_diag, diag)
+ new_diag = np.copy(diag)
+ new_diag[:,1] = new_diag[:,1] - new_diag[:,0]
+ Xfit.append(new_diag)
+ return Xfit
+
+class Clamping(BaseEstimator, TransformerMixin):
+ """
+ This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
+ """
+ def __init__(self, limit=np.inf):
+ """
+ Constructor for the Clamping class.
+
+ Parameters:
+ limit (double): clamping value (default np.inf).
+ """
+ self.limit = limit
+
+ def fit(self, X, y=None):
+ """
+ Fit the Clamping class on a list of values (this function actually does nothing but is useful when Clamping is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (numpy array of size n): input values.
+ y (n x 1 array): value labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Clamp list of values.
+
+ Parameters:
+ X (numpy array of size n): input list of values.
+
+ Returns:
+ numpy array of size n: output list of values.
+ """
+ Xfit = np.minimum(X, self.limit)
+ #Xfit = np.where(X >= self.limit, self.limit * np.ones(X.shape), X)
+ return Xfit
+
+class DiagramScaler(BaseEstimator, TransformerMixin):
+ """
+ This is a class for preprocessing persistence diagrams with a given list of scalers, such as those included in scikit-learn.
+ """
+ def __init__(self, use=False, scalers=[]):
+ """
+ Constructor for the DiagramScaler class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ scalers (list of classes): list of scalers to be fit on the persistence diagrams (default []). Each element of the list is a tuple with two elements: the first one is a list of coordinates, and the second one is a scaler (i.e. a class with fit() and transform() methods) that is going to be applied to these coordinates. Common scalers can be found in the scikit-learn library (such as MinMaxScaler for instance).
+ """
+ self.scalers = scalers
+ self.use = use
+
+ def fit(self, X, y=None):
+ """
+ Fit the DiagramScaler class on a list of persistence diagrams: persistence diagrams are concatenated in a big numpy array, and scalers are fit (by calling their fit() method) on their corresponding coordinates in this big array.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if self.use:
+ if len(X) == 1:
+ P = X[0]
+ else:
+ P = np.concatenate(X,0)
+ for (indices, scaler) in self.scalers:
+ scaler.fit(np.reshape(P[:,indices], [-1, 1]))
+ return self
+
+ def transform(self, X):
+ """
+ Apply the DiagramScaler function on the persistence diagrams. The fitted scalers are applied (by calling their transform() method) to their corresponding coordinates in each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: transformed persistence diagrams.
+ """
+ Xfit = [np.copy(d) for d in X]
+ if self.use:
+ for i in range(len(Xfit)):
+ if Xfit[i].shape[0] > 0:
+ for (indices, scaler) in self.scalers:
+ for I in indices:
+ Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
+ return Xfit
+
+class Padding(BaseEstimator, TransformerMixin):
+ """
+ This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
+ """
+ def __init__(self, use=False):
+ """
+ Constructor for the Padding class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ """
+ self.use = use
+
+ def fit(self, X, y=None):
+ """
+ Fit the Padding class on a list of persistence diagrams: the maximum number of points over all the diagrams is computed and stored in **max_pts**.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.max_pts = max([len(diag) for diag in X])
+ return self
+
+ def transform(self, X):
+ """
+ Add dummy points to each persistence diagram so that they all have the same cardinality. All points are given an additional coordinate indicating if the point was added after padding (0) or already present before (1).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 3 or n x 2 numpy arrays: padded persistence diagrams.
+ """
+ if self.use:
+ Xfit, num_diag = [], len(X)
+ for diag in X:
+ diag_pad = np.pad(diag, ((0,max(0, self.max_pts - diag.shape[0])), (0,1)), "constant", constant_values=((0,0),(0,0)))
+ diag_pad[:diag.shape[0],-1] = np.ones(diag.shape[0])  # flag original points in the last column, so both n x 2 and n x 1 inputs work
+ Xfit.append(diag_pad)
+ else:
+ Xfit = X
+ return Xfit
+
+class ProminentPoints(BaseEstimator, TransformerMixin):
+ """
+ This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
+ """
+ def __init__(self, use=False, num_pts=10, threshold=-1, location="upper"):
+ """
+ Constructor for the ProminentPoints class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ location (string): either "upper" or "lower" (default "upper"). Whether to keep the points that are far away ("upper") or close ("lower") to the diagonal.
+ num_pts (int): cardinality threshold (default 10). If location == "upper", keep the top **num_pts** points that are the farthest away from the diagonal. If location == "lower", keep the top **num_pts** points that are the closest to the diagonal.
+ threshold (double): distance-to-diagonal threshold (default -1). If location == "upper", keep the points that are at least at a distance **threshold** from the diagonal. If location == "lower", keep the points that are at most at a distance **threshold** from the diagonal.
+ """
+ self.num_pts = num_pts
+ self.threshold = threshold
+ self.use = use
+ self.location = location
+
+ def fit(self, X, y=None):
+ """
+ Fit the ProminentPoints class on a list of persistence diagrams (this function actually does nothing but is useful when ProminentPoints is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ If location == "upper", first select the top **num_pts** points that are the farthest away from the diagonal, then select and return from these points the ones that are at least at distance **threshold** from the diagonal for each persistence diagram individually. If location == "lower", first select the top **num_pts** points that are the closest to the diagonal, then select and return from these points the ones that are at most at distance **threshold** from the diagonal for each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: thresholded persistence diagrams.
+ """
+ if self.use:
+ Xfit, num_diag = [], len(X)
+ for i in range(num_diag):
+ diag = X[i]
+ if diag.shape[1] >= 2:
+ if diag.shape[0] > 0:
+ pers = np.abs(diag[:,1] - diag[:,0])
+ idx_thresh = pers >= self.threshold
+ thresh_diag, thresh_pers = diag[idx_thresh], pers[idx_thresh]
+ sort_index = np.flip(np.argsort(thresh_pers, axis=None), 0)
+ if self.location == "upper":
+ new_diag = thresh_diag[sort_index[:min(self.num_pts, thresh_diag.shape[0])],:]
+ if self.location == "lower":
+ new_diag = np.concatenate( [ thresh_diag[sort_index[min(self.num_pts, thresh_diag.shape[0]):],:], diag[~idx_thresh] ], axis=0)
+ else:
+ new_diag = diag
+
+ else:
+ if diag.shape[0] > 0:
+ birth = diag[:,:1]
+ idx_thresh = birth[:,0] >= self.threshold  # 1-D mask, so the indexing below keeps the arrays two-dimensional
+ thresh_diag, thresh_birth = diag[idx_thresh], birth[idx_thresh]
+ if self.location == "upper":
+ new_diag = thresh_diag[:min(self.num_pts, thresh_diag.shape[0]),:]
+ if self.location == "lower":
+ new_diag = np.concatenate( [ thresh_diag[min(self.num_pts, thresh_diag.shape[0]):,:], diag[~idx_thresh] ], axis=0)
+ else:
+ new_diag = diag
+
+ Xfit.append(new_diag)
+ else:
+ Xfit = X
+ return Xfit
+
+class DiagramSelector(BaseEstimator, TransformerMixin):
+ """
+ This is a class for extracting finite or essential points in persistence diagrams.
+ """
+ def __init__(self, use=False, limit=np.inf, point_type="finite"):
+ """
+ Constructor for the DiagramSelector class.
+
+ Parameters:
+ use (bool): whether to use the class or not (default False).
+ limit (double): second coordinate value that is the criterion for being an essential point (default numpy.inf).
+ point_type (string): either "finite" or "essential". The type of the points that are going to be extracted.
+ """
+ self.use, self.limit, self.point_type = use, limit, point_type
+
+ def fit(self, X, y=None):
+ """
+ Fit the DiagramSelector class on a list of persistence diagrams (this function actually does nothing but is useful when DiagramSelector is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Extract and return the finite or essential points of each persistence diagram individually.
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+
+ Returns:
+ list of n x 2 or n x 1 numpy arrays: extracted persistence diagrams.
+ """
+ if self.use:
+ Xfit, num_diag = [], len(X)
+ if self.point_type == "finite":
+ Xfit = [ diag[diag[:,1] < self.limit] if diag.shape[0] != 0 else diag for diag in X]
+ else:
+ Xfit = [ diag[diag[:,1] >= self.limit, 0:1] if diag.shape[0] != 0 else diag for diag in X]
+ else:
+ Xfit = X
+ return Xfit
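The preprocessing classes are scikit-learn transformers, so they compose in a Pipeline; a sketch under the assumption of one toy diagram containing an essential point:

    import numpy as np
    from sklearn.pipeline import Pipeline
    from gudhi.representations import DiagramSelector, ProminentPoints, Landscape

    diag = np.array([[0.0, 1.0], [0.3, 0.4], [0.0, np.inf]])  # last point is essential

    pipe = Pipeline([
        ("finite", DiagramSelector(use=True, point_type="finite")),  # drop (0, inf)
        ("top",    ProminentPoints(use=True, num_pts=10)),           # keep most persistent points
        ("vector", Landscape(resolution=50, sample_range=[0.0, 1.0])),
    ])
    vec = pipe.fit_transform([diag])   # shape (1, 5 * 50)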
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
new file mode 100644
index 00000000..bf32f18e
--- /dev/null
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -0,0 +1,485 @@
+# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+# Author(s): Mathieu Carrière
+#
+# Copyright (C) 2018-2019 Inria
+#
+# Modification(s):
+# - YYYY/MM Author: Description of the modification
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.preprocessing import MinMaxScaler, MaxAbsScaler
+from sklearn.neighbors import DistanceMetric
+
+from .preprocessing import DiagramScaler, BirthPersistenceTransform
+
+#############################################
+# Finite Vectorization methods ##############
+#############################################
+
+class PersistenceImage(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence images from a list of persistence diagrams. A persistence image is a 2D function computed from a persistence diagram by convolving the diagram points with a weighted Gaussian kernel. The plane is then discretized into an image with pixels, which is flattened and returned as a vector. See http://jmlr.org/papers/v18/16-337.html for more details.
+ """
+ def __init__(self, bandwidth=1., weight=lambda x: 1, resolution=[20,20], im_range=[np.nan, np.nan, np.nan, np.nan]):
+ """
+ Constructor for the PersistenceImage class.
+
+ Parameters:
+ bandwidth (double): bandwidth of the Gaussian kernel (default 1.).
+ weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ resolution ([int,int]): size (in pixels) of the persistence image (default [20,20]).
+ im_range ([double,double,double,double]): minimum and maximum of each axis of the persistence image, of the form [x_min, x_max, y_min, y_max] (default [numpy.nan, numpy.nan, numpy.nan, numpy.nan]). If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.bandwidth, self.weight = bandwidth, weight
+ self.resolution, self.im_range = resolution, im_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the PersistenceImage class on a list of persistence diagrams: if any of the values in **im_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.im_range)).any():
+ new_X = BirthPersistenceTransform().fit_transform(X)
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(new_X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.im_range = np.where(np.isnan(np.array(self.im_range)), np.array([mx, Mx, my, My]), np.array(self.im_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence image for each persistence diagram individually and store the results in a single numpy array.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (number of pixels = **resolution[0]** x **resolution[1]**): output persistence images.
+ """
+ num_diag, Xfit = len(X), []
+ new_X = BirthPersistenceTransform().fit_transform(X)
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = new_X[i], X[i].shape[0]
+
+ w = np.empty(num_pts_in_diag)
+ for j in range(num_pts_in_diag):
+ w[j] = self.weight(diagram[j,:])
+
+ x_values, y_values = np.linspace(self.im_range[0], self.im_range[1], self.resolution[0]), np.linspace(self.im_range[2], self.im_range[3], self.resolution[1])
+ Xs, Ys = np.tile((diagram[:,0][:,np.newaxis,np.newaxis]-x_values[np.newaxis,np.newaxis,:]),[1,self.resolution[1],1]), np.tile(diagram[:,1][:,np.newaxis,np.newaxis]-y_values[np.newaxis,:,np.newaxis],[1,1,self.resolution[0]])
+ image = np.tensordot(w, np.exp((-np.square(Xs)-np.square(Ys))/(2*np.square(self.bandwidth)))/(np.square(self.bandwidth)*2*np.pi), 1)
+
+ Xfit.append(image.flatten()[np.newaxis,:])
+
+ Xfit = np.concatenate(Xfit,0)
+
+ return Xfit
+
+class Landscape(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled uniformly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
+ """
+ def __init__(self, num_landscapes=5, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Landscape class.
+
+ Parameters:
+ num_landscapes (int): number of piecewise-linear functions to output (default 5).
+ resolution (int): number of samples for all piecewise-linear functions (default 100).
+ sample_range ([double, double]): minimum and maximum of all piecewise-linear function domains, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn uniformly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.num_landscapes, self.resolution, self.sample_range = num_landscapes, resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the Landscape class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence landscape for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (number of samples = **num_landscapes** x **resolution**): output persistence landscapes.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ ls = np.zeros([self.num_landscapes, self.resolution])
+
+ events = []
+ for j in range(self.resolution):
+ events.append([])
+
+ for j in range(num_pts_in_diag):
+ [px,py] = diagram[j,:2]
+ min_idx = np.minimum(np.maximum(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ mid_idx = np.minimum(np.maximum(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ max_idx = np.minimum(np.maximum(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+
+ if min_idx < self.resolution and max_idx > 0:
+
+ landscape_value = self.sample_range[0] + min_idx * step_x - px
+ for k in range(min_idx, mid_idx):
+ events[k].append(landscape_value)
+ landscape_value += step_x
+
+ landscape_value = py - self.sample_range[0] - mid_idx * step_x
+ for k in range(mid_idx, max_idx):
+ events[k].append(landscape_value)
+ landscape_value -= step_x
+
+ for j in range(self.resolution):
+ events[j].sort(reverse=True)
+ for k in range( min(self.num_landscapes, len(events[j])) ):
+ ls[k,j] = events[j][k]
+
+ Xfit.append(np.sqrt(2)*np.reshape(ls,[1,-1]))
+
+ Xfit = np.concatenate(Xfit,0)
+
+ return Xfit
+
+class Silhouette(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by uniformly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
+ """
+ def __init__(self, weight=lambda x: 1, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Silhouette class.
+
+ Parameters:
+ weight (function): weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. on lists or numpy arrays of the form [p_x,p_y].
+ resolution (int): number of samples for the weighted average (default 100).
+ sample_range ([double, double]): minimum and maximum for the weighted average domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn uniformly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.weight, self.resolution, self.sample_range = weight, resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the Silhouette class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the persistence silhouette for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**resolution**): output persistence silhouettes.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ sh, weights = np.zeros(self.resolution), np.zeros(num_pts_in_diag)
+ for j in range(num_pts_in_diag):
+ weights[j] = self.weight(diagram[j,:])
+ total_weight = np.sum(weights)
+
+ for j in range(num_pts_in_diag):
+
+ [px,py] = diagram[j,:2]
+ weight = weights[j] / total_weight
+ min_idx = np.minimum(np.maximum(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ mid_idx = np.minimum(np.maximum(np.ceil((0.5*(py+px) - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ max_idx = np.minimum(np.maximum(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+
+ if min_idx < self.resolution and max_idx > 0:
+
+ silhouette_value = self.sample_range[0] + min_idx * step_x - px
+ for k in range(min_idx, mid_idx):
+ sh[k] += weight * silhouette_value
+ silhouette_value += step_x
+
+ silhouette_value = py - self.sample_range[0] - mid_idx * step_x
+ for k in range(mid_idx, max_idx):
+ sh[k] += weight * silhouette_value
+ silhouette_value -= step_x
+
+ Xfit.append(np.reshape(np.sqrt(2) * sh, [1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
+class BettiCurve(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled uniformly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
+ """
+ def __init__(self, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the BettiCurve class.
+
+ Parameters:
+ resolution (int): number of samples for the piecewise-constant function (default 100).
+ sample_range ([double, double]): minimum and maximum of the piecewise-constant function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn uniformly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method.
+ """
+ self.resolution, self.sample_range = resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the BettiCurve class on a list of persistence diagrams: if any of the values in **sample_range** is numpy.nan, replace it with the corresponding value computed on the given list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the Betti curve for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**resolution**): output Betti curves.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+
+ bc = np.zeros(self.resolution)
+ for j in range(num_pts_in_diag):
+ [px,py] = diagram[j,:2]
+ min_idx = np.minimum(np.maximum(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ max_idx = np.minimum(np.maximum(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ for k in range(min_idx, max_idx):
+ bc[k] += 1
+
+ Xfit.append(np.reshape(bc,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
+class Entropy(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired by Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
+ """
+ def __init__(self, mode="scalar", normalized=True, resolution=100, sample_range=[np.nan, np.nan]):
+ """
+ Constructor for the Entropy class.
+
+ Parameters:
+ mode (string): what entropy to compute: either "scalar" for computing the entropy statistics, or "vector" for computing the entropy summary functions (default "scalar").
+ normalized (bool): whether to normalize the entropy summary function (default True). Used only if **mode** = "vector".
+ resolution (int): number of samples for the entropy summary function (default 100). Used only if **mode** = "vector".
+ sample_range ([double, double]): minimum and maximum of the entropy summary function domain, of the form [x_min, x_max] (default [numpy.nan, numpy.nan]). It is the interval on which samples will be drawn uniformly. If one of the values is numpy.nan, it can be computed from the persistence diagrams with the fit() method. Used only if **mode** = "vector".
+ """
+ self.mode, self.normalized, self.resolution, self.sample_range = mode, normalized, resolution, sample_range
+
+ def fit(self, X, y=None):
+ """
+ Fit the Entropy class on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ if np.isnan(np.array(self.sample_range)).any():
+ pre = DiagramScaler(use=True, scalers=[([0], MinMaxScaler()), ([1], MinMaxScaler())]).fit(X,y)
+ [mx,my],[Mx,My] = [pre.scalers[0][1].data_min_[0], pre.scalers[1][1].data_min_[0]], [pre.scalers[0][1].data_max_[0], pre.scalers[1][1].data_max_[0]]
+ self.sample_range = np.where(np.isnan(np.array(self.sample_range)), np.array([mx, My]), np.array(self.sample_range))
+ return self
+
+ def transform(self, X):
+ """
+ Compute the entropy for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ num_diag, Xfit = len(X), []
+ x_values = np.linspace(self.sample_range[0], self.sample_range[1], self.resolution)
+ step_x = x_values[1] - x_values[0]
+ new_X = BirthPersistenceTransform().fit_transform(X)
+
+ for i in range(num_diag):
+
+ orig_diagram, diagram, num_pts_in_diag = X[i], new_X[i], X[i].shape[0]
+ new_diagram = DiagramScaler(use=True, scalers=[([1], MaxAbsScaler())]).fit_transform([diagram])[0]
+
+ if self.mode == "scalar":
+ ent = - np.sum( np.multiply(new_diagram[:,1], np.log(new_diagram[:,1])) )
+ Xfit.append(np.array([[ent]]))
+
+ else:
+ ent = np.zeros(self.resolution)
+ for j in range(num_pts_in_diag):
+ [px,py] = orig_diagram[j,:2]
+ min_idx = np.minimum(np.maximum(np.ceil((px - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ max_idx = np.minimum(np.maximum(np.ceil((py - self.sample_range[0]) / step_x).astype(int), 0), self.resolution)
+ for k in range(min_idx, max_idx):
+ ent[k] += (-1) * new_diagram[j,1] * np.log(new_diagram[j,1])
+ if self.normalized:
+ ent = ent / np.linalg.norm(ent, ord=1)
+ Xfit.append(np.reshape(ent,[1,-1]))
+
+ Xfit = np.concatenate(Xfit, 0)
+
+ return Xfit
+
+class TopologicalVector(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated to a persistence diagram is the sorted vector of a slight modification of the pairwise distances between the persistence diagram points. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details.
+ """
+ def __init__(self, threshold=10):
+ """
+ Constructor for the TopologicalVector class.
+
+ Parameters:
+ threshold (int): number of distances to keep (default 10). This is the dimension of the topological vector. If -1, this threshold is computed from the list of persistence diagrams by considering the one with the largest number of points and using the dimension of its corresponding topological vector as threshold.
+ """
+ self.threshold = threshold
+
+ def fit(self, X, y=None):
+ """
+ Fit the TopologicalVector class on a list of persistence diagrams (this function actually does nothing but is useful when TopologicalVector is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Compute the topological vector for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**threshold**): output topological vectors.
+ """
+ if self.threshold == -1:
+ thresh = np.array([X[i].shape[0] for i in range(len(X))]).max()
+ else:
+ thresh = self.threshold
+
+ num_diag = len(X)
+ Xfit = np.zeros([num_diag, thresh])
+
+ for i in range(num_diag):
+
+ diagram, num_pts_in_diag = X[i], X[i].shape[0]
+ pers = 0.5 * (diagram[:,1]-diagram[:,0])
+ min_pers = np.minimum(pers[:,np.newaxis], pers[np.newaxis,:])  # pairwise minimum of the two points' persistences
+ distances = DistanceMetric.get_metric("chebyshev").pairwise(diagram)
+ vect = np.flip(np.sort(np.triu(np.minimum(distances, min_pers)), axis=None), 0)
+ dim = min(len(vect), thresh)
+ Xfit[i, :dim] = vect[:dim]
+
+ return Xfit
+
+class ComplexPolynomial(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
+ """
+ def __init__(self, polynomial_type="R", threshold=10):
+ """
+ Constructor for the ComplexPolynomial class.
+
+ Parameters:
+ polynomial_type (char): either "R", "S" or "T" (default "R"). Type of complex polynomial that is going to be computed (explained in https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27).
+ threshold (int): number of coefficients (default 10). This is the dimension of the complex vector of coefficients, i.e. the number of coefficients corresponding to the largest degree terms of the polynomial. If -1, this threshold is computed from the list of persistence diagrams by considering the one with the largest number of points and using the dimension of its corresponding complex vector of coefficients as threshold.
+ """
+ self.threshold, self.polynomial_type = threshold, polynomial_type
+
+ def fit(self, X, y=None):
+ """
+ Fit the ComplexPolynomial class on a list of persistence diagrams (this function actually does nothing but is useful when ComplexPolynomial is included in a scikit-learn Pipeline).
+
+ Parameters:
+ X (list of n x 2 or n x 1 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ return self
+
+ def transform(self, X):
+ """
+ Compute the complex vector of coefficients for each persistence diagram individually and concatenate the results.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array with shape (number of diagrams) x (**threshold**): output complex vectors of coefficients.
+ """
+ if self.threshold == -1:
+ thresh = np.array([X[i].shape[0] for i in range(len(X))]).max()
+ else:
+ thresh = self.threshold
+
+ Xfit = np.zeros([len(X), thresh]) + 1j * np.zeros([len(X), thresh])
+ for d in range(len(X)):
+ D, N = X[d], X[d].shape[0]
+ if self.polynomial_type == "R":
+ roots = D[:,0] + 1j * D[:,1]
+ elif self.polynomial_type == "S":
+ alpha = np.linalg.norm(D, axis=1)
+ alpha = np.where(alpha==0, np.ones(N), alpha)
+ roots = np.multiply( np.multiply( (D[:,0]+1j*D[:,1]), (D[:,1]-D[:,0]) ), 1./(np.sqrt(2)*alpha) )
+ elif self.polynomial_type == "T":
+ alpha = np.linalg.norm(D, axis=1)
+ roots = np.multiply( (D[:,1]-D[:,0])/2, np.cos(alpha) - np.sin(alpha) + 1j * (np.cos(alpha) + np.sin(alpha)) )
+ coeff = [0] * (N+1)
+ coeff[N] = 1
+ for i in range(1, N+1):
+ for j in range(N-i-1, N):
+ coeff[j] += ((-1) * roots[i-1] * coeff[j+1])
+ coeff = np.array(coeff[::-1])[1:]
+ Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
+ return Xfit
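Finally, a sketch of the vectorization classes with a toy diagram; the bandwidth and weight choices are assumptions for illustration:

    import numpy as np
    from gudhi.representations import PersistenceImage, Entropy

    diag = np.array([[0.0, 1.0], [0.2, 0.7]])

    pim = PersistenceImage(bandwidth=0.1, resolution=[20, 20],
                           weight=lambda p: p[1])  # weight points by their persistence
    img = pim.fit_transform([diag])                # shape (1, 20 * 20)

    ent = Entropy(mode="scalar").fit_transform([diag])  # shape (1, 1) entropy statistic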