-rw-r--r--  azure-pipelines.yml                                                          43
-rw-r--r--  src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h        2
-rw-r--r--  src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp   13
-rw-r--r--  src/python/CMakeLists.txt                                                    38
-rw-r--r--  src/python/doc/representations.rst                                            2
-rwxr-xr-x  src/python/example/diagram_vectorizations_distances_kernels.py              98
-rw-r--r--  src/python/gudhi/bottleneck.cc                                               50
-rw-r--r--  src/python/gudhi/bottleneck.pyx                                              48
-rw-r--r--  src/python/gudhi/hera.cc                                                     27
-rw-r--r--  src/python/gudhi/point_cloud/knn.py                                           8
-rw-r--r--  src/python/gudhi/representations/kernel_methods.py                          183
-rw-r--r--  src/python/gudhi/representations/metrics.py                                 381
-rw-r--r--  src/python/gudhi/representations/preprocessing.py                            60
-rw-r--r--  src/python/gudhi/representations/vector_methods.py                           84
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                                             2
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                                            51
-rw-r--r--  src/python/gudhi/wasserstein/wasserstein.py                                  67
-rw-r--r--  src/python/include/Persistent_cohomology_interface.h                        128
-rw-r--r--  src/python/include/pybind11_diagram_utils.h                                  39
-rw-r--r--  src/python/setup.py.in                                                       29
-rwxr-xr-x  src/python/test/test_simplex_generators.py                                   64
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py                                 38
22 files changed, 1056 insertions, 399 deletions
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 95b15db2..7b5334a7 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -4,35 +4,34 @@ jobs:
displayName: "Build and test"
timeoutInMinutes: 0
cancelTimeoutInMinutes: 60
-
- strategy:
- matrix:
- macOSrelease:
- imageName: 'macos-10.14'
- CMakeBuildType: Release
- customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
-
pool:
- vmImage: $(imageName)
-
+ vmImage: macOS-10.14
+ variables:
+ pythonVersion: '3.6'
+ cmakeBuildType: Release
+ customInstallation: 'brew update && brew install graphviz doxygen boost eigen gmp mpfr tbb cgal'
+
steps:
- - task: UsePythonVersion@0
- inputs:
- versionSpec: '3.7'
- architecture: 'x64'
+ - bash: echo "##vso[task.prependpath]$CONDA/bin"
+ displayName: Add conda to PATH
- - script: |
- $(customInstallation)
- git submodule update --init
- python -m pip install --upgrade pip
+ - bash: sudo conda create --yes --quiet --name gudhi_build_env
+ displayName: Create Anaconda environment
+
+ - bash: |
+ source activate gudhi_build_env
+ sudo conda install --yes --quiet --name gudhi_build_env python=$(pythonVersion)
python -m pip install --user -r .github/build-requirements.txt
python -m pip install --user -r .github/test-requirements.txt
+ $(customInstallation)
displayName: 'Install build dependencies'
- - script: |
+ - bash: |
+ source activate gudhi_build_env
+ git submodule update --init
mkdir build
cd build
- cmake -DCMAKE_BUILD_TYPE:STRING=$(CMakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
- make
+ cmake -DCMAKE_BUILD_TYPE:STRING=$(cmakeBuildType) -DWITH_GUDHI_TEST=ON -DWITH_GUDHI_UTILITIES=ON -DWITH_GUDHI_PYTHON=ON -DPython_ADDITIONAL_VERSIONS=3 ..
+ make -j 4
make doxygen
- ctest -j 8 --output-on-failure -E sphinx # remove sphinx build as it fails
+ ctest -j 4 --output-on-failure -E sphinx # remove sphinx build as it fails
displayName: 'Build, test and documentation generation'
diff --git a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
index 1eb77c9c..e6a78a6d 100644
--- a/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
+++ b/src/Bitmap_cubical_complex/include/gudhi/Bitmap_cubical_complex_base.h
@@ -197,7 +197,7 @@ class Bitmap_cubical_complex_base {
/**
* Returns number of all cubes in the data structure.
**/
- inline unsigned size() const { return this->data.size(); }
+ inline std::size_t size() const { return this->data.size(); }
/**
* Writing to stream operator. By using it we get the values T of cells in order in which they are stored in the
diff --git a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
index db456f70..8c5742aa 100644
--- a/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
+++ b/src/Persistent_cohomology/example/rips_persistence_via_boundary_matrix.cpp
@@ -17,10 +17,6 @@
#include <boost/program_options.hpp>
-#ifdef GUDHI_USE_TBB
-#include <tbb/task_scheduler_init.h>
-#endif
-
#include <string>
#include <vector>
@@ -67,11 +63,6 @@ int main(int argc, char * argv[]) {
std::clog << "The complex contains " << st.num_simplices() << " simplices \n";
std::clog << " and has dimension " << st.dimension() << " \n";
-#ifdef GUDHI_USE_TBB
- // Unnecessary, but clarifies which operations are parallel.
- tbb::task_scheduler_init ts;
-#endif
-
// Sort the simplices in the order of the filtration
st.initialize_filtration();
int count = 0;
@@ -81,10 +72,6 @@ int main(int argc, char * argv[]) {
// Convert to a more convenient representation.
Gudhi::Hasse_complex<> hcpx(st);
-#ifdef GUDHI_USE_TBB
- ts.terminate();
-#endif
-
// Free some space.
delete &st;
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 055d5b23..976a8b52 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -34,6 +34,7 @@ endfunction( add_gudhi_debug_info )
if(PYTHONINTERP_FOUND)
if(PYBIND11_FOUND)
add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}")
+ set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ")
endif()
if(CYTHON_FOUND)
@@ -46,7 +47,6 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'reader_utils', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'witness_complex', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'strong_witness_complex', ")
- set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'nerve_gic', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'subsampling', ")
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'tangential_complex', ")
@@ -120,24 +120,25 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DCGAL_EIGEN3_ENABLED', ")
endif (EIGEN3_FOUND)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'off_reader', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'simplex_tree', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'rips_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'periodic_cubical_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'reader_utils', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'off_reader', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'simplex_tree', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'rips_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'periodic_cubical_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'reader_utils', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'strong_witness_complex', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera', ")
if (NOT CGAL_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'bottleneck', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'nerve_gic', ")
+ set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ")
endif ()
if (NOT CGAL_WITH_EIGEN3_VERSION VERSION_LESS 4.11.0)
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'alpha_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'subsampling', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'tangential_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_witness_complex', ")
- set(GUDHI_PYTHON_MODULES_TO_COMPILE "${GUDHI_PYTHON_MODULES_TO_COMPILE}'euclidean_strong_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'alpha_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'subsampling', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'tangential_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_witness_complex', ")
+ set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'euclidean_strong_witness_complex', ")
endif ()
if(CGAL_FOUND)
@@ -452,6 +453,7 @@ if(PYTHONINTERP_FOUND)
${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/example/simplex_tree_example.py)
add_gudhi_py_test(test_simplex_tree)
+ add_gudhi_py_test(test_simplex_generators)
# Witness
add_test(NAME witness_complex_from_nearest_landmark_table_py_test
@@ -466,7 +468,9 @@ if(PYTHONINTERP_FOUND)
# Wasserstein
if(OT_FOUND AND PYBIND11_FOUND)
- add_gudhi_py_test(test_wasserstein_distance)
+ if(TORCH_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_wasserstein_distance)
+ endif()
add_gudhi_py_test(test_wasserstein_barycenter)
endif()
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index 11dcbcf9..041e3247 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -10,7 +10,7 @@ Representations manual
This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
-A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time.
+A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
A small example is provided
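As a complement to the paragraph above, a minimal sketch (not part of the patch) of feeding a list of diagrams with different numbers of points to one of the representations classes; the parameter names follow gudhi.representations.Landscape:

    import numpy as np
    from gudhi.representations import Landscape

    # two diagrams with different numbers of points
    D1 = np.array([[0., 4.], [1., 2.], [3., 8.]])
    D2 = np.array([[0., 3.], [2., 6.]])

    # one call handles both; each diagram is vectorized independently
    L = Landscape(num_landscapes=2, resolution=10).fit_transform([D1, D2])
    print(L.shape)  # (2, 20): one vector of length num_landscapes * resolution per diagram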
diff --git a/src/python/example/diagram_vectorizations_distances_kernels.py b/src/python/example/diagram_vectorizations_distances_kernels.py
index 119072eb..c4a71a7a 100755
--- a/src/python/example/diagram_vectorizations_distances_kernels.py
+++ b/src/python/example/diagram_vectorizations_distances_kernels.py
@@ -9,26 +9,25 @@ from gudhi.representations import DiagramSelector, Clamping, Landscape, Silhouet
TopologicalVector, DiagramScaler, BirthPersistenceTransform,\
PersistenceImage, PersistenceWeightedGaussianKernel, Entropy, \
PersistenceScaleSpaceKernel, SlicedWassersteinDistance,\
- SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel
+ SlicedWassersteinKernel, BottleneckDistance, PersistenceFisherKernel, WassersteinDistance
-D = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = [D]
+D1 = np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.], [0., np.inf], [5., np.inf]])
-diags = DiagramSelector(use=True, point_type="finite").fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags)
-diags = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))]).fit_transform(diags)
+proc1 = DiagramSelector(use=True, point_type="finite")
+proc2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())])
+proc3 = DiagramScaler(use=True, scalers=[([1], Clamping(maximum=.9))])
+D1 = proc3(proc2(proc1(D1)))
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagram for vector methods")
plt.show()
LS = Landscape(resolution=1000)
-L = LS.fit_transform(diags)
-plt.plot(L[0][:1000])
-plt.plot(L[0][1000:2000])
-plt.plot(L[0][2000:3000])
+L = LS(D1)
+plt.plot(L[:1000])
+plt.plot(L[1000:2000])
+plt.plot(L[2000:3000])
plt.title("Landscape")
plt.show()
@@ -36,50 +35,39 @@ def pow(n):
return lambda x: np.power(x[1]-x[0],n)
SH = Silhouette(resolution=1000, weight=pow(2))
-sh = SH.fit_transform(diags)
-plt.plot(sh[0])
+plt.plot(SH(D1))
plt.title("Silhouette")
plt.show()
BC = BettiCurve(resolution=1000)
-bc = BC.fit_transform(diags)
-plt.plot(bc[0])
+plt.plot(BC(D1))
plt.title("Betti Curve")
plt.show()
CP = ComplexPolynomial(threshold=-1, polynomial_type="T")
-cp = CP.fit_transform(diags)
-print("Complex polynomial is " + str(cp[0,:]))
+print("Complex polynomial is " + str(CP(D1)))
TV = TopologicalVector(threshold=-1)
-tv = TV.fit_transform(diags)
-print("Topological vector is " + str(tv[0,:]))
+print("Topological vector is " + str(TV(D1)))
PI = PersistenceImage(bandwidth=.1, weight=lambda x: x[1], im_range=[0,1,0,1], resolution=[100,100])
-pi = PI.fit_transform(diags)
-plt.imshow(np.flip(np.reshape(pi[0], [100,100]), 0))
+plt.imshow(np.flip(np.reshape(PI(D1), [100,100]), 0))
plt.title("Persistence Image")
plt.show()
ET = Entropy(mode="scalar")
-et = ET.fit_transform(diags)
-print("Entropy statistic is " + str(et[0,:]))
+print("Entropy statistic is " + str(ET(D1)))
ET = Entropy(mode="vector", normalized=False)
-et = ET.fit_transform(diags)
-plt.plot(et[0])
+plt.plot(ET(D1))
plt.title("Entropy function")
plt.show()
-D = np.array([[1.,5.],[3.,6.],[2.,7.]])
-diags2 = [D]
+D2 = np.array([[1.,5.],[3.,6.],[2.,7.]])
+D2 = proc3(proc2(proc1(D2)))
-diags2 = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diags2)
-
-D = diags[0]
-plt.scatter(D[:,0],D[:,1])
-D = diags2[0]
-plt.scatter(D[:,0],D[:,1])
+plt.scatter(D1[:,0], D1[:,1])
+plt.scatter(D2[:,0], D2[:,1])
plt.plot([0.,1.],[0.,1.])
plt.title("Test Persistence Diagrams for kernel methods")
plt.show()
@@ -88,46 +76,34 @@ def arctan(C,p):
return lambda x: C*np.arctan(np.power(x[1], p))
PWG = PersistenceWeightedGaussianKernel(bandwidth=1., kernel_approx=None, weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("PWG kernel is " + str(Y[0][0]))
+print("PWG kernel is " + str(PWG(D1, D2)))
PWG = PersistenceWeightedGaussianKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])), weight=arctan(1.,1.))
-X = PWG.fit(diags)
-Y = PWG.transform(diags2)
-print("Approximate PWG kernel is " + str(Y[0][0]))
+print("Approximate PWG kernel is " + str(PWG(D1, D2)))
PSS = PersistenceScaleSpaceKernel(bandwidth=1.)
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("PSS kernel is " + str(Y[0][0]))
+print("PSS kernel is " + str(PSS(D1, D2)))
PSS = PersistenceScaleSpaceKernel(kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PSS.fit(diags)
-Y = PSS.transform(diags2)
-print("Approximate PSS kernel is " + str(Y[0][0]))
+print("Approximate PSS kernel is " + str(PSS(D1, D2)))
sW = SlicedWassersteinDistance(num_directions=100)
-X = sW.fit(diags)
-Y = sW.transform(diags2)
-print("SW distance is " + str(Y[0][0]))
+print("SW distance is " + str(sW(D1, D2)))
SW = SlicedWassersteinKernel(num_directions=100, bandwidth=1.)
-X = SW.fit(diags)
-Y = SW.transform(diags2)
-print("SW kernel is " + str(Y[0][0]))
+print("SW kernel is " + str(SW(D1, D2)))
+
+W = WassersteinDistance(order=2, internal_p=2, mode="pot")
+print("Wasserstein distance (POT) is " + str(W(D1, D2)))
+
+W = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.0001)
+print("Wasserstein distance (hera) is " + str(W(D1, D2)))
W = BottleneckDistance(epsilon=.001)
-X = W.fit(diags)
-Y = W.transform(diags2)
-print("Bottleneck distance is " + str(Y[0][0]))
+print("Bottleneck distance is " + str(W(D1, D2)))
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1.)
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("PF kernel is " + str(Y[0][0]))
+print("PF kernel is " + str(PF(D1, D2)))
PF = PersistenceFisherKernel(bandwidth_fisher=1., bandwidth=1., kernel_approx=RBFSampler(gamma=1./2, n_components=100000).fit(np.ones([1,2])))
-X = PF.fit(diags)
-Y = PF.transform(diags2)
-print("Approximate PF kernel is " + str(Y[0][0]))
+print("Approximate PF kernel is " + str(PF(D1, D2)))
diff --git a/src/python/gudhi/bottleneck.cc b/src/python/gudhi/bottleneck.cc
new file mode 100644
index 00000000..732cb9a8
--- /dev/null
+++ b/src/python/gudhi/bottleneck.cc
@@ -0,0 +1,50 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <gudhi/Bottleneck.h>
+
+#include <pybind11_diagram_utils.h>
+
+double bottleneck(Dgm d1, Dgm d2, double epsilon)
+{
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
+
+ return Gudhi::persistence_diagram::bottleneck_distance(diag1, diag2, epsilon);
+}
+
+PYBIND11_MODULE(bottleneck, m) {
+ m.attr("__license__") = "GPL v3";
+ m.def("bottleneck_distance", &bottleneck,
+ py::arg("diagram_1"), py::arg("diagram_2"),
+ py::arg("e") = (std::numeric_limits<double>::min)(),
+ R"pbdoc(
+ This function computes the bottleneck distance between two persistence diagrams.
+
+ :param diagram_1: The first diagram.
+ :type diagram_1: numpy array of shape (m,2)
+ :param diagram_2: The second diagram.
+ :type diagram_2: numpy array of shape (n,2)
+ :param e: If `e` is 0, this uses an expensive algorithm to compute the
+ exact distance.
+ If `e` is not 0, it asks for an additive `e`-approximation, and
+ currently also allows a small multiplicative error (the last 2 or 3
+ bits of the mantissa may be wrong). This version of the algorithm takes
+ advantage of the limited precision of `double` and is usually a lot
+ faster to compute, whatever the value of `e`.
+ Thus, by default, `e` is the smallest positive double.
+ :type e: float
+ :rtype: float
+ :returns: the bottleneck distance.
+ )pbdoc");
+}
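For reference, a short usage sketch of this module once built, through the gudhi.bottleneck_distance entry point (input values are illustrative):

    import numpy as np
    import gudhi

    d1 = np.array([[2.7, 3.7], [9.6, 14.], [34.2, 34.974]])
    d2 = np.array([[2.8, 4.45], [9.5, 14.1]])

    # default e: smallest positive double, i.e. the fast approximate algorithm
    print(gudhi.bottleneck_distance(d1, d2))
    # e = 0: exact but slower computation
    print(gudhi.bottleneck_distance(d1, d2, 0.0))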
diff --git a/src/python/gudhi/bottleneck.pyx b/src/python/gudhi/bottleneck.pyx
deleted file mode 100644
index af011e88..00000000
--- a/src/python/gudhi/bottleneck.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
-# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
-# Author(s): Vincent Rouvreau
-#
-# Copyright (C) 2016 Inria
-#
-# Modification(s):
-# - YYYY/MM Author: Description of the modification
-
-from cython cimport numeric
-from libcpp.vector cimport vector
-from libcpp.utility cimport pair
-import os
-
-__author__ = "Vincent Rouvreau"
-__copyright__ = "Copyright (C) 2016 Inria"
-__license__ = "GPL v3"
-
-cdef extern from "Bottleneck_distance_interface.h" namespace "Gudhi::persistence_diagram":
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]], double)
- double bottleneck(vector[pair[double, double]], vector[pair[double, double]])
-
-def bottleneck_distance(diagram_1, diagram_2, e=None):
- """This function returns the point corresponding to a given vertex.
-
- :param diagram_1: The first diagram.
- :type diagram_1: vector[pair[double, double]]
- :param diagram_2: The second diagram.
- :type diagram_2: vector[pair[double, double]]
- :param e: If `e` is 0, this uses an expensive algorithm to compute the
- exact distance.
- If `e` is not 0, it asks for an additive `e`-approximation, and
- currently also allows a small multiplicative error (the last 2 or 3
- bits of the mantissa may be wrong). This version of the algorithm takes
- advantage of the limited precision of `double` and is usually a lot
- faster to compute, whatever the value of `e`.
-
- Thus, by default, `e` is the smallest positive double.
- :type e: float
- :rtype: float
- :returns: the bottleneck distance.
- """
- if e is None:
- # Default value is the smallest double value (not 0, 0 is for exact version)
- return bottleneck(diagram_1, diagram_2)
- else:
- # Can be 0 for exact version
- return bottleneck(diagram_1, diagram_2, e)
diff --git a/src/python/gudhi/hera.cc b/src/python/gudhi/hera.cc
index 0d562b4c..ea80a9a8 100644
--- a/src/python/gudhi/hera.cc
+++ b/src/python/gudhi/hera.cc
@@ -8,35 +8,20 @@
* - YYYY/MM Author: Description of the modification
*/
-#include <pybind11/pybind11.h>
-#include <pybind11/numpy.h>
-
-#include <boost/range/iterator_range.hpp>
-
#include <wasserstein.h> // Hera
-#include <array>
-
-namespace py = pybind11;
-typedef py::array_t<double, py::array::c_style | py::array::forcecast> Dgm;
+#include <pybind11_diagram_utils.h>
double wasserstein_distance(
Dgm d1, Dgm d2,
double wasserstein_power, double internal_p,
double delta)
{
- py::buffer_info buf1 = d1.request();
- py::buffer_info buf2 = d2.request();
- // shape (n,2) or (0) for empty
- if((buf1.ndim!=2 || buf1.shape[1]!=2) && (buf1.ndim!=1 || buf1.shape[0]!=0))
- throw std::runtime_error("Diagram 1 must be an array of size n x 2");
- if((buf2.ndim!=2 || buf2.shape[1]!=2) && (buf2.ndim!=1 || buf2.shape[0]!=0))
- throw std::runtime_error("Diagram 2 must be an array of size n x 2");
- typedef std::array<double, 2> Point;
- auto p1 = (Point*)buf1.ptr;
- auto p2 = (Point*)buf2.ptr;
- auto diag1 = boost::make_iterator_range(p1, p1+buf1.shape[0]);
- auto diag2 = boost::make_iterator_range(p2, p2+buf2.shape[0]);
+ // I *think* the call to request() has to be before releasing the GIL.
+ auto diag1 = numpy_to_range_of_pairs(d1);
+ auto diag2 = numpy_to_range_of_pairs(d2);
+
+ py::gil_scoped_release release;
hera::AuctionParams<double> params;
params.wasserstein_power = wasserstein_power;
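A hedged usage sketch of the refactored binding, assuming the module is importable as gudhi.hera; the keyword names (order, internal_p, delta) are the ones metrics.py below passes through:

    import numpy as np
    from gudhi.hera import wasserstein_distance

    d1 = np.array([[0., 4.], [1., 2.]])
    d2 = np.array([[0., 3.]])

    # delta controls the relative approximation error (1 + delta)
    print(wasserstein_distance(d1, d2, order=2., internal_p=2., delta=0.01))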
diff --git a/src/python/gudhi/point_cloud/knn.py b/src/python/gudhi/point_cloud/knn.py
index 07553d6d..34e80b5d 100644
--- a/src/python/gudhi/point_cloud/knn.py
+++ b/src/python/gudhi/point_cloud/knn.py
@@ -200,8 +200,8 @@ class KNearestNeighbors:
from joblib import Parallel, delayed, effective_n_jobs
from sklearn.utils import gen_even_slices
- slices = gen_even_slices(len(X), effective_n_jobs(-1))
- parallel = Parallel(backend="threading", n_jobs=-1)
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
if self.params.get("sort_results", True):
def func(M):
@@ -242,8 +242,8 @@ class KNearestNeighbors:
else:
func = lambda M: numpy.partition(M, k - 1)[:, 0:k]
- slices = gen_even_slices(len(X), effective_n_jobs(-1))
- parallel = Parallel(backend="threading", n_jobs=-1)
+ slices = gen_even_slices(len(X), effective_n_jobs(n_jobs))
+ parallel = Parallel(prefer="threads", n_jobs=n_jobs)
distances = numpy.concatenate(parallel(delayed(func)(X[s]) for s in slices))
return distances
return None
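The effect of this fix is that n_jobs passed to the constructor is now honored by the joblib-based code paths instead of a hard-coded -1. A minimal sketch, assuming the KNearestNeighbors constructor accepts n_jobs through its **kwargs as in this module:

    import numpy as np
    from gudhi.point_cloud.knn import KNearestNeighbors

    pts = np.random.rand(100, 3)
    knn = KNearestNeighbors(k=4, return_index=True, return_distance=True, n_jobs=2)
    neighbors, distances = knn.fit(pts).transform(pts)
    print(neighbors.shape, distances.shape)  # (100, 4) (100, 4)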
diff --git a/src/python/gudhi/representations/kernel_methods.py b/src/python/gudhi/representations/kernel_methods.py
index bfc83aff..596f4f07 100644
--- a/src/python/gudhi/representations/kernel_methods.py
+++ b/src/python/gudhi/representations/kernel_methods.py
@@ -9,13 +9,83 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
-from sklearn.metrics import pairwise_distances
-from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance
+from sklearn.metrics import pairwise_distances, pairwise_kernels
+from .metrics import SlicedWassersteinDistance, PersistenceFisherDistance, _sklearn_wrapper, pairwise_persistence_diagram_distances, _sliced_wasserstein_distance, _persistence_fisher_distance
+from .preprocessing import Padding
#############################################
# Kernel methods ############################
#############################################
+def _persistence_weighted_gaussian_kernel(D1, D2, weight=lambda x: 1, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence weighted Gaussian kernel value from two persistence diagrams. The persistence weighted Gaussian kernel is computed by convolving the persistence diagram points with weighted Gaussian kernels. See http://proceedings.mlr.press/v48/kusano16.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ weight: weight function for the persistence diagram points (default constant function, i.e. lambda x: 1). This function must be defined on 2D points, i.e. lists or numpy arrays of the form [p_x,p_y].
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence weighted Gaussian kernel value between persistence diagrams.
+ """
+ ws1 = np.array([weight(D1[j,:]) for j in range(len(D1))])
+ ws2 = np.array([weight(D2[j,:]) for j in range(len(D2))])
+ if kernel_approx is not None:
+ approx1 = np.sum(np.multiply(ws1[:,np.newaxis], kernel_approx.transform(D1)), axis=0)
+ approx2 = np.sum(np.multiply(ws2[:,np.newaxis], kernel_approx.transform(D2)), axis=0)
+ return (1./(np.sqrt(2*np.pi)*bandwidth)) * np.matmul(approx1, approx2.T)
+ else:
+ W = np.matmul(ws1[:,np.newaxis], ws2[np.newaxis,:])
+ E = (1./(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-np.square(pairwise_distances(D1,D2))/(2*bandwidth*bandwidth))
+ return np.sum(np.multiply(W, E))
+
+def _persistence_scale_space_kernel(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence scale space kernel value from two persistence diagrams. The persistence scale space kernel is computed by adding the symmetric to the diagonal of each point in each persistence diagram, with negative weight, and then convolving the points with a Gaussian kernel. See https://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Reininghaus_A_Stable_Multi-Scale_2015_CVPR_paper.pdf for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence scale space kernel value between persistence diagrams.
+ """
+ DD1 = np.concatenate([D1, D1[:,[1,0]]], axis=0)
+ DD2 = np.concatenate([D2, D2[:,[1,0]]], axis=0)
+ weight_pss = lambda x: 1 if x[1] >= x[0] else -1
+ return 0.5 * _persistence_weighted_gaussian_kernel(DD1, DD2, weight=weight_pss, kernel_approx=kernel_approx, bandwidth=bandwidth)
+
+def pairwise_persistence_diagram_kernels(X, Y=None, kernel="sliced_wasserstein", **kwargs):
+ """
+ This function computes the kernel matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise kernel values are computed from the first list only.
+ kernel: kernel to use. It can be either a string ("sliced_wasserstein", "persistence_scale_space", "persistence_weighted_gaussian", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the kernel function. See the docs of the various kernel classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): kernel matrix.
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if kernel == "sliced_wasserstein":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="sliced_wasserstein", num_directions=kwargs["num_directions"]) / kwargs["bandwidth"])
+ elif kernel == "persistence_fisher":
+ return np.exp(-pairwise_persistence_diagram_distances(X, Y, metric="persistence_fisher", kernel_approx=kwargs["kernel_approx"], bandwidth=kwargs["bandwidth"]) / kwargs["bandwidth_fisher"])
+ elif kernel == "persistence_scale_space":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_scale_space_kernel, X, Y, **kwargs))
+ elif kernel == "persistence_weighted_gaussian":
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(_persistence_weighted_gaussian_kernel, X, Y, **kwargs))
+ else:
+ return pairwise_kernels(XX, YY, metric=_sklearn_wrapper(kernel, X, Y, **kwargs))
+
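An illustrative call of the helper defined above (editorial sketch, not part of the patch); note that the built-in string kernels read their parameters from **kwargs, so the required ones must be supplied:

    import numpy as np
    from gudhi.representations.kernel_methods import pairwise_persistence_diagram_kernels

    diags = [np.array([[0., 4.], [1., 2.]]), np.array([[0., 3.], [2., 6.]])]
    K = pairwise_persistence_diagram_kernels(diags, kernel="sliced_wasserstein",
                                             num_directions=10, bandwidth=1.)
    print(K.shape)  # (2, 2), symmetric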
class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein kernel matrix from a list of persistence diagrams. The sliced Wasserstein kernel is computed by exponentiating the corresponding sliced Wasserstein distance with a Gaussian kernel. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -29,7 +99,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the kernel computation (default 10).
"""
self.bandwidth = bandwidth
- self.sw_ = SlicedWassersteinDistance(num_directions=num_directions)
+ self.num_directions = num_directions
def fit(self, X, y=None):
"""
@@ -39,7 +109,7 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.sw_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -52,7 +122,20 @@ class SlicedWassersteinKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein kernel values.
"""
- return np.exp(-self.sw_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="sliced_wasserstein", bandwidth=self.bandwidth, num_directions=self.num_directions)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein kernel value.
+ """
+ return np.exp(-_sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions) / self.bandwidth)
class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
"""
@@ -78,10 +161,7 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- self.ws_ = [ np.array([self.weight(self.diagrams_[i][j,:]) for j in range(self.diagrams_[i].shape[0])]) for i in range(len(self.diagrams_)) ]
- if self.kernel_approx is not None:
- self.approx_ = np.concatenate([np.sum(np.multiply(self.ws_[i][:,np.newaxis], self.kernel_approx.transform(self.diagrams_[i])), axis=0)[np.newaxis,:] for i in range(len(self.diagrams_))])
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -94,31 +174,20 @@ class PersistenceWeightedGaussianKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence weighted Gaussian kernel values.
"""
- Xp = list(X)
- Xfit = np.zeros((len(Xp), len(self.diagrams_)))
- if len(self.diagrams_) == len(Xp) and np.all([np.array_equal(self.diagrams_[i], Xp[i]) for i in range(len(Xp))]):
- if self.kernel_approx is not None:
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(self.approx_, self.approx_.T)
- else:
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- W = np.matmul(self.ws_[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(self.diagrams_[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
- Xfit[j,i] = Xfit[i,j]
- else:
- ws = [ np.array([self.weight(Xp[i][j,:]) for j in range(Xp[i].shape[0])]) for i in range(len(Xp)) ]
- if self.kernel_approx is not None:
- approx = np.concatenate([np.sum(np.multiply(ws[i][:,np.newaxis], self.kernel_approx.transform(Xp[i])), axis=0)[np.newaxis,:] for i in range(len(Xp))])
- Xfit = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.matmul(approx, self.approx_.T)
- else:
- for i in range(len(Xp)):
- for j in range(len(self.diagrams_)):
- W = np.matmul(ws[i][:,np.newaxis], self.ws_[j][np.newaxis,:])
- E = (1./(np.sqrt(2*np.pi)*self.bandwidth)) * np.exp(-np.square(pairwise_distances(Xp[i], self.diagrams_[j]))/(2*np.square(self.bandwidth)))
- Xfit[i,j] = np.sum(np.multiply(W, E))
-
- return Xfit
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_weighted_gaussian", bandwidth=self.bandwidth, weight=self.weight, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceWeightedGaussianKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence weighted Gaussian kernel value.
+ """
+ return _persistence_weighted_gaussian_kernel(diag1, diag2, weight=self.weight, kernel_approx=self.kernel_approx, bandwidth=self.bandwidth)
class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
"""
@@ -132,7 +201,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
bandwidth (double): bandwidth of the Gaussian kernel with which persistence diagrams will be convolved (default 1.)
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
- self.pwg_ = PersistenceWeightedGaussianKernel(bandwidth=bandwidth, weight=lambda x: 1 if x[1] >= x[0] else -1, kernel_approx=kernel_approx)
+ self.bandwidth, self.kernel_approx = bandwidth, kernel_approx
def fit(self, X, y=None):
"""
@@ -142,11 +211,7 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.diagrams_ = list(X)
- for i in range(len(self.diagrams_)):
- op_D = self.diagrams_[i][:,[1,0]]
- self.diagrams_[i] = np.concatenate([self.diagrams_[i], op_D], axis=0)
- self.pwg_.fit(X)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -159,11 +224,20 @@ class PersistenceScaleSpaceKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence scale space kernel values.
"""
- Xp = list(X)
- for i in range(len(Xp)):
- op_X = Xp[i][:,[1,0]]
- Xp[i] = np.concatenate([Xp[i], op_X], axis=0)
- return self.pwg_.transform(Xp)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_scale_space", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceScaleSpaceKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence scale space kernel value.
+ """
+ return _persistence_scale_space_kernel(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
"""
@@ -179,7 +253,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
kernel_approx (class): kernel approximation class used to speed up computation (default None). Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
"""
self.bandwidth = bandwidth
- self.pf_ = PersistenceFisherDistance(bandwidth=bandwidth_fisher, kernel_approx=kernel_approx)
+ self.bandwidth_fisher, self.kernel_approx = bandwidth_fisher, kernel_approx
def fit(self, X, y=None):
"""
@@ -189,7 +263,7 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
X (list of n x 2 numpy arrays): input persistence diagrams.
y (n x 1 array): persistence diagram labels (unused).
"""
- self.pf_.fit(X, y)
+ self.diagrams_ = X
return self
def transform(self, X):
@@ -202,5 +276,18 @@ class PersistenceFisherKernel(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher kernel values.
"""
- return np.exp(-self.pf_.transform(X)/self.bandwidth)
+ return pairwise_persistence_diagram_kernels(X, self.diagrams_, kernel="persistence_fisher", bandwidth=self.bandwidth, bandwidth_fisher=self.bandwidth_fisher, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherKernel on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher kernel value.
+ """
+ return np.exp(-_persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx) / self.bandwidth_fisher)
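To summarize the new API of this file: every kernel class keeps its scikit-learn fit/transform interface and gains a __call__ shortcut for a single pair of diagrams. A hedged sketch with SlicedWassersteinKernel:

    import numpy as np
    from gudhi.representations import SlicedWassersteinKernel

    D1 = np.array([[0., 4.], [1., 2.]])
    D2 = np.array([[0., 3.], [2., 6.]])

    SW = SlicedWassersteinKernel(num_directions=10, bandwidth=1.)
    K = SW.fit([D1]).transform([D2])  # 1 x 1 kernel matrix
    k = SW(D1, D2)                    # same value (up to floating-point rounding)
    print(K[0, 0], k)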
diff --git a/src/python/gudhi/representations/metrics.py b/src/python/gudhi/representations/metrics.py
index 5f9ec6ab..ce416fb1 100644
--- a/src/python/gudhi/representations/metrics.py
+++ b/src/python/gudhi/representations/metrics.py
@@ -10,17 +10,168 @@
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import pairwise_distances
-try:
- from .. import bottleneck_distance
- USE_GUDHI = True
-except ImportError:
- USE_GUDHI = False
- print("Gudhi built without CGAL: BottleneckDistance will return a null matrix")
+from gudhi.hera import wasserstein_distance as hera_wasserstein_distance
+from .preprocessing import Padding
#############################################
# Metrics ###################################
#############################################
+def _sliced_wasserstein_distance(D1, D2, num_directions):
+ """
+ This is a function for computing the sliced Wasserstein distance from two persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ float: the sliced Wasserstein distance between persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ approx1 = np.matmul(D1, lines)
+ approx_diag1 = np.matmul(np.broadcast_to(D1.sum(-1,keepdims=True)/2,(len(D1),2)), lines)
+ approx2 = np.matmul(D2, lines)
+ approx_diag2 = np.matmul(np.broadcast_to(D2.sum(-1,keepdims=True)/2,(len(D2),2)), lines)
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
+def _compute_persistence_diagram_projections(X, num_directions):
+ """
+ This is a function for projecting the points of a list of persistence diagrams (as well as their diagonal projections) onto a fixed number of lines sampled uniformly on [-pi/2, pi/2]. This function can be used as a preprocessing step in order to speed up the running time for computing all pairwise sliced Wasserstein distances / kernel values on a list of persistence diagrams.
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): list of persistence diagrams.
+ num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation.
+
+ Returns:
+ list of n numpy arrays of shape (2*numx2): list of projected persistence diagrams.
+ """
+ thetas = np.linspace(-np.pi/2, np.pi/2, num=num_directions+1)[np.newaxis,:-1]
+ lines = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
+ XX = [np.vstack([np.matmul(D, lines), np.matmul(np.matmul(D, .5 * np.ones((2,2))), lines)]) for D in X]
+ return XX
+
+def _sliced_wasserstein_distance_on_projections(D1, D2):
+ """
+ This is a function for computing the sliced Wasserstein distance between two persistence diagrams that have already been projected onto some lines. It simply amounts to comparing the sorted projections with the 1-norm, and averaging over the lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
+
+ Parameters:
+ D1: (2n x number_of_lines) numpy.array containing the n projected points of the first diagram, and the n projections of their diagonal projections.
+ D2: (2m x number_of_lines) numpy.array containing the m projected points of the second diagram, and the m projections of their diagonal projections.
+
+ Returns:
+ float: the sliced Wasserstein distance between the projected persistence diagrams.
+ """
+ lim1, lim2 = int(len(D1)/2), int(len(D2)/2)
+ approx1, approx_diag1, approx2, approx_diag2 = D1[:lim1], D1[lim1:], D2[:lim2], D2[lim2:]
+ A = np.sort(np.concatenate([approx1, approx_diag2], axis=0), axis=0)
+ B = np.sort(np.concatenate([approx2, approx_diag1], axis=0), axis=0)
+ L1 = np.sum(np.abs(A-B), axis=0)
+ return np.mean(L1)
+
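As an illustrative consistency check (these are module-private helpers, assumed importable from gudhi.representations.metrics): the precomputed-projection route should agree with the direct computation:

    import numpy as np
    from gudhi.representations.metrics import (
        _sliced_wasserstein_distance,
        _compute_persistence_diagram_projections,
        _sliced_wasserstein_distance_on_projections,
    )

    D1 = np.array([[0., 4.], [1., 2.]])
    D2 = np.array([[0., 3.], [2., 6.]])

    d_direct = _sliced_wasserstein_distance(D1, D2, num_directions=10)
    P1, P2 = _compute_persistence_diagram_projections([D1, D2], num_directions=10)
    d_proj = _sliced_wasserstein_distance_on_projections(P1, P2)
    assert np.isclose(d_direct, d_proj)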
+def _persistence_fisher_distance(D1, D2, kernel_approx=None, bandwidth=1.):
+ """
+ This is a function for computing the persistence Fisher distance from two persistence diagrams. The persistence Fisher distance is obtained by computing the original Fisher distance between the probability distributions associated to the persistence diagrams given by convolving them with a Gaussian kernel. See http://papers.nips.cc/paper/8205-persistence-fisher-kernel-a-riemannian-manifold-kernel-for-persistence-diagrams for more details.
+
+ Parameters:
+ D1: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points (i.e. with infinite coordinate).
+ D2: (m x 2) numpy.array encoding the second diagram.
+ bandwidth (float): bandwidth of the Gaussian kernel used to turn persistence diagrams into probability distributions.
+ kernel_approx: kernel approximation class used to speed up computation. Common kernel approximations classes can be found in the scikit-learn library (such as RBFSampler for instance).
+
+ Returns:
+ float: the persistence Fisher distance between persistence diagrams.
+ """
+ projection = (1./2) * np.ones((2,2))
+ diagonal_projections1 = np.matmul(D1, projection)
+ diagonal_projections2 = np.matmul(D2, projection)
+ if kernel_approx is not None:
+ approx1 = kernel_approx.transform(D1)
+ approx_diagonal1 = kernel_approx.transform(diagonal_projections1)
+ approx2 = kernel_approx.transform(D2)
+ approx_diagonal2 = kernel_approx.transform(diagonal_projections2)
+ Z = np.concatenate([approx1, approx_diagonal1, approx2, approx_diagonal2], axis=0)
+ U, V = np.sum(np.concatenate([approx1, approx_diagonal2], axis=0), axis=0), np.sum(np.concatenate([approx2, approx_diagonal1], axis=0), axis=0)
+ vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ else:
+ Z = np.concatenate([D1, diagonal_projections1, D2, diagonal_projections2], axis=0)
+ U, V = np.concatenate([D1, diagonal_projections2], axis=0), np.concatenate([D2, diagonal_projections1], axis=0)
+ vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(bandwidth)))/(bandwidth * np.sqrt(2*np.pi)), axis=1)
+ vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
+ if vectori_sum != 0:
+ vectori = vectori/vectori_sum
+ if vectorj_sum != 0:
+ vectorj = vectorj/vectorj_sum
+ return np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+
+def _sklearn_wrapper(metric, X, Y, **kwargs):
+ """
+ This function is a wrapper for any metric between two persistence diagrams that takes two numpy arrays of shapes (nx2) and (mx2) as arguments.
+ """
+ if Y is None:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], X[int(b[0])], **kwargs)
+ else:
+ def flat_metric(a, b):
+ return metric(X[int(a[0])], Y[int(b[0])], **kwargs)
+ return flat_metric
+
+PAIRWISE_DISTANCE_FUNCTIONS = {
+ "wasserstein": hera_wasserstein_distance,
+ "hera_wasserstein": hera_wasserstein_distance,
+ "persistence_fisher": _persistence_fisher_distance,
+}
+
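The wrapper above enables a trick worth spelling out: scikit-learn's pairwise_distances only accepts fixed-size rows, so the variable-size diagrams are replaced by 1-d dummy points holding their indices, and flat_metric dereferences them. A sketch, assuming the names defined in this module:

    import numpy as np
    from sklearn.metrics import pairwise_distances

    diags = [np.array([[0., 4.], [1., 2.]]), np.array([[0., 3.]])]
    XX = np.reshape(np.arange(len(diags)), [-1, 1])  # dummy "points" 0 and 1

    # Y=None: distances within a single list of diagrams
    M = pairwise_distances(XX, metric=_sklearn_wrapper(hera_wasserstein_distance,
                                                       diags, None, delta=0.01))
    print(M)  # 2 x 2, zero diagonal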
+def pairwise_persistence_diagram_distances(X, Y=None, metric="bottleneck", **kwargs):
+ """
+ This function computes the distance matrix between two lists of persistence diagrams given as numpy arrays of shape (nx2).
+
+ Parameters:
+ X (list of n numpy arrays of shape (numx2)): first list of persistence diagrams.
+ Y (list of m numpy arrays of shape (numx2)): second list of persistence diagrams (optional). If None, pairwise distances are computed from the first list only.
+ metric: distance to use. It can be either a string ("sliced_wasserstein", "wasserstein", "hera_wasserstein" (Wasserstein distance computed with Hera---note that Hera is also used for the default option "wasserstein"), "pot_wasserstein" (Wasserstein distance computed with POT), "bottleneck", "persistence_fisher") or a function taking two numpy arrays of shape (nx2) and (mx2) as inputs. If it is a function, make sure that it is symmetric and that it outputs 0 if called on the same two arrays.
+ **kwargs: optional keyword parameters. Any further parameters are passed directly to the distance function. See the docs of the various distance classes in this module.
+
+ Returns:
+ numpy array of shape (nxm): distance matrix
+ """
+ XX = np.reshape(np.arange(len(X)), [-1,1])
+ YY = None if Y is None else np.reshape(np.arange(len(Y)), [-1,1])
+ if metric == "bottleneck":
+ try:
+ from .. import bottleneck_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(bottleneck_distance, X, Y, **kwargs))
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
+ elif metric == "pot_wasserstein":
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(pot_wasserstein_distance, X, Y, **kwargs))
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
+ elif metric == "sliced_wasserstein":
+ Xproj = _compute_persistence_diagram_projections(X, **kwargs)
+ Yproj = None if Y is None else _compute_persistence_diagram_projections(Y, **kwargs)
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(_sliced_wasserstein_distance_on_projections, Xproj, Yproj))
+ elif type(metric) == str:
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(PAIRWISE_DISTANCE_FUNCTIONS[metric], X, Y, **kwargs))
+ else:
+ return pairwise_distances(XX, YY, metric=_sklearn_wrapper(metric, X, Y, **kwargs))
+
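For completeness, a hedged usage sketch of the dispatcher above:

    import numpy as np
    from gudhi.representations.metrics import pairwise_persistence_diagram_distances

    diags = [np.array([[0., 4.], [1., 2.]]), np.array([[0., 3.], [2., 6.]])]
    D_sw = pairwise_persistence_diagram_distances(diags, metric="sliced_wasserstein",
                                                  num_directions=10)
    D_w = pairwise_persistence_diagram_distances(diags, metric="hera_wasserstein",
                                                 delta=0.01)
    print(D_sw.shape, D_w.shape)  # (2, 2) (2, 2)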
class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
"""
This is a class for computing the sliced Wasserstein distance matrix from a list of persistence diagrams. The Sliced Wasserstein distance is computed by projecting the persistence diagrams onto lines, comparing the projections with the 1-norm, and finally integrating over all possible lines. See http://proceedings.mlr.press/v70/carriere17a.html for more details.
@@ -33,8 +184,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
num_directions (int): number of lines evenly sampled from [-pi/2,pi/2] in order to approximate and speed up the distance computation (default 10).
"""
self.num_directions = num_directions
- thetas = np.linspace(-np.pi/2, np.pi/2, num=self.num_directions+1)[np.newaxis,:-1]
- self.lines_ = np.concatenate([np.cos(thetas), np.sin(thetas)], axis=0)
def fit(self, X, y=None):
"""
@@ -45,9 +194,6 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- self.approx_ = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- diag_proj = (1./2) * np.ones((2,2))
- self.approx_diag_ = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
return self
def transform(self, X):
@@ -60,27 +206,20 @@ class SlicedWassersteinDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise sliced Wasserstein distances.
"""
- Xfit = np.zeros((len(X), len(self.approx_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.approx_)):
- for j in range(i+1, len(self.approx_)):
- A = np.sort(np.concatenate([self.approx_[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], self.approx_diag_[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
- Xfit[j,i] = Xfit[i,j]
- else:
- diag_proj = (1./2) * np.ones((2,2))
- approx = [np.matmul(X[i], self.lines_) for i in range(len(X))]
- approx_diag = [np.matmul(np.matmul(X[i], diag_proj), self.lines_) for i in range(len(X))]
- for i in range(len(approx)):
- for j in range(len(self.approx_)):
- A = np.sort(np.concatenate([approx[i], self.approx_diag_[j]], axis=0), axis=0)
- B = np.sort(np.concatenate([self.approx_[j], approx_diag[i]], axis=0), axis=0)
- L1 = np.sum(np.abs(A-B), axis=0)
- Xfit[i,j] = np.mean(L1)
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="sliced_wasserstein", num_directions=self.num_directions)
- return Xfit
+ def __call__(self, diag1, diag2):
+ """
+ Apply SlicedWassersteinDistance on a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: sliced Wasserstein distance.
+ """
+ return _sliced_wasserstein_distance(diag1, diag2, num_directions=self.num_directions)
class BottleneckDistance(BaseEstimator, TransformerMixin):
"""
@@ -116,34 +255,26 @@ class BottleneckDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise bottleneck distances.
"""
- num_diag1 = len(X)
-
- #if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- if X is self.diagrams_:
- matrix = np.zeros((num_diag1, num_diag1))
-
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(i+1, num_diag1):
- matrix[i,j] = bottleneck_distance(X[i], X[j], self.epsilon)
- matrix[j,i] = matrix[i,j]
- else:
- print("Gudhi built without CGAL: returning a null matrix")
-
- else:
- num_diag2 = len(self.diagrams_)
- matrix = np.zeros((num_diag1, num_diag2))
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric="bottleneck", e=self.epsilon)
+ return Xfit
- if USE_GUDHI:
- for i in range(num_diag1):
- for j in range(num_diag2):
- matrix[i,j] = bottleneck_distance(X[i], self.diagrams_[j], self.epsilon)
- else:
- print("Gudhi built without CGAL: returning a null matrix")
+ def __call__(self, diag1, diag2):
+ """
+ Apply BottleneckDistance on a single pair of persistence diagrams and output the result.
- Xfit = matrix
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
- return Xfit
+ Returns:
+ float: bottleneck distance.
+ """
+ try:
+ from .. import bottleneck_distance
+ return bottleneck_distance(diag1, diag2, e=self.epsilon)
+ except ImportError:
+ print("Gudhi built without CGAL")
+ raise
class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
"""
@@ -168,11 +299,6 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
y (n x 1 array): persistence diagram labels (unused).
"""
self.diagrams_ = X
- projection = (1./2) * np.ones((2,2))
- self.diagonal_projections_ = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- self.approx_ = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- self.approx_diagonal_ = [self.kernel_approx.transform(self.diagonal_projections_[i]) for i in range(len(X))]
return self
def transform(self, X):
@@ -185,60 +311,83 @@ class PersistenceFisherDistance(BaseEstimator, TransformerMixin):
Returns:
numpy array of shape (number of diagrams in **diagrams**) x (number of diagrams in X): matrix of pairwise persistence Fisher distances.
"""
- Xfit = np.zeros((len(X), len(self.diagrams_)))
- if len(self.diagrams_) == len(X) and np.all([np.array_equal(self.diagrams_[i], X[i]) for i in range(len(X))]):
- for i in range(len(self.diagrams_)):
- for j in range(i+1, len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([self.approx_[i], self.approx_diagonal_[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([self.approx_[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], self.approx_diagonal_[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
- else:
- Z = np.concatenate([self.diagrams_[i], self.diagonal_projections_[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([self.diagrams_[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], self.diagonal_projections_[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- Xfit[j,i] = Xfit[i,j]
+ return pairwise_persistence_diagram_distances(X, self.diagrams_, metric="persistence_fisher", bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply PersistenceFisherDistance to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: persistence Fisher distance.
+ """
+ return _persistence_fisher_distance(diag1, diag2, bandwidth=self.bandwidth, kernel_approx=self.kernel_approx)
+
+class WassersteinDistance(BaseEstimator, TransformerMixin):
+ """
+ This is a class for computing the Wasserstein distance matrix from a list of persistence diagrams.
+ """
+ def __init__(self, order=2, internal_p=2, mode="pot", delta=0.01):
+ """
+ Constructor for the WassersteinDistance class.
+
+ Parameters:
+ order (int): exponent for Wasserstein, default value is 2; see :func:`gudhi.wasserstein.wasserstein_distance`.
+ internal_p (int): ground metric on the (upper-half) plane (i.e. norm l_p in R^2), default value is 2 (Euclidean norm); see :func:`gudhi.wasserstein.wasserstein_distance`.
+ mode (str): method for computing the Wasserstein distance. Either "pot" or "hera".
+ delta (float): relative error guarantee; hera returns a value within a factor of 1+delta of the exact distance. Used only if mode == "hera".
+ """
+ self.order, self.internal_p, self.mode = order, internal_p, mode
+ self.metric = "pot_wasserstein" if mode == "pot" else "hera_wasserstein"
+ self.delta = delta
+
+ def fit(self, X, y=None):
+ """
+ Fit the WassersteinDistance class on a list of persistence diagrams: persistence diagrams are stored in a numpy array called **diagrams**.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+ y (n x 1 array): persistence diagram labels (unused).
+ """
+ self.diagrams_ = X
+ return self
+
+ def transform(self, X):
+ """
+ Compute all Wasserstein distances between the persistence diagrams that were stored after calling the fit() method, and a given list of (possibly different) persistence diagrams.
+
+ Parameters:
+ X (list of n x 2 numpy arrays): input persistence diagrams.
+
+ Returns:
+ numpy array of shape (number of diagrams in X) x (number of diagrams in **diagrams**): matrix of pairwise Wasserstein distances.
+ """
+ if self.metric == "hera_wasserstein":
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, delta=self.delta)
else:
- projection = (1./2) * np.ones((2,2))
- diagonal_projections = [np.matmul(X[i], projection) for i in range(len(X))]
- if self.kernel_approx is not None:
- approx = [self.kernel_approx.transform(X[i]) for i in range(len(X))]
- approx_diagonal = [self.kernel_approx.transform(diagonal_projections[i]) for i in range(len(X))]
- for i in range(len(X)):
- for j in range(len(self.diagrams_)):
- if self.kernel_approx is not None:
- Z = np.concatenate([approx[i], approx_diagonal[i], self.approx_[j], self.approx_diagonal_[j]], axis=0)
- U, V = np.sum(np.concatenate([approx[i], self.approx_diagonal_[j]], axis=0), axis=0), np.sum(np.concatenate([self.approx_[j], approx_diagonal[i]], axis=0), axis=0)
- vectori, vectorj = np.abs(np.matmul(Z, U.T)), np.abs(np.matmul(Z, V.T))
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
- else:
- Z = np.concatenate([X[i], diagonal_projections[i], self.diagrams_[j], self.diagonal_projections_[j]], axis=0)
- U, V = np.concatenate([X[i], self.diagonal_projections_[j]], axis=0), np.concatenate([self.diagrams_[j], diagonal_projections[i]], axis=0)
- vectori = np.sum(np.exp(-np.square(pairwise_distances(Z,U))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectorj = np.sum(np.exp(-np.square(pairwise_distances(Z,V))/(2 * np.square(self.bandwidth)))/(self.bandwidth * np.sqrt(2*np.pi)), axis=1)
- vectori_sum, vectorj_sum = np.sum(vectori), np.sum(vectorj)
- if vectori_sum != 0:
- vectori = vectori/vectori_sum
- if vectorj_sum != 0:
- vectorj = vectorj/vectorj_sum
- Xfit[i,j] = np.arccos( min(np.dot(np.sqrt(vectori), np.sqrt(vectorj)), 1.) )
+ Xfit = pairwise_persistence_diagram_distances(X, self.diagrams_, metric=self.metric, order=self.order, internal_p=self.internal_p, matching=False)
return Xfit
+
+ def __call__(self, diag1, diag2):
+ """
+ Apply WassersteinDistance to a single pair of persistence diagrams and output the result.
+
+ Parameters:
+ diag1 (n x 2 numpy array): first input persistence diagram.
+ diag2 (n x 2 numpy array): second input persistence diagram.
+
+ Returns:
+ float: Wasserstein distance.
+ """
+ if self.metric == "hera_wasserstein":
+ return hera_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, delta=self.delta)
+ else:
+ try:
+ from gudhi.wasserstein import wasserstein_distance as pot_wasserstein_distance
+ return pot_wasserstein_distance(diag1, diag2, order=self.order, internal_p=self.internal_p, matching=False)
+ except ImportError:
+ print("POT (Python Optimal Transport) is not installed. Please install POT or use metric='wasserstein' or metric='hera_wasserstein'")
+ raise
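
For reference, the new single-pair interface sits next to the scikit-learn one. A minimal sketch, assuming a CGAL-enabled build for the bottleneck part (diagrams and parameter values invented for illustration):

    import numpy as np
    from gudhi.representations import WassersteinDistance, BottleneckDistance

    diag1 = np.array([[0., 1.], [0.5, 2.]])
    diag2 = np.array([[0., 1.5]])

    # scikit-learn style: fit on one list of diagrams, transform another.
    wd = WassersteinDistance(order=2, internal_p=2, mode="hera", delta=0.01)
    wd.fit([diag1])
    matrix = wd.transform([diag2])  # shape (1, 1)

    # New in this change: the same object applied to a single pair.
    single = wd(diag1, diag2)
    print(BottleneckDistance()(diag1, diag2))
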
diff --git a/src/python/gudhi/representations/preprocessing.py b/src/python/gudhi/representations/preprocessing.py
index a39b00e4..a8545349 100644
--- a/src/python/gudhi/representations/preprocessing.py
+++ b/src/python/gudhi/representations/preprocessing.py
@@ -54,6 +54,18 @@ class BirthPersistenceTransform(BaseEstimator, TransformerMixin):
Xfit.append(new_diag)
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BirthPersistenceTransform to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Clamping(BaseEstimator, TransformerMixin):
"""
This is a class for clamping values. It can be used as a parameter for the DiagramScaler class, for instance if you want to clamp abscissae or ordinates of persistence diagrams.
@@ -142,6 +154,18 @@ class DiagramScaler(BaseEstimator, TransformerMixin):
Xfit[i][:,I] = np.squeeze(scaler.transform(np.reshape(Xfit[i][:,I], [-1,1])))
return Xfit
+ def __call__(self, diag):
+ """
+ Apply DiagramScaler to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: transformed persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class Padding(BaseEstimator, TransformerMixin):
"""
This is a class for padding a list of persistence diagrams with dummy points, so that all persistence diagrams end up with the same number of points.
@@ -186,6 +210,18 @@ class Padding(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Padding to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: padded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class ProminentPoints(BaseEstimator, TransformerMixin):
"""
This is a class for removing points that are close or far from the diagonal in persistence diagrams. If persistence diagrams are n x 2 numpy arrays (i.e. persistence diagrams with ordinary features), points are ordered and thresholded by distance-to-diagonal. If persistence diagrams are n x 1 numpy arrays (i.e. persistence diagrams with essential features), points are not ordered and thresholded by first coordinate.
@@ -259,6 +295,18 @@ class ProminentPoints(BaseEstimator, TransformerMixin):
Xfit = X
return Xfit
+ def __call__(self, diag):
+ """
+ Apply ProminentPoints to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: thresholded persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
+
class DiagramSelector(BaseEstimator, TransformerMixin):
"""
This is a class for extracting finite or essential points in persistence diagrams.
@@ -303,3 +351,15 @@ class DiagramSelector(BaseEstimator, TransformerMixin):
else:
Xfit = X
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply DiagramSelector to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ n x 2 numpy array: extracted persistence diagram.
+ """
+ return self.fit_transform([diag])[0]
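
Each preprocessing class gains the same one-diagram shortcut, equivalent to fit_transform([diag])[0]. A small sketch (diagram values invented):

    import numpy as np
    from gudhi.representations import DiagramSelector, BirthPersistenceTransform

    diag = np.array([[0., 2.], [1., 3.], [4., np.inf]])

    # Keep the finite points, then move to (birth, persistence) coordinates.
    finite = DiagramSelector(use=True, point_type="finite")(diag)
    bp = BirthPersistenceTransform()(finite)
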
diff --git a/src/python/gudhi/representations/vector_methods.py b/src/python/gudhi/representations/vector_methods.py
index fe26dbe2..46fee086 100644
--- a/src/python/gudhi/representations/vector_methods.py
+++ b/src/python/gudhi/representations/vector_methods.py
@@ -81,6 +81,18 @@ class PersistenceImage(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply PersistenceImage to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of pixels = **resolution[0]** x **resolution[1]**): output persistence image.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Landscape(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence landscapes from a list of persistence diagrams. A persistence landscape is a collection of 1D piecewise-linear functions computed from the rank function associated to the persistence diagram. These piecewise-linear functions are then sampled evenly on a given range and the corresponding vectors of samples are concatenated and returned. See http://jmlr.org/papers/v16/bubenik15a.html for more details.
@@ -170,6 +182,18 @@ class Landscape(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Landscape to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (number of samples = **num_landscapes** x **resolution**): output persistence landscape.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Silhouette(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence silhouettes from a list of persistence diagrams. A persistence silhouette is computed by taking a weighted average of the collection of 1D piecewise-linear functions given by the persistence landscapes, and then by evenly sampling this average on a given range. Finally, the corresponding vector of samples is returned. See https://arxiv.org/abs/1312.0308 for more details.
@@ -248,6 +272,18 @@ class Silhouette(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Silhouette to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output persistence silhouette.
+ """
+ return self.fit_transform([diag])[0,:]
+
class BettiCurve(BaseEstimator, TransformerMixin):
"""
This is a class for computing Betti curves from a list of persistence diagrams. A Betti curve is a 1D piecewise-constant function obtained from the rank function. It is sampled evenly on a given range and the vector of samples is returned. See https://www.researchgate.net/publication/316604237_Time_Series_Classification_via_Topological_Data_Analysis for more details.
@@ -308,6 +344,18 @@ class BettiCurve(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply BettiCurve to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**resolution**): output Betti curve.
+ """
+ return self.fit_transform([diag])[0,:]
+
class Entropy(BaseEstimator, TransformerMixin):
"""
This is a class for computing persistence entropy. Persistence entropy is a statistic for persistence diagrams inspired from Shannon entropy. This statistic can also be used to compute a feature vector, called the entropy summary function. See https://arxiv.org/pdf/1803.08304.pdf for more details. Note that a previous implementation was contributed by Manuel Soriano-Trigueros.
@@ -378,6 +426,18 @@ class Entropy(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply Entropy to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (1 if **mode** = "scalar" else **resolution**): output entropy.
+ """
+ return self.fit_transform([diag])[0,:]
+
class TopologicalVector(BaseEstimator, TransformerMixin):
"""
This is a class for computing topological vectors from a list of persistence diagrams. The topological vector associated to a persistence diagram is the sorted vector of a slight modification of the pairwise distances between the persistence diagram points. See https://diglib.eg.org/handle/10.1111/cgf12692 for more details.
@@ -431,6 +491,18 @@ class TopologicalVector(BaseEstimator, TransformerMixin):
return Xfit
+ def __call__(self, diag):
+ """
+ Apply TopologicalVector to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output topological vector.
+ """
+ return self.fit_transform([diag])[0,:]
+
class ComplexPolynomial(BaseEstimator, TransformerMixin):
"""
This is a class for computing complex polynomials from a list of persistence diagrams. The persistence diagram points are seen as the roots of some complex polynomial, whose coefficients are returned in a complex vector. See https://link.springer.com/chapter/10.1007%2F978-3-319-23231-7_27 for more details.
@@ -490,3 +562,15 @@ class ComplexPolynomial(BaseEstimator, TransformerMixin):
coeff = np.array(coeff[::-1])[1:]
Xfit[d, :min(thresh, coeff.shape[0])] = coeff[:min(thresh, coeff.shape[0])]
return Xfit
+
+ def __call__(self, diag):
+ """
+ Apply ComplexPolynomial to a single persistence diagram and output the result.
+
+ Parameters:
+ diag (n x 2 numpy array): input persistence diagram.
+
+ Returns:
+ numpy array with shape (**threshold**): output complex vector of coefficients.
+ """
+ return self.fit_transform([diag])[0,:]
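
The vectorizers follow the same pattern: calling the object on a single diagram returns the vector for that diagram alone. A sketch (parameter values arbitrary):

    import numpy as np
    from gudhi.representations import Landscape, PersistenceImage

    diag = np.array([[0., 2.], [1., 3.]])

    lands = Landscape(num_landscapes=2, resolution=10)(diag)  # shape (20,)
    image = PersistenceImage(resolution=[5, 5])(diag)         # shape (25,)
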
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 5dea2449..1d4ed926 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -77,3 +77,5 @@ cdef extern from "Persistent_cohomology_interface.h" namespace "Gudhi":
vector[pair[double,double]] intervals_in_dimension(int dimension)
void write_output_diagram(string diagram_file_name) except +
vector[pair[vector[int], vector[int]]] persistence_pairs()
+ pair[vector[vector[int]], vector[vector[int]]] lower_star_generators()
+ pair[vector[vector[int]], vector[vector[int]]] flag_generators()
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 9479118a..b23885b4 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -9,6 +9,7 @@
from cython.operator import dereference, preincrement
from libc.stdint cimport intptr_t
+import numpy
from numpy import array as np_array
cimport simplex_tree
@@ -100,6 +101,8 @@ cdef class SimplexTree:
.. deprecated:: 3.2.0
"""
+ import warnings
+ warnings.warn("Since Gudhi 3.2, calling SimplexTree.initialize_filtration is unnecessary.", DeprecationWarning)
self.get_ptr().initialize_filtration()
def num_vertices(self):
@@ -415,7 +418,7 @@ cdef class SimplexTree:
:param min_persistence: The minimum persistence value to take into
account (strictly greater than min_persistence). Default value is
0.0.
- Sets min_persistence to -1.0 to see all values.
+ Set min_persistence to -1.0 to see all values.
:type min_persistence: float.
:param persistence_dim_max: If true, the persistent homology for the
maximal dimension in the complex is computed. If false, it is
@@ -527,3 +530,49 @@ cdef class SimplexTree:
"""
assert self.pcohptr != NULL, "compute_persistence() must be called before write_persistence_diagram()"
self.pcohptr.write_output_diagram(persistence_file.encode('utf-8'))
+
+ def lower_star_persistence_generators(self):
+ """Assuming this is a lower-star filtration, this function returns the persistence pairs,
+ where each simplex is replaced with the vertex that gave it its filtration value.
+
+ :returns: First the regular persistence pairs, grouped by dimension, with one vertex per extremity,
+ and second the essential features, grouped by dimension, with one vertex each
+ :rtype: Tuple[List[numpy.array[int] of shape (n,2)], List[numpy.array[int] of shape (m,)]]
+
+ :note: lower_star_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "lower_star_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.lower_star_generators()
+ normal = [np_array(d).reshape(-1,2) for d in gen.first]
+ infinite = [np_array(d) for d in gen.second]
+ return (normal, infinite)
+
+ def flag_persistence_generators(self):
+ """Assuming this is a flag complex, this function returns the persistence pairs,
+ where each simplex is replaced with the vertices of the edges that gave it its filtration value.
+
+ :returns: First the regular persistence pairs of dimension 0, with one vertex for birth and two for death;
+ then the other regular persistence pairs, grouped by dimension, with 2 vertices per extremity;
+ then the connected components, with one vertex each;
+ finally the other essential features, grouped by dimension, with 2 vertices for birth.
+ :rtype: Tuple[numpy.array[int] of shape (n,3), List[numpy.array[int] of shape (m,4)], numpy.array[int] of shape (l,), List[numpy.array[int] of shape (k,2)]]
+
+ :note: flag_persistence_generators requires that `persistence()` be called first.
+ """
+ assert self.pcohptr != NULL, "flag_persistence_generators() requires that persistence() be called first."
+ gen = self.pcohptr.flag_generators()
+ if len(gen.first) == 0:
+ normal0 = numpy.empty((0,3))
+ normals = []
+ else:
+ l = iter(gen.first)
+ normal0 = np_array(next(l)).reshape(-1,3)
+ normals = [np_array(d).reshape(-1,4) for d in l]
+ if len(gen.second) == 0:
+ infinite0 = numpy.empty(0)
+ infinites = []
+ else:
+ l = iter(gen.second)
+ infinite0 = np_array(next(l))
+ infinites = [np_array(d).reshape(-1,2) for d in l]
+ return (normal0, normals, infinite0, infinites)
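
A quick sketch of the new accessors on a Rips complex (points invented; the output layout is the one described in the docstrings above):

    import numpy as np
    import gudhi

    pts = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    st = gudhi.RipsComplex(points=pts, max_edge_length=2.).create_simplex_tree(max_dimension=2)
    st.persistence()

    # Dimension 0: one birth vertex and the two vertices of the death edge;
    # higher dimensions: two vertices per extremity (birth and death edges).
    normal0, normals, infinite0, infinites = st.flag_persistence_generators()
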
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index efc851a0..89ecab1c 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -64,17 +64,31 @@ def _build_dist_matrix(X, Y, order, internal_p):
return Cf
-def _perstot(X, order, internal_p):
+def _perstot_autodiff(X, order, internal_p):
+ '''
+ Version of _perstot that works on eagerpy tensors.
+ '''
+ return _dist_to_diag(X, internal_p).norms.lp(order)
+
+def _perstot(X, order, internal_p, enable_autodiff):
'''
:param X: (n x 2) numpy.array (points of a given diagram).
:param order: exponent for Wasserstein. Default value is 2.
:param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2); Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X is a torch.Tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
+ transparent to automatic differentiation.
+ :type enable_autodiff: bool
:returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
'''
- return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
+ if enable_autodiff:
+ import eagerpy as ep
+
+ return _perstot_autodiff(ep.astensor(X), order, internal_p).raw
+ else:
+ return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
-def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
+def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2., enable_autodiff=False):
'''
:param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
(i.e. with infinite coordinate).
@@ -85,6 +99,13 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
:param order: exponent for Wasserstein; Default value is 2.
:param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
Default value is 2 (Euclidean norm).
+ :param enable_autodiff: If X and Y are torch.Tensor, tensorflow.Tensor or jax.numpy.ndarray objects, make the computation
+ transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
+ with `matching=True`.
+
+ .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+ and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
+ :type enable_autodiff: bool
:returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
respect to the internal_p-norm as ground metric.
If matching is set to True, also returns the optimal matching between X and Y.
@@ -93,23 +114,31 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
m = len(Y)
# handle empty diagrams
- if X.size == 0:
- if Y.size == 0:
+ if n == 0:
+ if m == 0:
if not matching:
+ # What if enable_autodiff?
return 0.
else:
return 0., np.array([])
else:
if not matching:
- return _perstot(Y, order, internal_p)
+ return _perstot(Y, order, internal_p, enable_autodiff)
else:
- return _perstot(Y, order, internal_p), np.array([[-1, j] for j in range(m)])
- elif Y.size == 0:
+ return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)])
+ elif m == 0:
if not matching:
- return _perstot(X, order, internal_p)
+ return _perstot(X, order, internal_p, enable_autodiff)
else:
- return _perstot(X, order, internal_p), np.array([[i, -1] for i in range(n)])
+ return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
+ if enable_autodiff:
+ import eagerpy as ep
+
+ X_orig = ep.astensor(X)
+ Y_orig = ep.astensor(Y)
+ X = X_orig.numpy()
+ Y = Y_orig.numpy()
M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
a = np.ones(n+1) # weight vector of the input diagram. Uniform here.
a[-1] = m
@@ -117,6 +146,7 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
b[-1] = n
if matching:
+ assert not enable_autodiff, "matching and enable_autodiff are currently incompatible"
P = ot.emd(a=a,b=b,M=M, numItermax=2000000)
ot_cost = np.sum(np.multiply(P,M))
P[-1, -1] = 0 # Remove matching corresponding to the diagonal
@@ -126,6 +156,23 @@ def wasserstein_distance(X, Y, matching=False, order=2., internal_p=2.):
match[:,1][match[:,1] >= m] = -1
return ot_cost ** (1./order) , match
+ if enable_autodiff:
+ P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
+ pairs_X_Y = np.argwhere(P[:-1, :-1])
+ pairs_X_diag = np.nonzero(P[:-1, -1])
+ pairs_Y_diag = np.nonzero(P[-1, :-1])
+ dists = []
+ # empty arrays are not handled properly by the helpers, so we avoid calling them
+ if len(pairs_X_Y):
+ dists.append((Y_orig[pairs_X_Y[:, 1]] - X_orig[pairs_X_Y[:, 0]]).norms.lp(internal_p, axis=-1).norms.lp(order))
+ if len(pairs_X_diag):
+ dists.append(_perstot_autodiff(X_orig[pairs_X_diag], order, internal_p))
+ if len(pairs_Y_diag):
+ dists.append(_perstot_autodiff(Y_orig[pairs_Y_diag], order, internal_p))
+ dists = [dist.reshape(1) for dist in dists]
+ return ep.concatenate(dists).norms.lp(order).raw
+ # We can also concatenate the 3 vectors to compute just one norm.
+
# Computation of the ot_cost using ot.emd2.
# Note: it is the Wasserstein distance to the power q.
# The default numItermax=100000 is not sufficient for some examples with 5000 points; what is a good value?
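
With enable_autodiff=True the returned value stays attached to the framework's autodiff graph, so the distance can be differentiated with respect to the diagram coordinates. A sketch with PyTorch (values invented):

    import torch
    from gudhi.wasserstein import wasserstein_distance

    X = torch.tensor([[0.0, 2.0], [1.0, 3.0]], requires_grad=True)
    Y = torch.tensor([[0.0, 1.5]], requires_grad=True)

    # eagerpy dispatches on the tensor type, so gradients flow back to X and Y.
    d = wasserstein_distance(X, Y, order=2, internal_p=2, enable_autodiff=True)
    d.backward()
    print(X.grad, Y.grad)
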
diff --git a/src/python/include/Persistent_cohomology_interface.h b/src/python/include/Persistent_cohomology_interface.h
index e2b69a52..0de9bd5c 100644
--- a/src/python/include/Persistent_cohomology_interface.h
+++ b/src/python/include/Persistent_cohomology_interface.h
@@ -28,18 +28,17 @@ persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomol
* Compare two intervals by dimension, then by length.
*/
struct cmp_intervals_by_dim_then_length {
- explicit cmp_intervals_by_dim_then_length(FilteredComplex * sc)
- : sc_(sc) { }
-
template<typename Persistent_interval>
bool operator()(const Persistent_interval & p1, const Persistent_interval & p2) {
- if (sc_->dimension(get < 0 > (p1)) == sc_->dimension(get < 0 > (p2)))
- return (sc_->filtration(get < 1 > (p1)) - sc_->filtration(get < 0 > (p1))
- > sc_->filtration(get < 1 > (p2)) - sc_->filtration(get < 0 > (p2)));
+ if (std::get<0>(p1) == std::get<0>(p2)) {
+ auto& i1 = std::get<1>(p1);
+ auto& i2 = std::get<1>(p2);
+ return std::get<1>(i1) - std::get<0>(i1) > std::get<1>(i2) - std::get<0>(i2);
+ }
else
- return (sc_->dimension(get < 0 > (p1)) > sc_->dimension(get < 0 > (p2)));
+ return (std::get<0>(p1) > std::get<0>(p2));
+ // Why does this sort by decreasing dimension?
}
- FilteredComplex* sc_;
};
public:
@@ -54,45 +53,132 @@ persistent_cohomology::Persistent_cohomology<FilteredComplex, persistent_cohomol
}
std::vector<std::pair<int, std::pair<double, double>>> get_persistence() {
- // Custom sort and output persistence
- cmp_intervals_by_dim_then_length cmp(stptr_);
- auto persistent_pairs = Base::get_persistent_pairs();
- std::sort(std::begin(persistent_pairs), std::end(persistent_pairs), cmp);
-
std::vector<std::pair<int, std::pair<double, double>>> persistence;
+ auto const& persistent_pairs = Base::get_persistent_pairs();
+ persistence.reserve(persistent_pairs.size());
for (auto pair : persistent_pairs) {
- persistence.push_back(std::make_pair(stptr_->dimension(get<0>(pair)),
- std::make_pair(stptr_->filtration(get<0>(pair)),
- stptr_->filtration(get<1>(pair)))));
+ persistence.emplace_back(stptr_->dimension(get<0>(pair)),
+ std::make_pair(stptr_->filtration(get<0>(pair)),
+ stptr_->filtration(get<1>(pair))));
}
+ // Custom sort and output persistence
+ cmp_intervals_by_dim_then_length cmp;
+ std::sort(std::begin(persistence), std::end(persistence), cmp);
return persistence;
}
std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs() {
- auto pairs = Base::get_persistent_pairs();
-
std::vector<std::pair<std::vector<int>, std::vector<int>>> persistence_pairs;
+ auto const& pairs = Base::get_persistent_pairs();
persistence_pairs.reserve(pairs.size());
+ std::vector<int> birth;
+ std::vector<int> death;
for (auto pair : pairs) {
- std::vector<int> birth;
+ birth.clear();
if (get<0>(pair) != stptr_->null_simplex()) {
for (auto vertex : stptr_->simplex_vertex_range(get<0>(pair))) {
birth.push_back(vertex);
}
}
- std::vector<int> death;
+ death.clear();
if (get<1>(pair) != stptr_->null_simplex()) {
+ death.reserve(birth.size()+1);
for (auto vertex : stptr_->simplex_vertex_range(get<1>(pair))) {
death.push_back(vertex);
}
}
- persistence_pairs.push_back(std::make_pair(birth, death));
+ persistence_pairs.emplace_back(birth, death);
}
return persistence_pairs;
}
+ // TODO: (possibly at the python level)
+ // - an option to return only some of those vectors?
+ typedef std::pair<std::vector<std::vector<int>>, std::vector<std::vector<int>>> Generators;
+
+ Generators lower_star_generators() {
+ Generators out;
+ // diags[i] should be interpreted as vector<array<int,2>>
+ auto& diags = out.first;
+ // diagsinf[i] should be interpreted as vector<int>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ auto v = stptr_->vertex_with_same_filtration(s);
+ if(t == stptr_->null_simplex()) {
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ diagsinf[dim].push_back(v);
+ } else {
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto w = stptr_->vertex_with_same_filtration(t);
+ auto& d = diags[dim];
+ d.insert(d.end(), { v, w });
+ }
+ }
+ return out;
+ }
+
+ // An alternative, to avoid those different sizes, would be to "pad" vertex generator v as (v, v) or (v, -1).
+ // When using it as index, this corresponds to adding the vertex filtration values either on the diagonal
+ // of the distance matrix, or as an extra row or column.
+ // We could also merge the vectors for different dimensions into a single one, with an extra column for the dimension (converted to type double).
+ Generators flag_generators() {
+ Generators out;
+ // diags[0] should be interpreted as vector<array<int,3>> and other diags[i] as vector<array<int,4>>
+ auto& diags = out.first;
+ // diagsinf[0] should be interpreted as vector<int> and other diagsinf[i] as vector<array<int,2>>
+ auto& diagsinf = out.second;
+ for (auto pair : Base::get_persistent_pairs()) {
+ auto s = std::get<0>(pair);
+ auto t = std::get<1>(pair);
+ int dim = stptr_->dimension(s);
+ bool infinite = t == stptr_->null_simplex();
+ if(infinite) {
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if(diagsinf.size()==0)diagsinf.emplace_back();
+ diagsinf[0].push_back(v);
+ } else {
+ auto e = stptr_->edge_with_same_filtration(s);
+ auto&& e_vertices = stptr_->simplex_vertex_range(e);
+ auto i = std::begin(e_vertices);
+ auto v1 = *i;
+ auto v2 = *++i;
+ GUDHI_CHECK(++i==std::end(e_vertices), "must be an edge");
+ while(diagsinf.size() < dim+1) diagsinf.emplace_back();
+ auto& d = diagsinf[dim];
+ d.insert(d.end(), { v1, v2 });
+ }
+ } else {
+ auto et = stptr_->edge_with_same_filtration(t);
+ auto&& et_vertices = stptr_->simplex_vertex_range(et);
+ auto it = std::begin(et_vertices);
+ auto w1 = *it;
+ auto w2 = *++it;
+ GUDHI_CHECK(++it==std::end(et_vertices), "must be an edge");
+ if(dim == 0) {
+ auto v = *std::begin(stptr_->simplex_vertex_range(s));
+ if(diags.size()==0)diags.emplace_back();
+ auto& d = diags[0];
+ d.insert(d.end(), { v, w1, w2 });
+ } else {
+ auto es = stptr_->edge_with_same_filtration(s);
+ auto&& es_vertices = stptr_->simplex_vertex_range(es);
+ auto is = std::begin(es_vertices);
+ auto v1 = *is;
+ auto v2 = *++is;
+ GUDHI_CHECK(++is==std::end(es_vertices), "must be an edge");
+ while(diags.size() < dim+1) diags.emplace_back();
+ auto& d = diags[dim];
+ d.insert(d.end(), { v1, v2, w1, w2 });
+ }
+ }
+ }
+ return out;
+ }
+
private:
// A copy
FilteredComplex* stptr_;
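
The indexing idea from the comment above can be made concrete on the Python side. A hedged sketch, where dist is assumed to be the distance matrix the Rips complex was built from and g the output of flag_persistence_generators():

    import numpy as np

    # Hypothetical names: g[0] holds one row (v, w1, w2) per finite
    # dimension-0 pair. For an unweighted Rips, vertices are born at 0
    # and deaths are the lengths of the death edges.
    births0 = np.zeros(len(g[0]))
    deaths0 = dist[g[0][:, 1], g[0][:, 2]]
    diagram0 = np.stack([births0, deaths0], axis=-1)
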
diff --git a/src/python/include/pybind11_diagram_utils.h b/src/python/include/pybind11_diagram_utils.h
new file mode 100644
index 00000000..d9627258
--- /dev/null
+++ b/src/python/include/pybind11_diagram_utils.h
@@ -0,0 +1,39 @@
+/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ * Author(s): Marc Glisse
+ *
+ * Copyright (C) 2020 Inria
+ *
+ * Modification(s):
+ * - YYYY/MM Author: Description of the modification
+ */
+
+#include <pybind11/pybind11.h>
+#include <pybind11/numpy.h>
+
+#include <boost/range/counting_range.hpp>
+#include <boost/range/adaptor/transformed.hpp>
+
+namespace py = pybind11;
+typedef py::array_t<double> Dgm;
+
+// Get m[i,0] and m[i,1] as a pair, where p is the raw data pointer,
+// h the row stride in bytes and w the column stride in bytes
+static auto pairify(void* p, ssize_t h, ssize_t w) {
+ return [=](ssize_t i){
+ char* birth = (char*)p + i * h;
+ char* death = birth + w;
+ return std::make_pair(*(double*)birth, *(double*)death);
+ };
+}
+
+inline auto numpy_to_range_of_pairs(py::array_t<double> dgm) {
+ py::buffer_info buf = dgm.request();
+ // shape (n,2) or (0) for empty
+ if((buf.ndim!=2 || buf.shape[1]!=2) && (buf.ndim!=1 || buf.shape[0]!=0))
+ throw std::runtime_error("Diagram must be an array of size n x 2");
+ // In the case of shape (0), avoid reading non-existing strides[1] even if we won't use it.
+ ssize_t stride1 = buf.ndim == 2 ? buf.strides[1] : 0;
+ auto cnt = boost::counting_range<ssize_t>(0, buf.shape[0]);
+ return boost::adaptors::transform(cnt, pairify(buf.ptr, buf.strides[0], stride1));
+ // Careful: the returned range must not contain references to dead temporaries.
+}
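
On the Python side this means a diagram argument must be an array of shape (n, 2), with an empty array also accepted. A quick check, assuming a CGAL-enabled build so that bottleneck_distance is available:

    import numpy as np
    import gudhi

    a = np.array([[0., 2.], [1., 4.]])
    b = np.empty((0, 2))  # empty diagram
    print(gudhi.bottleneck_distance(a, b))
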
diff --git a/src/python/setup.py.in b/src/python/setup.py.in
index f968bd59..b9f4e3f0 100644
--- a/src/python/setup.py.in
+++ b/src/python/setup.py.in
@@ -18,7 +18,8 @@ __author__ = "Vincent Rouvreau"
__copyright__ = "Copyright (C) 2016 Inria"
__license__ = "MIT"
-modules = [@GUDHI_PYTHON_MODULES_TO_COMPILE@]
+cython_modules = [@GUDHI_CYTHON_MODULES@]
+pybind11_modules = [@GUDHI_PYBIND11_MODULES@]
source_dir='@CMAKE_CURRENT_SOURCE_DIR@/gudhi/'
extra_compile_args=[@GUDHI_PYTHON_EXTRA_COMPILE_ARGS@]
@@ -30,7 +31,7 @@ runtime_library_dirs=[@GUDHI_PYTHON_RUNTIME_LIBRARY_DIRS@]
# Create ext_modules list from module list
ext_modules = []
-for module in modules:
+for module in cython_modules:
ext_modules.append(Extension(
'gudhi.' + module,
sources = [source_dir + module + '.pyx',],
@@ -45,15 +46,21 @@ for module in modules:
ext_modules = cythonize(ext_modules)
-ext_modules.append(Extension(
- 'gudhi.hera',
- sources = [source_dir + 'hera.cc'],
- language = 'c++',
- include_dirs = include_dirs +
- ['@HERA_WASSERSTEIN_INCLUDE_DIR@',
- pybind11.get_include(False), pybind11.get_include(True)],
- extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
- ))
+for module in pybind11_modules:
+ my_include_dirs = include_dirs + [pybind11.get_include(False), pybind11.get_include(True)]
+ if module == 'hera':
+ my_include_dirs = ['@HERA_WASSERSTEIN_INCLUDE_DIR@'] + my_include_dirs
+ ext_modules.append(Extension(
+ 'gudhi.' + module,
+ sources = [source_dir + module + '.cc'],
+ language = 'c++',
+ include_dirs = my_include_dirs,
+ extra_compile_args=extra_compile_args + [@GUDHI_PYBIND11_EXTRA_COMPILE_ARGS@],
+ extra_link_args=extra_link_args,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ runtime_library_dirs=runtime_library_dirs,
+ ))
setup(
name = 'gudhi',
diff --git a/src/python/test/test_simplex_generators.py b/src/python/test/test_simplex_generators.py
new file mode 100755
index 00000000..8a9b4844
--- /dev/null
+++ b/src/python/test/test_simplex_generators.py
@@ -0,0 +1,64 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Marc Glisse
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+import gudhi
+import numpy as np
+
+
+def test_flag_generators():
+ pts = np.array([[0, 0], [0, 1.01], [1, 0], [1.02, 1.03], [100, 0], [100, 3.01], [103, 0], [103.02, 3.03]])
+ r = gudhi.RipsComplex(pts, max_edge_length=4)
+ st = r.create_simplex_tree(max_dimension=50)
+ st.persistence()
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], [[2, 2, 0], [1, 1, 0], [3, 3, 1], [6, 6, 4], [5, 5, 4], [7, 7, 5]])
+ assert len(g[1]) == 1
+ assert np.array_equal(g[1][0], [[3, 2, 2, 1]])
+ assert np.array_equal(g[2], [0, 4])
+ assert len(g[3]) == 1
+ assert np.array_equal(g[3][0], [[7, 6]])
+ # Compare trivial cases (where the simplex is the generator) with persistence_pairs.
+ # This still makes assumptions on the order of vertices in a simplex and could be more robust.
+ pairs = st.persistence_pairs()
+ assert {tuple(i) for i in g[0]} == {(i[0][0],) + tuple(i[1]) for i in pairs if len(i[0]) == 1 and len(i[1]) != 0}
+ assert {(i[0], i[1]) for i in g[1][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) != 0}
+ assert set(g[2]) == {i[0][0] for i in pairs if len(i[0]) == 1 and len(i[1]) == 0}
+ assert {(i[0], i[1]) for i in g[3][0]} == {tuple(i[0]) for i in pairs if len(i[0]) == 2 and len(i[1]) == 0}
+
+
+def test_lower_star_generators():
+ st = gudhi.SimplexTree()
+ st.insert([0, 1, 2], -10)
+ st.insert([0, 3], -10)
+ st.insert([1, 3], -10)
+ st.assign_filtration([2], -1)
+ st.assign_filtration([3], 0)
+ st.assign_filtration([0], 1)
+ st.assign_filtration([1], 2)
+ st.make_filtration_non_decreasing()
+ st.persistence(min_persistence=-1)
+ g = st.lower_star_persistence_generators()
+ assert len(g[0]) == 2
+ assert np.array_equal(g[0][0], [[0, 0], [3, 0], [1, 1]])
+ assert np.array_equal(g[0][1], [[1, 1]])
+ assert len(g[1]) == 2
+ assert np.array_equal(g[1][0], [2])
+ assert np.array_equal(g[1][1], [1])
+
+
+def test_empty():
+ st = gudhi.SimplexTree()
+ st.persistence()
+ assert st.lower_star_persistence_generators() == ([], [])
+ g = st.flag_persistence_generators()
+ assert np.array_equal(g[0], np.empty((0, 3)))
+ assert g[1] == []
+ assert np.array_equal(g[2], [])
+ assert g[3] == []
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 1a4acc1d..90d26809 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -80,14 +80,44 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat
-def hera_wrap(delta):
+def hera_wrap(**extra):
def fun(*kargs,**kwargs):
- return hera(*kargs,**kwargs,delta=delta)
+ return hera(*kargs,**kwargs,**extra)
+ return fun
+
+def pot_wrap(**extra):
+ def fun(*kargs,**kwargs):
+ return pot(*kargs,**kwargs,**extra)
return fun
def test_wasserstein_distance_pot():
_basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True)
+ _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False)
def test_wasserstein_distance_hera():
- _basic_wasserstein(hera_wrap(1e-12), 1e-12, test_matching=False)
- _basic_wasserstein(hera_wrap(.1), .1, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
+ _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
+
+def test_wasserstein_distance_grad():
+ import torch
+
+ diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
+ diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ assert diag1.grad is None and diag2.grad is None and diag3.grad is None
+ dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
+ dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
+ dist12.backward()
+ dist30.backward()
+ assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
+ diag4 = torch.tensor([[0., 10.]], requires_grad=True)
+ diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+ dist45.backward()
+ assert np.array_equal(diag4.grad, [[-1., -1.]])
+ assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
+ diag6 = torch.tensor([[5., 10.]], requires_grad=True)
+ pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ # assert np.array_equal(diag6.grad, [[0., 0.]])