Diffstat (limited to 'src/python')
-rw-r--r--  src/python/CMakeLists.txt                         | 11
-rw-r--r--  src/python/doc/installation.rst                   |  9
-rw-r--r--  src/python/doc/rips_complex_sum.inc               | 22
-rw-r--r--  src/python/doc/rips_complex_user.rst              |  6
-rw-r--r--  src/python/gudhi/simplex_tree.pxd                 |  1
-rw-r--r--  src/python/gudhi/simplex_tree.pyx                 | 22
-rw-r--r--  src/python/gudhi/subsampling.pyx                  | 21
-rwxr-xr-x  src/python/test/test_simplex_tree.py              | 22
-rwxr-xr-x  src/python/test/test_wasserstein_distance.py      | 24
-rwxr-xr-x  src/python/test/test_wasserstein_with_tensors.py  | 47
10 files changed, 130 insertions, 55 deletions
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index 4f26481e..c09996fe 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -103,6 +103,9 @@ if(PYTHONINTERP_FOUND)
if(EAGERPY_FOUND)
add_gudhi_debug_info("EagerPy version ${EAGERPY_VERSION}")
endif()
+ if(TENSORFLOW_FOUND)
+ add_gudhi_debug_info("TensorFlow version ${TENSORFLOW_VERSION}")
+ endif()
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_RESULT_OF_USE_DECLTYPE', ")
set(GUDHI_PYTHON_EXTRA_COMPILE_ARGS "${GUDHI_PYTHON_EXTRA_COMPILE_ARGS}'-DBOOST_ALL_NO_LIB', ")
@@ -496,11 +499,17 @@ if(PYTHONINTERP_FOUND)
# Wasserstein
if(OT_FOUND AND PYBIND11_FOUND)
- if(TORCH_FOUND AND EAGERPY_FOUND)
+ # EagerPy dependency because of enable_autodiff=True
+ if(EAGERPY_FOUND)
add_gudhi_py_test(test_wasserstein_distance)
endif()
add_gudhi_py_test(test_wasserstein_barycenter)
endif()
+ if(OT_FOUND)
+ if(TORCH_FOUND AND TENSORFLOW_FOUND AND EAGERPY_FOUND)
+ add_gudhi_py_test(test_wasserstein_with_tensors)
+ endif()
+ endif()
# Representations
if(SKLEARN_FOUND AND MATPLOTLIB_FOUND)
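The guards above register test_wasserstein_with_tensors only when PyTorch, TensorFlow and EagerPy are all detected by CMake. As a rough, hypothetical sketch (not part of this change), the same requirement could also be expressed inside the test module itself with pytest, so the file skips cleanly if run outside the CMake-driven test suite:

    # Hypothetical module-level guard; skips the whole file if any optional
    # tensor backend is missing, mirroring the CMake-level condition.
    import pytest

    torch = pytest.importorskip("torch")
    tf = pytest.importorskip("tensorflow")
    ep = pytest.importorskip("eagerpy")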
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 78e1af73..66efe45a 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -40,7 +40,7 @@ different, and in particular the `python/` subdirectory is actually `src/python/
there.
The library uses c++14 and requires `Boost <https://www.boost.org/>`_ :math:`\geq` 1.56.0,
-`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.1 to generate makefiles,
+`CMake <https://www.cmake.org/>`_ :math:`\geq` 3.5 to generate makefiles,
`NumPy <http://numpy.org>`_, `Cython <https://www.cython.org/>`_ and
`pybind11 <https://github.com/pybind/pybind11>`_ to compile
the GUDHI Python module.
@@ -65,7 +65,7 @@ one can build the GUDHI Python module, by running the following commands in a te
cd /path-to-gudhi/
mkdir build
cd build/
- cmake ..
+ cmake -DCMAKE_BUILD_TYPE=Release ..
cd python
make
@@ -394,6 +394,11 @@ mathematics, science, and engineering.
:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
`SciPy <http://scipy.org>`_ as a backend if explicitly requested.
+TensorFlow
+----------
+
+`TensorFlow <https://www.tensorflow.org>`_ is currently only used in some automatic differentiation tests.
+
Bug reports and contributions
*****************************
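Once the module is built as described above, one way to check which optional dependencies were detected is to print the module's debug information. This is a small sketch under the assumption that the gudhi module exposes a `__debug_info__` string collecting the `add_gudhi_debug_info` entries (including the new TensorFlow one):

    # Assumed helper: gudhi.__debug_info__ aggregates the optional-dependency
    # versions recorded at build time by add_gudhi_debug_info().
    import gudhi
    print(gudhi.__debug_info__)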
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index c123ea2a..2cb24990 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -1,14 +1,14 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+
- | .. figure:: | The Vietoris-Rips complex is a simplicial complex built as the | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse |
- | ../../doc/Rips_complex/rips_complex_representation.png | clique-complex of a proximity graph. | |
- | :figclass: align-center | | :Since: GUDHI 2.0.0 |
- | | We also provide sparse approximations, to speed-up the computation | |
- | | of persistent homology, and weighted versions, which are more robust | :License: MIT |
- | | to outliers. | |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------+
- | * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
- +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | .. figure:: | The Vietoris-Rips complex is a simplicial complex built as the | :Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse, Yuichi Ike |
+ | ../../doc/Rips_complex/rips_complex_representation.png | clique-complex of a proximity graph. | |
+ | :figclass: align-center | | :Since: GUDHI 2.0.0 |
+ | | We also provide sparse approximations, to speed-up the computation | |
+ | | of persistent homology, and weighted versions, which are more robust | :License: MIT |
+ | | to outliers. | |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
+ +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_user.rst b/src/python/doc/rips_complex_user.rst
index 6048cc4e..27d218d4 100644
--- a/src/python/doc/rips_complex_user.rst
+++ b/src/python/doc/rips_complex_user.rst
@@ -7,9 +7,9 @@ Rips complex user manual
Definition
----------
-==================================================================== ================================ ======================
-:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse :Since: GUDHI 2.0.0 :License: GPL v3
-==================================================================== ================================ ======================
+================================================================================ ================================ ======================
+:Authors: Clément Maria, Pawel Dlotko, Vincent Rouvreau, Marc Glisse, Yuichi Ike :Since: GUDHI 2.0.0 :License: GPL v3
+================================================================================ ================================ ======================
+-------------------------------------------+----------------------------------------------------------------------+
| :doc:`rips_complex_user` | :doc:`rips_complex_ref` |
diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd
index 6ec85bca..3c4cbed3 100644
--- a/src/python/gudhi/simplex_tree.pxd
+++ b/src/python/gudhi/simplex_tree.pxd
@@ -64,6 +64,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi":
void compute_extended_filtration() nogil
vector[vector[pair[int, pair[double, double]]]] compute_extended_persistence_subdiagrams(vector[pair[int, pair[double, double]]] dgm, double min_persistence) nogil
Simplex_tree_interface_full_featured* collapse_edges(int nb_collapse_iteration) nogil
+ void reset_filtration(double filtration, int dimension) nogil
# Iterators over Simplex tree
pair[vector[int], double] get_simplex_and_filtration(Simplex_tree_simplex_handle f_simplex) nogil
Simplex_tree_simplices_iterator get_simplices_iterator_begin() nogil
diff --git a/src/python/gudhi/simplex_tree.pyx b/src/python/gudhi/simplex_tree.pyx
index 5043c621..c671da56 100644
--- a/src/python/gudhi/simplex_tree.pyx
+++ b/src/python/gudhi/simplex_tree.pyx
@@ -342,7 +342,7 @@ cdef class SimplexTree:
return self.get_ptr().prune_above_filtration(filtration)
def expansion(self, max_dim):
- """Expands the Simplex_tree containing only its one skeleton
+ """Expands the simplex tree containing only its one skeleton
until dimension max_dim.
The expanded simplicial complex until dimension :math:`d`
@@ -352,7 +352,7 @@ cdef class SimplexTree:
The filtration value assigned to a simplex is the maximal filtration
value of one of its edges.
- The Simplex_tree must contain no simplex of dimension bigger than
+ The simplex tree must contain no simplex of dimension bigger than
1 when calling the method.
:param max_dim: The maximal dimension.
@@ -372,6 +372,20 @@ cdef class SimplexTree:
"""
return self.get_ptr().make_filtration_non_decreasing()
+ def reset_filtration(self, filtration, min_dim = 0):
+ """This function resets the filtration value of all the simplices of dimension at least min_dim. Resets all the
+ simplex tree when `min_dim = 0`.
+ `reset_filtration` may break the filtration property with `min_dim > 0`, and it is the user's responsibility to
+ make it a valid filtration (using a large enough `filt_value`, or calling `make_filtration_non_decreasing`
+ afterwards for instance).
+
+ :param filtration: New threshold value.
+ :type filtration: float.
+ :param min_dim: The minimal dimension. Default value is 0.
+ :type min_dim: int.
+ """
+ self.get_ptr().reset_filtration(filtration, min_dim)
+
def extend_filtration(self):
""" Extend filtration for computing extended persistence. This function only uses the
filtration values at the 0-dimensional simplices, and computes the extended persistence
@@ -380,14 +394,14 @@ cdef class SimplexTree:
.. note::
Note that after calling this function, the filtration
- values are actually modified within the Simplex_tree.
+ values are actually modified within the simplex tree.
The function :func:`extended_persistence`
retrieves the original values.
.. note::
Note that this code creates an extra vertex internally, so you should make sure that
- the Simplex_tree does not contain a vertex with the largest possible value (i.e., 4294967295).
+ the simplex tree does not contain a vertex with the largest possible value (i.e., 4294967295).
"""
self.get_ptr().compute_extended_filtration()
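A minimal usage sketch for the new `reset_filtration` method, based on its docstring and on the test added below (the simplices and filtration values are illustrative only):

    from gudhi import SimplexTree

    st = SimplexTree()
    st.insert([0, 1, 2], 3.0)  # triangle and its faces at filtration 3
    st.insert([0, 3], 2.0)     # an extra edge at filtration 2

    # Reset every simplex of dimension >= 1 to filtration 0; vertices keep
    # their values, so the filtration property may be broken afterwards.
    st.reset_filtration(0.0, 1)

    # Repair the filtration, as recommended in the docstring.
    st.make_filtration_non_decreasing()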
diff --git a/src/python/gudhi/subsampling.pyx b/src/python/gudhi/subsampling.pyx
index f77c6f75..b11d07e5 100644
--- a/src/python/gudhi/subsampling.pyx
+++ b/src/python/gudhi/subsampling.pyx
@@ -33,7 +33,7 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
The iteration starts with the landmark `starting point`.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -42,14 +42,15 @@ def choose_n_farthest_points(points=None, off_file='', nb_points=0, starting_poi
And in both cases
- :param nb_points: Number of points of the subsample.
- :type nb_points: unsigned.
+ :param nb_points: Number of points of the subsample (the subsample may be \
+ smaller if there are fewer than nb_points distinct input points)
+ :type nb_points: int
:param starting_point: The iteration starts with the landmark `starting \
- point`,which is the index of the point to start with. If not set, this \
+ point`, which is the index of the point to start with. If not set, this \
index is chosen randomly.
- :type starting_point: unsigned.
+ :type starting_point: int
:returns: The subsample point set.
- :rtype: List[List[float]].
+ :rtype: List[List[float]]
"""
if off_file:
if os.path.isfile(off_file):
@@ -76,7 +77,7 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
"""Subsample a point set by picking random vertices.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -86,7 +87,7 @@ def pick_n_random_points(points=None, off_file='', nb_points=0):
And in both cases
:param nb_points: Number of points of the subsample.
- :type nb_points: unsigned.
+ :type nb_points: int
:returns: The subsample point set.
:rtype: List[List[float]]
"""
@@ -107,7 +108,7 @@ def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
between any two points is greater than or equal to min_squared_dist.
:param points: The input point set.
- :type points: Iterable[Iterable[float]].
+ :type points: Iterable[Iterable[float]]
Or
@@ -118,7 +119,7 @@ def sparsify_point_set(points=None, off_file='', min_squared_dist=0.0):
:param min_squared_dist: Minimum squared distance separating the output \
points.
- :type min_squared_dist: float.
+ :type min_squared_dist: float
:returns: The subsample point set.
:rtype: List[List[float]]
"""
diff --git a/src/python/test/test_simplex_tree.py b/src/python/test/test_simplex_tree.py
index d79d2c34..3b23fa0b 100755
--- a/src/python/test/test_simplex_tree.py
+++ b/src/python/test/test_simplex_tree.py
@@ -359,6 +359,28 @@ def test_collapse_edges():
for simplex in st.get_skeleton(0):
assert simplex[1] == 1.
+def test_reset_filtration():
+ st = SimplexTree()
+
+ assert st.insert([0, 1, 2], 3.) == True
+ assert st.insert([0, 3], 2.) == True
+ assert st.insert([3, 4, 5], 3.) == True
+ assert st.insert([0, 1, 6, 7], 4.) == True
+
+ # Guaranteed by construction
+ for simplex in st.get_simplices():
+ assert st.filtration(simplex[0]) >= 2.
+
+ # Test dimensions from 5 down to 0, even though the simplex tree has dimension 3, to check the limits
+ for dimension in range(5, -1, -1):
+ st.reset_filtration(0., dimension)
+ for simplex in st.get_skeleton(3):
+ print(simplex)
+ if len(simplex[0]) < (dimension) + 1:
+ assert st.filtration(simplex[0]) >= 2.
+ else:
+ assert st.filtration(simplex[0]) == 0.
+
def test_boundaries_iterator():
st = SimplexTree()
diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py
index 90d26809..e3b521d6 100755
--- a/src/python/test/test_wasserstein_distance.py
+++ b/src/python/test/test_wasserstein_distance.py
@@ -97,27 +97,3 @@ def test_wasserstein_distance_pot():
def test_wasserstein_distance_hera():
_basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False)
_basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False)
-
-def test_wasserstein_distance_grad():
- import torch
-
- diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
- diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
- diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
- assert diag1.grad is None and diag2.grad is None and diag3.grad is None
- dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
- dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
- dist12.backward()
- dist30.backward()
- assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
- diag4 = torch.tensor([[0., 10.]], requires_grad=True)
- diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
- dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
- assert dist45 == 3.
- dist45.backward()
- assert np.array_equal(diag4.grad, [[-1., -1.]])
- assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
- diag6 = torch.tensor([[5., 10.]], requires_grad=True)
- pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
- # https://github.com/jonasrauber/eagerpy/issues/6
- # assert np.array_equal(diag6.grad, [[0., 0.]])
diff --git a/src/python/test/test_wasserstein_with_tensors.py b/src/python/test/test_wasserstein_with_tensors.py
new file mode 100755
index 00000000..e3f1411a
--- /dev/null
+++ b/src/python/test/test_wasserstein_with_tensors.py
@@ -0,0 +1,47 @@
+""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT.
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
+ Author(s): Mathieu Carriere
+
+ Copyright (C) 2020 Inria
+
+ Modification(s):
+ - YYYY/MM Author: Description of the modification
+"""
+
+from gudhi.wasserstein import wasserstein_distance as pot
+import numpy as np
+import torch
+import tensorflow as tf
+
+def test_wasserstein_distance_grad():
+ diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True)
+ diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True)
+ assert diag1.grad is None and diag2.grad is None and diag3.grad is None
+ dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True)
+ dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True)
+ dist12.backward()
+ dist30.backward()
+ assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any()
+ diag4 = torch.tensor([[0., 10.]], requires_grad=True)
+ diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True)
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+ dist45.backward()
+ assert np.array_equal(diag4.grad, [[-1., -1.]])
+ assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]])
+ diag6 = torch.tensor([[5., 10.]], requires_grad=True)
+ pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward()
+ # https://github.com/jonasrauber/eagerpy/issues/6
+ # assert np.array_equal(diag6.grad, [[0., 0.]])
+
+def test_wasserstein_distance_grad_tensorflow():
+ with tf.GradientTape() as tape:
+ diag4 = tf.convert_to_tensor(tf.Variable(initial_value=np.array([[0., 10.]]), trainable=True))
+ diag5 = tf.convert_to_tensor(tf.Variable(initial_value=np.array([[1., 11.], [3., 4.]]), trainable=True))
+ dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True)
+ assert dist45 == 3.
+
+ grads = tape.gradient(dist45, [diag4, diag5])
+ assert np.array_equal(grads[0].values, [[-1., -1.]])
+ assert np.array_equal(grads[1].values, [[1., 1.], [-1., 1.]])
\ No newline at end of file