Diffstat (limited to 'src/python')
-rw-r--r--  src/python/CMakeLists.txt                                      11
-rw-r--r--  src/python/doc/cubical_complex_sum.inc                         25
-rw-r--r--  src/python/doc/cubical_complex_tflow_itf_ref.rst               40
-rw-r--r--  src/python/doc/differentiation_sum.inc                         12
-rw-r--r--  src/python/doc/installation.rst                                 6
-rw-r--r--  src/python/doc/ls_simplex_tree_tflow_itf_ref.rst               53
-rw-r--r--  src/python/doc/rips_complex_sum.inc                             5
-rw-r--r--  src/python/doc/rips_complex_tflow_itf_ref.rst                  48
-rw-r--r--  src/python/doc/simplex_tree_sum.inc                            23
-rw-r--r--  src/python/gudhi/tensorflow/__init__.py                         5
-rw-r--r--  src/python/gudhi/tensorflow/cubical_layer.py                   76
-rw-r--r--  src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py  84
-rw-r--r--  src/python/gudhi/tensorflow/rips_layer.py                      91
-rw-r--r--  src/python/test/test_diff.py                                   78
14 files changed, 532 insertions, 25 deletions
diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt
index fbfb0d1f..26c229d5 100644
--- a/src/python/CMakeLists.txt
+++ b/src/python/CMakeLists.txt
@@ -70,6 +70,7 @@ if(PYTHONINTERP_FOUND)
set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_strong_witness_complex', ")
# Modules that should not be auto-imported in __init__.py
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ")
+ set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'tensorflow', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ")
set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ")
@@ -283,7 +284,8 @@ if(PYTHONINTERP_FOUND)
# Other .py files
file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/")
- file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
+ file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py")
file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi")
@@ -556,6 +558,11 @@ if(PYTHONINTERP_FOUND)
add_gudhi_py_test(test_representations)
endif()
+ # Differentiation
+ if(TENSORFLOW_FOUND)
+ add_gudhi_py_test(test_diff)
+ endif()
+
# Betti curves
if(SKLEARN_FOUND AND SCIPY_FOUND)
add_gudhi_py_test(test_betti_curve_representations)
@@ -595,4 +602,4 @@ if(PYTHONINTERP_FOUND)
else(PYTHONINTERP_FOUND)
message("++ Python module will not be compiled because no Python interpreter was found")
set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "python" CACHE INTERNAL "GUDHI_MISSING_MODULES")
-endif(PYTHONINTERP_FOUND)
\ No newline at end of file
+endif(PYTHONINTERP_FOUND)
diff --git a/src/python/doc/cubical_complex_sum.inc b/src/python/doc/cubical_complex_sum.inc
index 87db184d..90ec9fc2 100644
--- a/src/python/doc/cubical_complex_sum.inc
+++ b/src/python/doc/cubical_complex_sum.inc
@@ -1,14 +1,17 @@
.. table::
:widths: 30 40 30
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
- | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | |
- | :alt: Cubical complex representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | | |
- | | | :License: MIT |
- | | | |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------+-----------------------------+
- | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
- | | * :doc:`periodic_cubical_complex_ref` |
- +--------------------------------------------------------------------------+----------------------------------------------------------------------------------------------------+
+ +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
+ | .. figure:: | The cubical complex represents a grid as a cell complex with | :Author: Pawel Dlotko |
+ | ../../doc/Bitmap_cubical_complex/Cubical_complex_representation.png | cells of all dimensions. | |
+ | :alt: Cubical complex representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | | |
+ | | | :License: MIT |
+ | | | |
+ +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`cubical_complex_user` | * :doc:`cubical_complex_ref` |
+ | | * :doc:`periodic_cubical_complex_ref` |
+ +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
+ | | * :doc:`cubical_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | | | |
+ +--------------------------------------------------------------------------+----------------------------------------------------------------------+---------------------------------------------------------+
diff --git a/src/python/doc/cubical_complex_tflow_itf_ref.rst b/src/python/doc/cubical_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..18b97adf
--- /dev/null
+++ b/src/python/doc/cubical_complex_tflow_itf_ref.rst
@@ -0,0 +1,40 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for cubical persistence
+########################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from cubical persistence
+-----------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import CubicalLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=tf.float32, trainable=True)
+ cl = CubicalLayer(dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testoutput::
+
+ [[ 0. 0. 0. ]
+ [ 0. 0.5 0. ]
+ [ 0. 0. -0.5]]
+
+Documentation for CubicalLayer
+------------------------------
+
+.. autoclass:: gudhi.tensorflow.CubicalLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/differentiation_sum.inc b/src/python/doc/differentiation_sum.inc
new file mode 100644
index 00000000..3aec33df
--- /dev/null
+++ b/src/python/doc/differentiation_sum.inc
@@ -0,0 +1,12 @@
+.. list-table::
+ :widths: 40 30 30
+ :header-rows: 0
+
+ * - :Since: GUDHI 3.5.0
+ - :License: MIT
+ - :Requires: `TensorFlow <installation.html#tensorflow>`_
+
+We provide TensorFlow 2 layers that support automatic differentiation through the computation of persistence diagrams from the complexes available in the Gudhi library.
+This includes simplex trees, cubical complexes and Vietoris-Rips complexes. Detailed examples of how to use these layers in practice are available
+in the following `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-optimization.ipynb>`_. Note that even if TensorFlow GPU is enabled, all
+internal computations using Gudhi will be done on CPU.
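A minimal sketch of the gradient-descent loop these layers are meant to support, using CubicalLayer as in the example above; the optimizer, learning rate and iteration count are illustrative assumptions rather than part of this changeset::

    from gudhi.tensorflow import CubicalLayer
    import tensorflow as tf

    # the pixel values are the trainable parameters of the model
    X = tf.Variable([[0., 2., 2.], [2., 2., 2.], [2., 2., 1.]], dtype=tf.float32, trainable=True)
    layer = CubicalLayer(dimensions=[0])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

    for epoch in range(10):
        with tf.GradientTape() as tape:
            dgm = layer.call(X)[0][0]
            # total squared half-persistence of the dimension-0 diagram
            loss = tf.math.reduce_sum(tf.square(.5 * (dgm[:, 1] - dgm[:, 0])))
        gradients = tape.gradient(loss, [X])
        optimizer.apply_gradients(zip(gradients, [X]))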
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index cff84691..dd476054 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -396,7 +396,11 @@ mathematics, science, and engineering.
TensorFlow
----------
-`TensorFlow <https://www.tensorflow.org>`_ is currently only used in some automatic differentiation tests.
+The :doc:`cubical complex </cubical_complex_tflow_itf_ref>`, :doc:`simplex tree </ls_simplex_tree_tflow_itf_ref>`
+and :doc:`Rips complex </rips_complex_tflow_itf_ref>` modules require `TensorFlow <https://www.tensorflow.org>`_
+so that they can be incorporated into neural networks.
+
+`TensorFlow <https://www.tensorflow.org>`_ is also used in some automatic differentiation tests.
Bug reports and contributions
*****************************
diff --git a/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
new file mode 100644
index 00000000..b8518cdb
--- /dev/null
+++ b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst
@@ -0,0 +1,53 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for lower-star persistence on simplex trees
+############################################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from lower-star filtration of a simplex tree
+-------------------------------------------------------------------------
+
+.. testcode::
+
+ from gudhi.tensorflow import LowerStarSimplexTreeLayer
+ import tensorflow as tf
+ import gudhi as gd
+
+ st = gd.SimplexTree()
+ st.insert([0, 1])
+ st.insert([1, 2])
+ st.insert([2, 3])
+ st.insert([3, 4])
+ st.insert([4, 5])
+ st.insert([5, 6])
+ st.insert([6, 7])
+ st.insert([7, 8])
+ st.insert([8, 9])
+ st.insert([9, 10])
+
+ F = tf.Variable([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=tf.float32, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [F])
+ print(grads[0].indices.numpy())
+ print(grads[0].values.numpy())
+
+.. testoutput::
+
+ [2 4]
+ [-1. 1.]
+
+Documentation for LowerStarSimplexTreeLayer
+-------------------------------------------
+
+.. autoclass:: gudhi.tensorflow.LowerStarSimplexTreeLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/rips_complex_sum.inc b/src/python/doc/rips_complex_sum.inc
index 2cb24990..6931ebee 100644
--- a/src/python/doc/rips_complex_sum.inc
+++ b/src/python/doc/rips_complex_sum.inc
@@ -11,4 +11,7 @@
| | | |
+----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
| * :doc:`rips_complex_user` | * :doc:`rips_complex_ref` |
- +----------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
+ | | * :doc:`rips_complex_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+----------------------------------------------------------------------------------+
diff --git a/src/python/doc/rips_complex_tflow_itf_ref.rst b/src/python/doc/rips_complex_tflow_itf_ref.rst
new file mode 100644
index 00000000..6c65c562
--- /dev/null
+++ b/src/python/doc/rips_complex_tflow_itf_ref.rst
@@ -0,0 +1,48 @@
+:orphan:
+
+.. To get rid of WARNING: document isn't included in any toctree
+
+TensorFlow layer for Vietoris-Rips persistence
+##############################################
+
+.. include:: differentiation_sum.inc
+
+Example of gradient computed from Vietoris-Rips persistence
+-----------------------------------------------------------
+
+.. testsetup::
+
+ import numpy
+ numpy.set_printoptions(precision=4)
+
+.. testcode::
+
+ from gudhi.tensorflow import RipsLayer
+ import tensorflow as tf
+
+ X = tf.Variable([[1.,1.],[2.,2.]], dtype=tf.float32, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+
+ grads = tape.gradient(loss, [X])
+ print(grads[0].numpy())
+
+.. testcleanup::
+
+ numpy.set_printoptions(precision=8)
+
+.. testoutput::
+
+ [[-0.5 -0.5]
+ [ 0.5 0.5]]
+
+Documentation for RipsLayer
+---------------------------
+
+.. autoclass:: gudhi.tensorflow.RipsLayer
+ :members:
+ :special-members: __init__
+ :show-inheritance:
diff --git a/src/python/doc/simplex_tree_sum.inc b/src/python/doc/simplex_tree_sum.inc
index a8858f16..3ad1292c 100644
--- a/src/python/doc/simplex_tree_sum.inc
+++ b/src/python/doc/simplex_tree_sum.inc
@@ -1,13 +1,16 @@
.. table::
:widths: 30 40 30
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
- | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
- | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 |
- | :figclass: align-center | The data structure is described in | |
- | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT |
- | | | |
- +----------------------------------------------------------------+------------------------------------------------------------------------+-----------------------------+
- | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
- +----------------------------------------------------------------+------------------------------------------------------------------------------------------------------+
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | .. figure:: | The simplex tree is an efficient and flexible data structure for | :Author: Clément Maria |
+ | ../../doc/Simplex_tree/Simplex_tree_representation.png | representing general (filtered) simplicial complexes. | |
+ | :alt: Simplex tree representation | | :Since: GUDHI 2.0.0 |
+ | :figclass: align-center | The data structure is described in | |
+ | | :cite:`boissonnatmariasimplextreealgorithmica` | :License: MIT |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | * :doc:`simplex_tree_user` | * :doc:`simplex_tree_ref` |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
+ | | * :doc:`ls_simplex_tree_tflow_itf_ref` | :requires: `TensorFlow <installation.html#tensorflow>`_ |
+ | | | |
+ +----------------------------------------------------------------+------------------------------------------------------------------------+---------------------------------------------------------+
diff --git a/src/python/gudhi/tensorflow/__init__.py b/src/python/gudhi/tensorflow/__init__.py
new file mode 100644
index 00000000..1599cf52
--- /dev/null
+++ b/src/python/gudhi/tensorflow/__init__.py
@@ -0,0 +1,5 @@
+from .cubical_layer import CubicalLayer
+from .lower_star_simplex_tree_layer import LowerStarSimplexTreeLayer
+from .rips_layer import RipsLayer
+
+__all__ = ["LowerStarSimplexTreeLayer", "RipsLayer", "CubicalLayer"]
diff --git a/src/python/gudhi/tensorflow/cubical_layer.py b/src/python/gudhi/tensorflow/cubical_layer.py
new file mode 100644
index 00000000..369b0e54
--- /dev/null
+++ b/src/python/gudhi/tensorflow/cubical_layer.py
@@ -0,0 +1,76 @@
+import numpy as np
+import tensorflow as tf
+from ..cubical_complex import CubicalComplex
+
+######################
+# Cubical filtration #
+######################
+
+# The parameters of the model are the pixel values.
+
+def _Cubical(Xflat, Xdim, dimensions):
+ # Parameters: Xflat (flattened image),
+ # Xdim (shape of non-flattened image)
+ # dimensions (homology dimensions)
+
+ # Compute the persistence pairs with Gudhi
+ # We reverse the dimensions because CubicalComplex uses Fortran ordering
+ cc = CubicalComplex(dimensions=Xdim[::-1], top_dimensional_cells=Xflat)
+ cc.compute_persistence()
+
+ # Retrieve and output image indices/pixels corresponding to positive and negative simplices
+ cof_pp = cc.cofaces_of_persistence_pairs()
+
+ L_cofs = []
+ for dim in dimensions:
+
+ try:
+ cof = cof_pp[0][dim]
+ except IndexError:
+ cof = np.array([])
+
+ L_cofs.append(np.array(cof, dtype=np.int32))
+
+ return L_cofs
+
+class CubicalLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing the persistent homology of a cubical complex
+ """
+ def __init__(self, dimensions, min_persistence=None, **kwargs):
+ """
+ Constructor for the CubicalLayer class
+
+ Parameters:
+ dimensions (List[int]): homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = dimensions
+ self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute persistence diagram associated to a cubical complex filtered by some pixel values
+
+ Parameters:
+ X (TensorFlow variable): pixel values of the cubical complex
+
+ Returns:
+ dgms (list of TensorFlow variables): list of cubical persistence diagrams, one per dimension in self.dimensions, where each element contains a finite persistence diagram of shape [num_finite_points, 2]
+ """
+ # Compute pixels associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ Xflat = tf.reshape(X, [-1])
+ Xdim = X.shape
+ indices_list = _Cubical(Xflat.numpy(), Xdim, self.dimensions)
+ # Get persistence diagram by simply picking the corresponding entries in the image
+ self.dgms = [tf.reshape(tf.gather(Xflat, indices), [-1,2]) for indices in indices_list]
+ for idx_dim in range(len(self.min_persistence)):
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ finite_dgm = self.dgms[idx_dim]
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms[idx_dim] = tf.gather(finite_dgm, indices=persistent_indices)
+ return self.dgms
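To make the index bookkeeping in _Cubical concrete: the arrays returned by cofaces_of_persistence_pairs index into the flattened pixel array passed as top_dimensional_cells, which is why call can build the diagram with a plain tf.gather on Xflat. A small standalone sketch, assuming only the public gudhi.CubicalComplex API::

    import numpy as np
    from gudhi import CubicalComplex

    X = np.array([[0., 2., 2.], [2., 2., 2.], [2., 2., 1.]])
    # reverse the shape, as _Cubical does, to account for Fortran ordering
    cc = CubicalComplex(dimensions=X.shape[::-1], top_dimensional_cells=X.flatten())
    cc.compute_persistence()
    # regular (finite) pairs in homology dimension 0, as flat pixel indices
    finite_pairs = cc.cofaces_of_persistence_pairs()[0][0]
    for birth_idx, death_idx in finite_pairs:
        # recover 2D pixel coordinates from the flat indices
        print(np.unravel_index(birth_idx, X.shape), np.unravel_index(death_idx, X.shape))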
diff --git a/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
new file mode 100644
index 00000000..cf7df6de
--- /dev/null
+++ b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
@@ -0,0 +1,84 @@
+import numpy as np
+import tensorflow as tf
+
+#########################################
+# Lower star filtration on simplex tree #
+#########################################
+
+# The parameters of the model are the vertex function values of the simplex tree.
+
+def _LowerStarSimplexTree(simplextree, filtration, dimensions):
+ # Parameters: simplextree (simplex tree on which to compute persistence),
+ # filtration (function values on the vertices of the simplex tree),
+ # dimensions (homology dimensions)
+
+ simplextree.reset_filtration(-np.inf, 0)
+
+ # Assign new filtration values
+ for i in range(simplextree.num_vertices()):
+ simplextree.assign_filtration([i], filtration[i])
+ simplextree.make_filtration_non_decreasing()
+
+ # Compute persistence diagram
+ simplextree.compute_persistence()
+
+ # Get vertex pairs for optimization. First, get all simplex pairs
+ pairs = simplextree.lower_star_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ finite_pairs = pairs[0][dimension] if len(pairs[0]) >= dimension+1 else np.empty(shape=[0,2])
+ essential_pairs = pairs[1][dimension] if len(pairs[1]) >= dimension+1 else np.empty(shape=[0,1])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class LowerStarSimplexTreeLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing lower-star persistence out of a simplex tree
+ """
+ def __init__(self, simplextree, dimensions, min_persistence=None, **kwargs):
+ """
+ Constructor for the LowerStarSimplexTreeLayer class
+
+ Parameters:
+ simplextree (gudhi.SimplexTree): underlying simplex tree. Its vertices MUST be numbered with consecutive integers from 0 to n-1, where n is its number of vertices. Note that its filtration values are modified in each call of the class.
+ dimensions (List[int]): homology dimensions
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.dimensions = dimensions
+ self.simplextree = simplextree
+ self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, filtration):
+ """
+ Compute lower-star persistence diagram associated to a function defined on the vertices of the simplex tree
+
+ Parameters:
+ filtration (TensorFlow variable): filter function values over the vertices of the simplex tree. The ith entry of filtration corresponds to vertex i in self.simplextree
+
+ Returns:
+ dgms (list of tuple of TensorFlow variables): list of lower-star persistence diagrams, one per dimension in self.dimensions, where each element of the list is a tuple that contains the finite and essential persistence diagrams of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ """
+ # Don't try to compute gradients for the vertex pairs
+ indices = _LowerStarSimplexTree(self.simplextree, filtration.numpy(), self.dimensions)
+ # Get persistence diagrams
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ finite_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][0]), [-1,2])
+ essential_dgm = tf.reshape(tf.gather(filtration, indices[idx_dim][1]), [-1,1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
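Since the diagram entries are produced with tf.gather on the filtration vector, the gradient computed for F is a sparse tf.IndexedSlices touching only the vertices involved in a persistence pair (the documentation example above prints grads[0].indices and grads[0].values for exactly this reason). A minimal descent-loop sketch; the toy simplex tree, filtration values and SGD settings are illustrative assumptions::

    import gudhi as gd
    import tensorflow as tf
    from gudhi.tensorflow import LowerStarSimplexTreeLayer

    st = gd.SimplexTree()
    for edge in [[0, 1], [1, 2], [2, 3]]:
        st.insert(edge)  # vertices 0..3 are inserted along with the edges

    F = tf.Variable([1., 0., 2., 0.5], dtype=tf.float32, trainable=True)
    layer = LowerStarSimplexTreeLayer(simplextree=st, dimensions=[0])
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

    for _ in range(5):
        with tf.GradientTape() as tape:
            dgm = layer.call(F)[0][0]  # finite part of the dimension-0 diagram
            loss = tf.math.reduce_sum(tf.square(.5 * (dgm[:, 1] - dgm[:, 0])))
        grads = tape.gradient(loss, [F])
        # grads[0] is a tf.IndexedSlices; SGD applies it as a sparse update
        optimizer.apply_gradients(zip(grads, [F]))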
diff --git a/src/python/gudhi/tensorflow/rips_layer.py b/src/python/gudhi/tensorflow/rips_layer.py
new file mode 100644
index 00000000..7b5edfa3
--- /dev/null
+++ b/src/python/gudhi/tensorflow/rips_layer.py
@@ -0,0 +1,91 @@
+import numpy as np
+import tensorflow as tf
+from ..rips_complex import RipsComplex
+
+############################
+# Vietoris-Rips filtration #
+############################
+
+# The parameters of the model are the point coordinates.
+
+def _Rips(DX, max_edge, dimensions):
+ # Parameters: DX (distance matrix),
+ # max_edge (maximum edge length for Rips filtration),
+ # dimensions (homology dimensions)
+
+ # Compute the persistence pairs with Gudhi
+ rc = RipsComplex(distance_matrix=DX, max_edge_length=max_edge)
+ st = rc.create_simplex_tree(max_dimension=max(dimensions)+1)
+ st.compute_persistence()
+ pairs = st.flag_persistence_generators()
+
+ L_indices = []
+ for dimension in dimensions:
+
+ if dimension == 0:
+ finite_pairs = pairs[0]
+ essential_pairs = pairs[2]
+ else:
+ finite_pairs = pairs[1][dimension-1] if len(pairs[1]) >= dimension else np.empty(shape=[0,4])
+ essential_pairs = pairs[3][dimension-1] if len(pairs[3]) >= dimension else np.empty(shape=[0,2])
+
+ finite_indices = np.array(finite_pairs.flatten(), dtype=np.int32)
+ essential_indices = np.array(essential_pairs.flatten(), dtype=np.int32)
+
+ L_indices.append((finite_indices, essential_indices))
+
+ return L_indices
+
+class RipsLayer(tf.keras.layers.Layer):
+ """
+ TensorFlow layer for computing Rips persistence out of a point cloud
+ """
+ def __init__(self, dimensions, maximum_edge_length=np.inf, min_persistence=None, **kwargs):
+ """
+ Constructor for the RipsLayer class
+
+ Parameters:
+ dimensions (List[int]): homology dimensions
+ maximum_edge_length (float): maximum edge length for the Rips complex
+ min_persistence (List[float]): minimum distance-to-diagonal of the points in the output persistence diagrams (default None, in which case 0. is used for all dimensions)
+ """
+ super().__init__(dynamic=True, **kwargs)
+ self.max_edge = maximum_edge_length
+ self.dimensions = dimensions
+ self.min_persistence = min_persistence if min_persistence is not None else [0.] * len(self.dimensions)
+ assert len(self.min_persistence) == len(self.dimensions)
+
+ def call(self, X):
+ """
+ Compute Rips persistence diagram associated to a point cloud
+
+ Parameters:
+ X (TensorFlow variable): point cloud of shape [number of points, number of dimensions]
+
+ Returns:
+ dgms (list of tuple of TensorFlow variables): list of Rips persistence diagrams, one per dimension in self.dimensions, where each element of the list is a tuple that contains the finite and essential persistence diagrams of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ """
+ # Compute distance matrix
+ DX = tf.norm(tf.expand_dims(X, 1)-tf.expand_dims(X, 0), axis=2)
+ # Compute vertices associated to positive and negative simplices
+ # Don't compute gradient for this operation
+ indices = _Rips(DX.numpy(), self.max_edge, self.dimensions)
+ # Get persistence diagrams by simply picking the corresponding entries in the distance matrix
+ self.dgms = []
+ for idx_dim, dimension in enumerate(self.dimensions):
+ cur_idx = indices[idx_dim]
+ if dimension > 0:
+ finite_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[0], [-1,2])), [-1,2])
+ essential_dgm = tf.reshape(tf.gather_nd(DX, tf.reshape(cur_idx[1], [-1,2])), [-1,1])
+ else:
+ reshaped_cur_idx = tf.reshape(cur_idx[0], [-1,3])
+ finite_dgm = tf.concat([tf.zeros([reshaped_cur_idx.shape[0],1]), tf.reshape(tf.gather_nd(DX, reshaped_cur_idx[:,1:]), [-1,1])], axis=1)
+ essential_dgm = tf.zeros([cur_idx[1].shape[0],1])
+ min_pers = self.min_persistence[idx_dim]
+ if min_pers >= 0:
+ persistent_indices = tf.where(tf.math.abs(finite_dgm[:,1]-finite_dgm[:,0]) > min_pers)
+ self.dgms.append((tf.reshape(tf.gather(finite_dgm, indices=persistent_indices),[-1,2]), essential_dgm))
+ else:
+ self.dgms.append((finite_dgm, essential_dgm))
+ return self.dgms
+
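For completeness, a sketch of moving the points themselves with RipsLayer, here maximizing (note the sign of the loss) the total persistence of the 1-dimensional features; the random point cloud, learning rate and iteration count are illustrative assumptions::

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import RipsLayer

    X = tf.Variable(np.random.uniform(size=(20, 2)).astype(np.float32), trainable=True)
    layer = RipsLayer(dimensions=[1], maximum_edge_length=np.inf)
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.05)

    for _ in range(10):
        with tf.GradientTape() as tape:
            dgm = layer.call(X)[0][0]  # finite part of the dimension-1 diagram
            # negative total persistence: descending this loss enlarges the loops
            loss = -tf.math.reduce_sum(tf.square(.5 * (dgm[:, 1] - dgm[:, 0])))
        grads = tape.gradient(loss, [X])
        optimizer.apply_gradients(zip(grads, [X]))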
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
new file mode 100644
index 00000000..bab0d10c
--- /dev/null
+++ b/src/python/test/test_diff.py
@@ -0,0 +1,78 @@
+from gudhi.tensorflow import CubicalLayer, LowerStarSimplexTreeLayer, RipsLayer
+import numpy as np
+import tensorflow as tf
+import gudhi as gd
+
+def test_rips_diff():
+
+ Xinit = np.array([[1.,1.],[2.,2.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ rl = RipsLayer(maximum_edge_length=2., dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = rl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert np.abs(grads[0].numpy()-np.array([[-.5,-.5],[.5,.5]])).sum() <= 1e-6
+
+def test_cubical_diff():
+
+ Xinit = np.array([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert np.abs(grads[0].numpy()-np.array([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]])).sum() <= 1e-6
+
+def test_nonsquare_cubical_diff():
+
+ Xinit = np.array([[-1.,1.,0.],[1.,1.,1.]], dtype=np.float32)
+ X = tf.Variable(initial_value=Xinit, trainable=True)
+ cl = CubicalLayer(dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = cl.call(X)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [X])
+ assert np.abs(grads[0].numpy()-np.array([[0.,0.5,-0.5],[0.,0.,0.]])).sum() <= 1e-6
+
+def test_st_diff():
+
+ st = gd.SimplexTree()
+ st.insert([0])
+ st.insert([1])
+ st.insert([2])
+ st.insert([3])
+ st.insert([4])
+ st.insert([5])
+ st.insert([6])
+ st.insert([7])
+ st.insert([8])
+ st.insert([9])
+ st.insert([10])
+ st.insert([0, 1])
+ st.insert([1, 2])
+ st.insert([2, 3])
+ st.insert([3, 4])
+ st.insert([4, 5])
+ st.insert([5, 6])
+ st.insert([6, 7])
+ st.insert([7, 8])
+ st.insert([8, 9])
+ st.insert([9, 10])
+
+ Finit = np.array([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=np.float32)
+ F = tf.Variable(initial_value=Finit, trainable=True)
+ sl = LowerStarSimplexTreeLayer(simplextree=st, dimensions=[0])
+
+ with tf.GradientTape() as tape:
+ dgm = sl.call(F)[0][0]
+ loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+ grads = tape.gradient(loss, [F])
+
+ assert np.array_equal(np.array(grads[0].indices), np.array([2,4]))
+ assert np.array_equal(np.array(grads[0].values), np.array([-1,1]))
+