author    MathieuCarriere <mathieu.carriere3@gmail.com>  2022-04-27 11:58:39 +0200
committer MathieuCarriere <mathieu.carriere3@gmail.com>  2022-04-27 11:58:39 +0200
commit    b9119a92c5316a36e0ae8ff041f0625b51973321 (patch)
tree      0b6bc968020af2040f8dad4b2fc4cc8eb0873f6d
parent    cc723a7a3735a44491bd1085b6bb6c47272b73ed (diff)
update doc + remove numpy/tensorflow mixup
-rw-r--r--  src/python/gudhi/tensorflow/cubical_layer.py                  |  2 +-
-rw-r--r--  src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py  |  2 +-
-rw-r--r--  src/python/gudhi/tensorflow/rips_layer.py                     |  2 +-
-rw-r--r--  src/python/test/test_diff.py                                  | 10 +++++-----
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/python/gudhi/tensorflow/cubical_layer.py b/src/python/gudhi/tensorflow/cubical_layer.py
index 31c44205..8db46a8e 100644
--- a/src/python/gudhi/tensorflow/cubical_layer.py
+++ b/src/python/gudhi/tensorflow/cubical_layer.py
@@ -58,7 +58,7 @@ class CubicalLayer(tf.keras.layers.Layer):
X (TensorFlow variable): pixel values of the cubical complex
Returns:
- dgms (list of TensorFlow variables): list of cubical persistence diagrams of length self.dimensions, where each element contains a finite persistence diagram of shape [num_finite_points, 2]
+ dgms (list of TensorFlow variables): list of cubical persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, each element of this list is an array containing the finite part of the corresponding persistence diagram, of shape [num_finite_points, 2]. Note that the essential part is not returned: it is always empty in cubical persistence diagrams, except in homology dimension zero, where it always contains a single point whose abscissa is the smallest value in the complex and whose ordinate is infinite.
"""
# Compute pixels associated to positive and negative simplices
# Don't compute gradient for this operation
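A minimal usage sketch of the return convention documented above, assuming the CubicalLayer(dimensions=[0]) constructor used by this revision's test suite; illustrative only, not part of the patch:

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import CubicalLayer

    # Pixel values of the cubical complex, as a trainable variable
    X = tf.Variable(np.random.rand(10, 10).astype(np.float32), trainable=True)
    cl = CubicalLayer(dimensions=[0])
    with tf.GradientTape() as tape:
        dgm = cl.call(X)[0]  # finite part of the dimension-0 diagram, shape [n, 2]
        loss = tf.math.reduce_sum(dgm[:, 1] - dgm[:, 0])  # total persistence
    grads = tape.gradient(loss, [X])  # gradient has the same shape as X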
diff --git a/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
index cf7df6de..a2e48d8a 100644
--- a/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
+++ b/src/python/gudhi/tensorflow/lower_star_simplex_tree_layer.py
@@ -65,7 +65,7 @@ class LowerStarSimplexTreeLayer(tf.keras.layers.Layer):
F (TensorFlow variable): filter function values over the vertices of the simplex tree. The ith entry of F corresponds to vertex i in self.simplextree
Returns:
- dgms (list of tuple of TensorFlow variables): list of lower-star persistence diagrams of length self.dimensions, where each element of the list is a tuple that contains the finite and essential persistence diagrams of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ dgms (list of tuple of TensorFlow variables): list of lower-star persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively.
"""
# Don't try to compute gradients for the vertex pairs
indices = _LowerStarSimplexTree(self.simplextree, filtration.numpy(), self.dimensions)
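A minimal usage sketch of the tuple convention documented above, assuming the LowerStarSimplexTreeLayer(simplextree=..., dimensions=[0]) constructor of this revision; the simplex tree and filtration values below are made up for illustration:

    import gudhi as gd
    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import LowerStarSimplexTreeLayer

    # Path graph on 5 vertices; lower-star filtration induced by vertex values F
    st = gd.SimplexTree()
    for edge in [[0, 1], [1, 2], [2, 3], [3, 4]]:
        st.insert(edge)
    F = tf.Variable(np.array([0., 2., 1., 3., .5], dtype=np.float32), trainable=True)
    sl = LowerStarSimplexTreeLayer(simplextree=st, dimensions=[0])
    with tf.GradientTape() as tape:
        finite_dgm, essential_dgm = sl.call(F)[0]  # (finite, essential) for dimension 0
        loss = tf.math.reduce_sum(finite_dgm[:, 1] - finite_dgm[:, 0])
    grads = tape.gradient(loss, [F])  # sparse tf.IndexedSlices over the paired vertices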
diff --git a/src/python/gudhi/tensorflow/rips_layer.py b/src/python/gudhi/tensorflow/rips_layer.py
index 7b5edfa3..b5b58ab4 100644
--- a/src/python/gudhi/tensorflow/rips_layer.py
+++ b/src/python/gudhi/tensorflow/rips_layer.py
@@ -63,7 +63,7 @@ class RipsLayer(tf.keras.layers.Layer):
X (TensorFlow variable): point cloud of shape [number of points, number of dimensions]
Returns:
- dgms (list of tuple of TensorFlow variables): list of Rips persistence diagrams of length self.dimensions, where each element of the list is a tuple that contains the finite and essential persistence diagrams of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively
+ dgms (list of tuple of TensorFlow variables): list of Rips persistence diagrams. The length of this list is the same as that of dimensions, i.e., there is one persistence diagram per homology dimension provided in the input list dimensions. Moreover, the finite and essential parts of the persistence diagrams are provided separately: each element of this list is a tuple of size two that contains the finite and essential parts of the corresponding persistence diagram, of shapes [num_finite_points, 2] and [num_essential_points, 1] respectively.
"""
# Compute distance matrix
DX = tf.norm(tf.expand_dims(X, 1)-tf.expand_dims(X, 0), axis=2)
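A minimal usage sketch mirroring test_rips_diff below, with the RipsLayer constructor as used in this revision's test suite:

    import numpy as np
    import tensorflow as tf
    from gudhi.tensorflow import RipsLayer

    # Two points at distance sqrt(2); a single finite dimension-0 pair
    X = tf.Variable(np.array([[1., 1.], [2., 2.]], dtype=np.float32), trainable=True)
    rl = RipsLayer(maximum_edge_length=2., dimensions=[0])
    with tf.GradientTape() as tape:
        finite_dgm = rl.call(X)[0][0]  # finite part of the dimension-0 diagram
        loss = tf.math.reduce_sum(tf.square(.5 * (finite_dgm[:, 1] - finite_dgm[:, 0])))
    grads = tape.gradient(loss, [X])  # d(loss)/dX, shape [2, 2]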
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
index e0c99d07..2529cf22 100644
--- a/src/python/test/test_diff.py
+++ b/src/python/test/test_diff.py
@@ -13,7 +13,7 @@ def test_rips_diff():
dgm = rl.call(X)[0][0]
loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
grads = tape.gradient(loss, [X])
- assert np.abs(grads[0].numpy()-np.array([[-.5,-.5],[.5,.5]])).sum() <= 1e-6
+ assert tf.norm(grads[0]-tf.constant([[-.5,-.5],[.5,.5]]),1) <= 1e-6
def test_cubical_diff():
@@ -25,7 +25,7 @@ def test_cubical_diff():
dgm = cl.call(X)[0]
loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
grads = tape.gradient(loss, [X])
- assert np.abs(grads[0].numpy()-np.array([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]])).sum() <= 1e-6
+ assert tf.norm(grads[0]-tf.constant([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]]),1) <= 1e-6
def test_nonsquare_cubical_diff():
@@ -37,7 +37,7 @@ def test_nonsquare_cubical_diff():
dgm = cl.call(X)[0]
loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
grads = tape.gradient(loss, [X])
- assert np.abs(grads[0].numpy()-np.array([[0.,0.5,-0.5],[0.,0.,0.]])).sum() <= 1e-6
+ assert tf.norm(grads[0]-tf.constant([[0.,0.5,-0.5],[0.,0.,0.]]),1) <= 1e-6
def test_st_diff():
@@ -73,6 +73,6 @@ def test_st_diff():
loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
grads = tape.gradient(loss, [F])
- assert np.array_equal(np.array(grads[0].indices), np.array([2,4]))
- assert np.array_equal(np.array(grads[0].values), np.array([-1,1]))
+ assert tf.math.reduce_all(tf.math.equal(grads[0].indices, tf.constant([2,4])))
+ assert tf.math.reduce_all(tf.math.equal(grads[0].values, tf.constant([-1.,1.])))
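The swapped assertions are numerically equivalent: with the default axis=None, tf.norm(t, 1) treats t as a flattened vector and returns the sum of absolute entries, exactly the quantity np.abs(...).sum() computed before, and tf.math.reduce_all(tf.math.equal(a, b)) mirrors np.array_equal for equal-shaped tensors. A quick sanity check with made-up values:

    import numpy as np
    import tensorflow as tf

    t = tf.constant([[0., .5], [-.5, 0.]])
    # ord=1 with axis=None: 1-norm of the flattened tensor, i.e. sum of |entries|
    assert abs(tf.norm(t, 1).numpy() - np.abs(t.numpy()).sum()) <= 1e-6
    # element-wise equality reduced over all entries, like np.array_equal
    assert tf.math.reduce_all(tf.math.equal(t, t))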