author    | wreise <wojciech.reise@epfl.ch> | 2022-08-10 10:28:26 +0200
committer | wreise <wojciech.reise@epfl.ch> | 2022-08-10 10:28:26 +0200
commit    | b9a31fed2b90ee69a5c23047fd2ff1c264ad9605 (patch)
tree      | c7a6e319c5cc6ba9c7963b08f7a4cdf3c776c922 /src/python/test/test_diff.py
parent    | 60e57f9c86a7aae67c2931200066aba059ec2721 (diff)
parent    | 4f83706aa1263c04cb5e8763e1e8eb6c580bed3c (diff)
Merge branch 'master' into optimize_silhouettes
Diffstat (limited to 'src/python/test/test_diff.py')
-rw-r--r-- | src/python/test/test_diff.py | 78
1 file changed, 78 insertions, 0 deletions
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
new file mode 100644
index 00000000..dca001a9
--- /dev/null
+++ b/src/python/test/test_diff.py
@@ -0,0 +1,78 @@
+from gudhi.tensorflow import *
+import numpy as np
+import tensorflow as tf
+import gudhi as gd
+
+def test_rips_diff():
+
+    Xinit = np.array([[1.,1.],[2.,2.]], dtype=np.float32)
+    X = tf.Variable(initial_value=Xinit, trainable=True)
+    rl = RipsLayer(maximum_edge_length=2., homology_dimensions=[0])
+
+    with tf.GradientTape() as tape:
+        dgm = rl.call(X)[0][0]
+        loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+    grads = tape.gradient(loss, [X])
+    assert tf.norm(grads[0]-tf.constant([[-.5,-.5],[.5,.5]]),1) <= 1e-6
+
+def test_cubical_diff():
+
+    Xinit = np.array([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=np.float32)
+    X = tf.Variable(initial_value=Xinit, trainable=True)
+    cl = CubicalLayer(homology_dimensions=[0])
+
+    with tf.GradientTape() as tape:
+        dgm = cl.call(X)[0][0]
+        loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+    grads = tape.gradient(loss, [X])
+    assert tf.norm(grads[0]-tf.constant([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]]),1) <= 1e-6
+
+def test_nonsquare_cubical_diff():
+
+    Xinit = np.array([[-1.,1.,0.],[1.,1.,1.]], dtype=np.float32)
+    X = tf.Variable(initial_value=Xinit, trainable=True)
+    cl = CubicalLayer(homology_dimensions=[0])
+
+    with tf.GradientTape() as tape:
+        dgm = cl.call(X)[0][0]
+        loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+    grads = tape.gradient(loss, [X])
+    assert tf.norm(grads[0]-tf.constant([[0.,0.5,-0.5],[0.,0.,0.]]),1) <= 1e-6
+
+def test_st_diff():
+
+    st = gd.SimplexTree()
+    st.insert([0])
+    st.insert([1])
+    st.insert([2])
+    st.insert([3])
+    st.insert([4])
+    st.insert([5])
+    st.insert([6])
+    st.insert([7])
+    st.insert([8])
+    st.insert([9])
+    st.insert([10])
+    st.insert([0, 1])
+    st.insert([1, 2])
+    st.insert([2, 3])
+    st.insert([3, 4])
+    st.insert([4, 5])
+    st.insert([5, 6])
+    st.insert([6, 7])
+    st.insert([7, 8])
+    st.insert([8, 9])
+    st.insert([9, 10])
+
+    Finit = np.array([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=np.float32)
+    F = tf.Variable(initial_value=Finit, trainable=True)
+    sl = LowerStarSimplexTreeLayer(simplextree=st, homology_dimensions=[0])
+
+    with tf.GradientTape() as tape:
+        dgm = sl.call(F)[0][0]
+        loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
+    grads = tape.gradient(loss, [F])
+
+    assert tf.math.reduce_all(tf.math.equal(grads[0].indices, tf.constant([2,4])))
+    assert tf.math.reduce_all(tf.math.equal(grads[0].values, tf.constant([-1.,1.])))
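Each test above follows the same pattern: wrap the input in a trainable tf.Variable, push it through one of the gudhi.tensorflow layers inside a tf.GradientTape, reduce the resulting dimension-0 diagram to a scalar loss, and check the gradient against a hand-computed value. Below is a minimal standalone sketch of that pattern, reusing the input and expected gradient from test_cubical_diff; it assumes a GUDHI build that ships the gudhi.tensorflow module, and the final print is only for illustration.

# Minimal sketch of the gradient check exercised by test_cubical_diff.
import numpy as np
import tensorflow as tf
from gudhi.tensorflow import CubicalLayer

# Filtration values on a 3x3 grid, stored as a trainable variable.
Xinit = np.array([[0., 2., 2.], [2., 2., 2.], [2., 2., 1.]], dtype=np.float32)
X = tf.Variable(initial_value=Xinit, trainable=True)
cl = CubicalLayer(homology_dimensions=[0])

with tf.GradientTape() as tape:
    # [0][0] selects the finite part of the dimension-0 diagram,
    # as in the tests above.
    dgm = cl.call(X)[0][0]
    # Sum of squared half-persistences of the diagram points.
    loss = tf.math.reduce_sum(tf.square(.5 * (dgm[:, 1] - dgm[:, 0])))

# Differentiate the loss with respect to the input filtration values.
grads = tape.gradient(loss, [X])
print(grads[0])  # expected: [[0., 0., 0.], [0., .5, 0.], [0., 0., -.5]]

Note that tape.gradient is called after the with block, once the tape has finished recording; the tests use the same ordering.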