path: root/src/python/test/test_diff.py
author     MathieuCarriere <mathieu.carriere3@gmail.com>  2021-07-05 17:42:54 +0200
committer  MathieuCarriere <mathieu.carriere3@gmail.com>  2021-07-05 17:42:54 +0200
commit     6e2b5caf7fe0f255dbafa70d6cad62ec4d7277a3 (patch)
tree       38eca0612a377b3e33fb61e127a0c13099f34ee8  /src/python/test/test_diff.py
parent     dc78f94cd3f9be37e007fdc913b26160238944e1 (diff)
removed padding
Diffstat (limited to 'src/python/test/test_diff.py')
-rw-r--r--  src/python/test/test_diff.py | 52
1 file changed, 39 insertions(+), 13 deletions(-)
diff --git a/src/python/test/test_diff.py b/src/python/test/test_diff.py
index d42e25cd..129b9f03 100644
--- a/src/python/test/test_diff.py
+++ b/src/python/test/test_diff.py
@@ -1,41 +1,67 @@
 from gudhi.differentiation import *
 import numpy as np
 import tensorflow as tf
+import gudhi as gd
 def test_rips_diff():
     Xinit = np.array([[1.,1.],[2.,2.]], dtype=np.float32)
     X = tf.Variable(initial_value=Xinit, trainable=True)
-    model = RipsModel(X=X, mel=2., dim=0, card=10)
+    rl = RipsLayer(maximum_edge_length=2., dimension=0)
     with tf.GradientTape() as tape:
-        dgm = model.call()
+        dgm = rl.call(X)
         loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
-        grads = tape.gradient(loss, [X])
-        assert np.abs(grads[0].numpy()-np.array([[-.5,-.5],[.5,.5]])).sum() <= 1e-6
+    grads = tape.gradient(loss, [X])
+    assert np.abs(grads[0].numpy()-np.array([[-.5,-.5],[.5,.5]])).sum() <= 1e-6
 def test_cubical_diff():
     Xinit = np.array([[0.,2.,2.],[2.,2.,2.],[2.,2.,1.]], dtype=np.float32)
     X = tf.Variable(initial_value=Xinit, trainable=True)
-    model = CubicalModel(X, dim=0, card=10)
+    cl = CubicalLayer(dimension=0)
     with tf.GradientTape() as tape:
-        dgm = model.call()
+        dgm = cl.call(X)
         loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
-        grads = tape.gradient(loss, [X])
-        assert np.abs(grads[0].numpy()-np.array([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]])).sum() <= 1e-6
+    grads = tape.gradient(loss, [X])
+    assert np.abs(grads[0].numpy()-np.array([[0.,0.,0.],[0.,.5,0.],[0.,0.,-.5]])).sum() <= 1e-6
 def test_st_diff():
+    st = gd.SimplexTree()
+    st.insert([0])
+    st.insert([1])
+    st.insert([2])
+    st.insert([3])
+    st.insert([4])
+    st.insert([5])
+    st.insert([6])
+    st.insert([7])
+    st.insert([8])
+    st.insert([9])
+    st.insert([10])
+    st.insert([0, 1])
+    st.insert([1, 2])
+    st.insert([2, 3])
+    st.insert([3, 4])
+    st.insert([4, 5])
+    st.insert([5, 6])
+    st.insert([6, 7])
+    st.insert([7, 8])
+    st.insert([8, 9])
+    st.insert([9, 10])
+
     Finit = np.array([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=np.float32)
     F = tf.Variable(initial_value=Finit, trainable=True)
-    model = LowerStarSimplexTreeModel(F, stbase="../../../data/filtered_simplicial_complex/simplextree.txt", dim=0, card=10)
+    sl = LowerStarSimplexTreeLayer(simplextree=st, dimension=0)
     with tf.GradientTape() as tape:
-        dgm = model.call()
+        dgm = sl.call(F)
         loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0])))
-        grads = tape.gradient(loss, [F])
-        assert np.array_equal(np.array(grads[0].indices), np.array([2,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]))
-        assert np.array_equal(np.array(grads[0].values), np.array([-1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]))
+    grads = tape.gradient(loss, [F])
+
+    assert np.array_equal(np.array(grads[0].indices), np.array([2,4]))
+    assert np.array_equal(np.array(grads[0].values), np.array([-1,1]))
+
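Not part of the commit above: a minimal usage sketch of the renamed layer API these tests exercise, assuming only what the diff shows (gudhi.differentiation exporting RipsLayer, construction via maximum_edge_length=/dimension=, and call(X) returning an (n, 2) diagram tensor). The point cloud, optimizer, and loss below are illustrative choices, not taken from the commit.

# Hypothetical sketch (not from this commit): optimize a point cloud so that its
# 0-dimensional persistence grows, using the RipsLayer interface shown in test_rips_diff.
from gudhi.differentiation import RipsLayer
import numpy as np
import tensorflow as tf

X = tf.Variable(np.random.uniform(size=(20, 2)).astype(np.float32), trainable=True)
rl = RipsLayer(maximum_edge_length=2., dimension=0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)

for _ in range(10):
    with tf.GradientTape() as tape:
        dgm = rl.call(X)  # (n, 2) tensor of (birth, death) pairs
        # Minimize the negated total squared persistence, i.e. spread the points apart.
        loss = -tf.math.reduce_sum(tf.square(dgm[:, 1] - dgm[:, 0]))
    grads = tape.gradient(loss, [X])
    opt.apply_gradients(zip(grads, [X]))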