diff options
Diffstat (limited to 'src/python/doc/ls_simplex_tree_tflow_itf_ref.rst')
-rw-r--r-- | src/python/doc/ls_simplex_tree_tflow_itf_ref.rst | 64 |
1 file changed, 64 insertions, 0 deletions
diff --git a/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst new file mode 100644 index 00000000..0a6764fa --- /dev/null +++ b/src/python/doc/ls_simplex_tree_tflow_itf_ref.rst @@ -0,0 +1,64 @@ +:orphan: + +.. To get rid of WARNING: document isn't included in any toctree + +TensorFlow layer for lower-star persistence on simplex trees +############################################################ + +.. include:: differentiation_sum.inc + +Example of gradient computed from lower-star filtration of a simplex tree +------------------------------------------------------------------------- + +.. testcode:: + + from gudhi.tensorflow import LowerStarSimplexTreeLayer + import tensorflow as tf + import gudhi as gd + + st = gd.SimplexTree() + st.insert([0]) + st.insert([1]) + st.insert([2]) + st.insert([3]) + st.insert([4]) + st.insert([5]) + st.insert([6]) + st.insert([7]) + st.insert([8]) + st.insert([9]) + st.insert([10]) + st.insert([0, 1]) + st.insert([1, 2]) + st.insert([2, 3]) + st.insert([3, 4]) + st.insert([4, 5]) + st.insert([5, 6]) + st.insert([6, 7]) + st.insert([7, 8]) + st.insert([8, 9]) + st.insert([9, 10]) + + F = tf.Variable([6.,4.,3.,4.,5.,4.,3.,2.,3.,4.,5.], dtype=tf.float32, trainable=True) + sl = LowerStarSimplexTreeLayer(simplextree=st, dimension=0) + + with tf.GradientTape() as tape: + dgm = sl.call(F) + loss = tf.math.reduce_sum(tf.square(.5*(dgm[:,1]-dgm[:,0]))) + + grads = tape.gradient(loss, [F]) + print(grads[0].indices.numpy()) + print(grads[0].values.numpy()) + +.. testoutput:: + + [2 4] + [-1. 1.] + +Documentation for LowerStarSimplexTreeLayer +------------------------------------------- + +.. autoclass:: gudhi.tensorflow.LowerStarSimplexTreeLayer + :members: + :special-members: __init__ + :show-inheritance: |