From 062989b716b4e3ec4a900373370f1cb4633ede45 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 16 Oct 2020 18:12:00 +0200 Subject: added perslay file --- src/python/gudhi/representations/perslay.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 src/python/gudhi/representations/perslay.py diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py new file mode 100644 index 00000000..e69de29b -- cgit v1.2.3 From b33ce0631f8a1474c17029764af1c4fd35578f05 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 16 Oct 2020 18:14:14 +0200 Subject: modify __init__ --- src/python/gudhi/representations/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/python/gudhi/representations/__init__.py b/src/python/gudhi/representations/__init__.py index f020248d..b8ed7293 100644 --- a/src/python/gudhi/representations/__init__.py +++ b/src/python/gudhi/representations/__init__.py @@ -2,5 +2,6 @@ from .kernel_methods import * from .metrics import * from .preprocessing import * from .vector_methods import * +from .perslay import * -__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods"] +__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods", "perslay"] -- cgit v1.2.3 From 3417bf0b712ac2ea2d660716765c6c609a9e927d Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 16 Oct 2020 18:18:22 +0200 Subject: add perslay class --- src/python/gudhi/representations/perslay.py | 187 ++++++++++++++++++++++++++++ 1 file changed, 187 insertions(+) diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py index e69de29b..b7ed1ba0 100644 --- a/src/python/gudhi/representations/perslay.py +++ b/src/python/gudhi/representations/perslay.py @@ -0,0 +1,187 @@ +import tensorflow as tf +import tensorflow_addons as tfa +from tensorflow import random_uniform_initializer as rui +import numpy as np + +class PerslayModel(tf.keras.Model): + + def __init__(self, name, diagdim, perslay_parameters, rho): + super(PerslayModel, self).__init__() + self.namemodel = name + self.diagdim = diagdim + self.perslay_parameters = perslay_parameters + self.rho = rho + + self.vars = [[] for _ in range(len(self.perslay_parameters))] + for nf, plp in enumerate(self.perslay_parameters): + + weight = plp["pweight"] + if weight != None: + Winit, Wtrain, Wname = plp["pweight_init"], plp["pweight_train"], self.namemodel + "-pweight-" + str(nf) + if not callable(Winit): + W = tf.Variable(name=Wname, initial_value=Winit, trainable=Wtrain) + else: + if weight == "power": + W = tf.Variable(name=Wname, initial_value=Winit([1]), trainable=Wtrain) + elif weight == "grid": + Wshape = plp["pweight_size"] + W = tf.Variable(name=Wname, initial_value=Winit(Wshape), trainable=Wtrain) + elif weight == "gmix": + ngs = plp["pweight_num"] + W = tf.Variable(name=Wname, initial_value=Winit([4,ngs]), trainable=Wtrain) + else: + W = 0 + self.vars[nf].append(W) + + layer, Ltrain, Lname = plp["layer"], plp["layer_train"], self.namemodel + "-" + str(nf) + + if layer == "PermutationEquivariant": + Lpeq, LWinit, LBinit, LGinit = plp["lpeq"], plp["lweight_init"], plp["lbias_init"], plp["lgamma_init"] + LW, LB, LG = [], [], [] + for idx, (dim, pop) in enumerate(Lpeq): + dim_before = self.diagdim if idx == 0 else Lpeq[idx-1][0] + LWiv = LWinit([dim_before, dim]) if callable(LWinit) else LWinit + LBiv = LBinit([dim]) if callable(LBinit) else LBinit + LW.append( tf.Variable(name=Lname+"-W", 
initial_value=LWiv, trainable=Ltrain)) + LB.append( tf.Variable(name=Lname+"-B", initial_value=LBiv, trainable=Ltrain)) + if pop != None: + LGiv = LGinit([dim_before, dim]) if callable(LGinit) else LGinit + LG.append( tf.Variable(name=Lname+"-G", initial_value=LGiv, trainable=Ltrain)) + else: + LG.append([]) + self.vars[nf].append([LW, LB, LG]) + + elif layer == "Landscape" or layer == "BettiCurve" or layer == "Entropy": + LSinit = plp["lsample_init"] + LSiv = LSinit if not callable(LSinit) else LSinit([plp["lsample_num"]]) + LS = tf.Variable(name=Lname+"-S", initial_value=LSiv, trainable=Ltrain) + self.vars[nf].append(LS) + + elif layer == "Image": + LVinit = plp["lvariance_init"] + LViv = LVinit if not callable(LVinit) else LVinit([1]) + LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) + self.vars[nf].append(LV) + + elif layer == "Exponential": + LMinit, LVinit = plp["lmean_init"], plp["lvariance_init"] + LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) + LViv = LVinit if not callable(LVinit) else LVinit([self.diagdim, plp["lnum"]]) + LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) + LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) + self.vars[nf].append([LM, LV]) + + elif layer == "Rational": + LMinit, LVinit, LAinit = plp["lmean_init"], plp["lvariance_init"], plp["lalpha_init"] + LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) + LViv = LVinit if not callable(LVinit) else LVinit([self.diagdim, plp["lnum"]]) + LAiv = LAinit if not callable(LAinit) else LAinit([plp["lnum"]]) + LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) + LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) + LA = tf.Variable(name=Lname+"-A", initial_value=LAiv, trainable=Ltrain) + self.vars[nf].append([LM, LV, LA]) + + elif layer == "RationalHat": + LMinit, LRinit = plp["lmean_init"], plp["lr_init"] + LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) + LRiv = LRinit if not callable(LRinit) else LVinit([1]) + LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) + LR = tf.Variable(name=Lname+"-R", initial_value=LRiv, trainable=Ltrain) + self.vars[nf].append([LM, LR]) + + def compute_representations(self, diags, training=False): + + list_v = [] + + for nf, plp in enumerate(self.perslay_parameters): + + diag = diags[nf] + + N, dimension_diag = diag.shape[1], diag.shape[2] + tensor_mask = diag[:, :, dimension_diag - 1] + tensor_diag = diag[:, :, :dimension_diag - 1] + + W = self.vars[nf][0] + + if plp["pweight"] == "power": + p = plp["pweight_power"] + weight = W * tf.math.pow(tf.math.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1]), p) + + elif plp["pweight"] == "grid": + grid_shape = W.shape + indices = [] + for dim in range(dimension_diag-1): + [m, M] = plp["pweight_bnds"][dim] + coords = tf.slice(tensor_diag, [0, 0, dim], [-1, -1, 1]) + ids = grid_shape[dim] * (coords - m)/(M - m) + indices.append(tf.cast(ids, tf.int32)) + weight = tf.expand_dims(tf.gather_nd(params=W, indices=tf.concat(indices, axis=2)), -1) + + elif plp["pweight"] == "gmix": + M, V = tf.expand_dims(tf.expand_dims(W[:2,:], 0), 0), tf.expand_dims(tf.expand_dims(W[2:,:], 0), 0) + bc_inp = tf.expand_dims(tensor_diag, -1) + weight = tf.expand_dims(tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-tf.math.multiply(tf.math.square(bc_inp-M), tf.math.square(V)), axis=2)), axis=2), -1) + + + lvars = self.vars[nf][1] + if plp["layer"] == 
"PermutationEquivariant": + for idx, (dim, pop) in enumerate(plp["lpeq"]): + tensor_diag = permutation_equivariant_layer(tensor_diag, dim, pop, lvars[0][idx], lvars[1][idx], lvars[2][idx]) + elif plp["layer"] == "Landscape": + tensor_diag = landscape_layer(tensor_diag, lvars) + elif plp["layer"] == "BettiCurve": + tensor_diag = betti_layer(tensor_diag, plp["theta"], lvars) + elif plp["layer"] == "Entropy": + tensor_diag = entropy_layer(tensor_diag, plp["theta"], lvars) + elif plp["layer"] == "Image": + tensor_diag = image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars) + elif plp["layer"] == "Exponential": + tensor_diag = exponential_layer(tensor_diag, **lvars) + elif plp["layer"] == "Rational": + tensor_diag = rational_layer(tensor_diag, **lvars) + elif plp["layer"] == "RationalHat": + tensor_diag = rational_hat_layer(tensor_diag, plp["q"], **lvars) + + # Apply weight + output_dim = len(tensor_diag.shape) - 2 + if plp["pweight"] != None: + for _ in range(output_dim-1): + weight = tf.expand_dims(weight, -1) + tiled_weight = tf.tile(weight, [1, 1] + tensor_diag.shape[2:]) + tensor_diag = tf.math.multiply(tensor_diag, tiled_weight) + + # Apply mask + for _ in range(output_dim): + tensor_mask = tf.expand_dims(tensor_mask, -1) + tiled_mask = tf.tile(tensor_mask, [1, 1] + tensor_diag.shape[2:]) + masked_layer = tf.math.multiply(tensor_diag, tiled_mask) + + # Permutation invariant operation + if plp["perm_op"] == "topk" and output_dim == 1: # k first values + masked_layer_t = tf.transpose(masked_layer, perm=[0, 2, 1]) + values, indices = tf.math.top_k(masked_layer_t, k=plp["keep"]) + vector = tf.reshape(values, [-1, plp["keep"] * tensor_diag.shape[2]]) + elif plp["perm_op"] == "sum": # sum + vector = tf.math.reduce_sum(masked_layer, axis=1) + elif plp["perm_op"] == "max": # maximum + vector = tf.math.reduce_max(masked_layer, axis=1) + elif plp["perm_op"] == "mean": # minimum + vector = tf.math.reduce_mean(masked_layer, axis=1) + + # Second layer of channel + vector = plp["final_model"].call(vector, training=training) if plp["final_model"] != "identity" else vector + list_v.append(vector) + + # Concatenate all channels and add other features + representations = tf.concat(values=list_v, axis=1) + return representations + + def call(self, inputs, training=False): + + diags, feats = inputs[0], inputs[1] + representations = self.compute_representations(diags, training) + concat_representations = tf.concat(values=[representations, feats], axis=1) + final_representations = self.rho(concat_representations) if self.rho != "identity" else concat_representations + + return final_representations + -- cgit v1.2.3 From 710c09b99c3504919625513b8d22ec5436aef7fa Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Fri, 16 Oct 2020 18:19:09 +0200 Subject: add different representations --- src/python/gudhi/representations/perslay.py | 73 +++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py index b7ed1ba0..31a5076f 100644 --- a/src/python/gudhi/representations/perslay.py +++ b/src/python/gudhi/representations/perslay.py @@ -3,6 +3,79 @@ import tensorflow_addons as tfa from tensorflow import random_uniform_initializer as rui import numpy as np +def permutation_equivariant_layer(inp, dimension, perm_op, lbda, b, gamma): + """ DeepSet PersLay """ + dimension_before, num_pts = inp.shape[2], inp.shape[1] + b = tf.expand_dims(tf.expand_dims(b, 0), 0) + A = tf.reshape(tf.einsum("ijk,kl->ijl", 
inp, lbda), [-1, num_pts, dimension]) + if perm_op != None: + if perm_op == "max": + beta = tf.tile(tf.expand_dims(tf.math.reduce_max(inp, axis=1), 1), [1, num_pts, 1]) + elif perm_op == "min": + beta = tf.tile(tf.expand_dims(tf.math.reduce_min(inp, axis=1), 1), [1, num_pts, 1]) + elif perm_op == "sum": + beta = tf.tile(tf.expand_dims(tf.math.reduce_sum(inp, axis=1), 1), [1, num_pts, 1]) + else: + raise Exception("perm_op should be min, max or sum") + B = tf.reshape(tf.einsum("ijk,kl->ijl", beta, gamma), [-1, num_pts, dimension]) + return A - B + b + else: + return A + b + +def rational_hat_layer(inp, q, mu, r): + """ Rational Hat PersLay """ + mu, r = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(r, 0), 0) + dimension_before, num_pts = inp.shape[2], inp.shape[1] + bc_inp = tf.expand_dims(inp, -1) + norms = tf.norm(bc_inp - mu, ord=q, axis=2) + return 1/(1 + norms) - 1/(1 + tf.math.abs(tf.math.abs(r)-norms)) + +def rational_layer(inp, mu, sg, al): + """ Rational PersLay """ + mu, sg, al = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0), tf.expand_dims(tf.expand_dims(al, 0), 0) + dimension_before, num_pts = inp.shape[2], inp.shape[1] + bc_inp = tf.expand_dims(inp, -1) + return 1/tf.math.pow(1+tf.math.reduce_sum(tf.math.multiply(tf.math.abs(bc_inp - mu), tf.math.abs(sg)), axis=2), al) + +def exponential_layer(inp, mu, sg): + """ Exponential PersLay """ + mu, sg = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0) + dimension_before, num_pts = inp.shape[2], inp.shape[1] + bc_inp = tf.expand_dims(inp, -1) + return tf.math.exp(tf.math.reduce_sum(-tf.math.multiply(tf.math.square(bc_inp - mu), tf.math.square(sg)), axis=2)) + +def landscape_layer(inp, sp): + """ Landscape PersLay """ + sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) + return tf.math.maximum( .5 * (inp[:, :, 1:2] - inp[:, :, 0:1]) - tf.math.abs(sp - .5 * (inp[:, :, 1:2] + inp[:, :, 0:1])), np.array([0])) + +def betti_layer(inp, theta, sp): + """ Betti PersLay """ + sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) + X, Y = inp[:, :, 0:1], inp[:, :, 1:2] + return 1. / ( 1. + tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) ) + +def entropy_layer(inp, theta, sp): + """ Entropy PersLay + WARNING: this function assumes that padding values are zero + """ + sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) + bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) + L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2] + LN = tf.math.multiply(L, 1. / tf.expand_dims(tf.linalg.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1)) + entropy_terms = tf.where(LN > 0., -tf.math.multiply(LN, tf.math.log(LN)), LN) + return tf.math.multiply(entropy_terms, 1. / ( 1. 
+ tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) )) + +def image_layer(inp, image_size, image_bnds, sg): + """ Persistence Image PersLay """ + bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) + dimension_before, num_pts = inp.shape[2], inp.shape[1] + coords = [tf.range(start=image_bnds[i][0], limit=image_bnds[i][1], delta=(image_bnds[i][1] - image_bnds[i][0]) / image_size[i]) for i in range(dimension_before)] + M = tf.meshgrid(*coords) + mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0) + bc_inp = tf.reshape(bp_inp, [-1, num_pts, dimension_before] + [1 for _ in range(dimension_before)]) + return tf.expand_dims(tf.math.exp(tf.math.reduce_sum( -tf.math.square(bc_inp-mu) / (2*tf.math.square(sg)), axis=2)) / (2*np.pi*tf.math.square(sg)), -1) + class PerslayModel(tf.keras.Model): def __init__(self, name, diagdim, perslay_parameters, rho): -- cgit v1.2.3 From 9b3c74cace71744b471e2ed1137520ad341639de Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Sun, 1 Nov 2020 19:44:29 +0100 Subject: added doc and test --- src/python/gudhi/representations/perslay.py | 40 ++++---- src/python/gudhi/representations/perslay_params.md | 103 +++++++++++++++++++++ src/python/test/test_perslay.py | 57 ++++++++++++ 3 files changed, 184 insertions(+), 16 deletions(-) create mode 100644 src/python/gudhi/representations/perslay_params.md create mode 100644 src/python/test/test_perslay.py diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py index 31a5076f..f836a616 100644 --- a/src/python/gudhi/representations/perslay.py +++ b/src/python/gudhi/representations/perslay.py @@ -3,7 +3,7 @@ import tensorflow_addons as tfa from tensorflow import random_uniform_initializer as rui import numpy as np -def permutation_equivariant_layer(inp, dimension, perm_op, lbda, b, gamma): +def _permutation_equivariant_layer(inp, dimension, perm_op, lbda, b, gamma): """ DeepSet PersLay """ dimension_before, num_pts = inp.shape[2], inp.shape[1] b = tf.expand_dims(tf.expand_dims(b, 0), 0) @@ -22,7 +22,7 @@ def permutation_equivariant_layer(inp, dimension, perm_op, lbda, b, gamma): else: return A + b -def rational_hat_layer(inp, q, mu, r): +def _rational_hat_layer(inp, q, mu, r): """ Rational Hat PersLay """ mu, r = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(r, 0), 0) dimension_before, num_pts = inp.shape[2], inp.shape[1] @@ -30,32 +30,32 @@ def rational_hat_layer(inp, q, mu, r): norms = tf.norm(bc_inp - mu, ord=q, axis=2) return 1/(1 + norms) - 1/(1 + tf.math.abs(tf.math.abs(r)-norms)) -def rational_layer(inp, mu, sg, al): +def _rational_layer(inp, mu, sg, al): """ Rational PersLay """ mu, sg, al = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0), tf.expand_dims(tf.expand_dims(al, 0), 0) dimension_before, num_pts = inp.shape[2], inp.shape[1] bc_inp = tf.expand_dims(inp, -1) return 1/tf.math.pow(1+tf.math.reduce_sum(tf.math.multiply(tf.math.abs(bc_inp - mu), tf.math.abs(sg)), axis=2), al) -def exponential_layer(inp, mu, sg): +def _exponential_layer(inp, mu, sg): """ Exponential PersLay """ mu, sg = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0) dimension_before, num_pts = inp.shape[2], inp.shape[1] bc_inp = tf.expand_dims(inp, -1) return tf.math.exp(tf.math.reduce_sum(-tf.math.multiply(tf.math.square(bc_inp - mu), tf.math.square(sg)), axis=2)) -def landscape_layer(inp, sp): +def _landscape_layer(inp, sp): """ 
Landscape PersLay """ sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) return tf.math.maximum( .5 * (inp[:, :, 1:2] - inp[:, :, 0:1]) - tf.math.abs(sp - .5 * (inp[:, :, 1:2] + inp[:, :, 0:1])), np.array([0])) -def betti_layer(inp, theta, sp): +def _betti_layer(inp, theta, sp): """ Betti PersLay """ sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) X, Y = inp[:, :, 0:1], inp[:, :, 1:2] return 1. / ( 1. + tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) ) -def entropy_layer(inp, theta, sp): +def _entropy_layer(inp, theta, sp): """ Entropy PersLay WARNING: this function assumes that padding values are zero """ @@ -66,7 +66,7 @@ def entropy_layer(inp, theta, sp): entropy_terms = tf.where(LN > 0., -tf.math.multiply(LN, tf.math.log(LN)), LN) return tf.math.multiply(entropy_terms, 1. / ( 1. + tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) )) -def image_layer(inp, image_size, image_bnds, sg): +def _image_layer(inp, image_size, image_bnds, sg): """ Persistence Image PersLay """ bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) dimension_before, num_pts = inp.shape[2], inp.shape[1] @@ -77,7 +77,15 @@ def image_layer(inp, image_size, image_bnds, sg): return tf.expand_dims(tf.math.exp(tf.math.reduce_sum( -tf.math.square(bc_inp-mu) / (2*tf.math.square(sg)), axis=2)) / (2*np.pi*tf.math.square(sg)), -1) class PerslayModel(tf.keras.Model): + """ + TensorFlow model implementing PersLay. + Attributes: + name (string): name of the layer. Used for naming variables. + diagdim (integer): dimension of persistence diagram points. Usually 2 but can handle more. + perslay_parameters (dict): dictionary containing the PersLay parameters. See file perslay_params.md + rho (TensorFlow model): layers used to process the learned representations of persistence diagrams (for instance, a fully connected layer that outputs the number of classes). Use the string "identity" if you want to output the representations directly. 
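+    Inputs of call() are a pair [diagrams, features], where diagrams is a list of 3D diagram tensors (one per channel, the last coordinate of each point being the padding mask) and features is a tensor of extra features concatenated to the diagram representations.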
+ """ def __init__(self, name, diagdim, perslay_parameters, rho): super(PerslayModel, self).__init__() self.namemodel = name @@ -199,21 +207,21 @@ class PerslayModel(tf.keras.Model): lvars = self.vars[nf][1] if plp["layer"] == "PermutationEquivariant": for idx, (dim, pop) in enumerate(plp["lpeq"]): - tensor_diag = permutation_equivariant_layer(tensor_diag, dim, pop, lvars[0][idx], lvars[1][idx], lvars[2][idx]) + tensor_diag = _permutation_equivariant_layer(tensor_diag, dim, pop, lvars[0][idx], lvars[1][idx], lvars[2][idx]) elif plp["layer"] == "Landscape": - tensor_diag = landscape_layer(tensor_diag, lvars) + tensor_diag = _landscape_layer(tensor_diag, lvars) elif plp["layer"] == "BettiCurve": - tensor_diag = betti_layer(tensor_diag, plp["theta"], lvars) + tensor_diag = _betti_layer(tensor_diag, plp["theta"], lvars) elif plp["layer"] == "Entropy": - tensor_diag = entropy_layer(tensor_diag, plp["theta"], lvars) + tensor_diag = _entropy_layer(tensor_diag, plp["theta"], lvars) elif plp["layer"] == "Image": - tensor_diag = image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars) + tensor_diag = _image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars) elif plp["layer"] == "Exponential": - tensor_diag = exponential_layer(tensor_diag, **lvars) + tensor_diag = _exponential_layer(tensor_diag, **lvars) elif plp["layer"] == "Rational": - tensor_diag = rational_layer(tensor_diag, **lvars) + tensor_diag = _rational_layer(tensor_diag, **lvars) elif plp["layer"] == "RationalHat": - tensor_diag = rational_hat_layer(tensor_diag, plp["q"], **lvars) + tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], **lvars) # Apply weight output_dim = len(tensor_diag.shape) - 2 diff --git a/src/python/gudhi/representations/perslay_params.md b/src/python/gudhi/representations/perslay_params.md new file mode 100644 index 00000000..7cc9caf3 --- /dev/null +++ b/src/python/gudhi/representations/perslay_params.md @@ -0,0 +1,103 @@ +In the following description of PersLay parameters, each parameter, or dictionary key, that contains `_init` in its name is optimized and learned by PersLay during training. If you do not want to optimize the vectorization, set the keys **train_vect** and **train_weight** to False. + + * The following keys are mandatory: + + | **name** | **description** | + | --- | --- | + | **layer** | Either "PermutationEquivariant", "Image", "Landscape", "BettiCurve", "Entropy", "Exponential", "Rational" or "RationalHat". Type of the PersLay layer. "Image" is for [persistence images](https://arxiv.org/abs/1507.06217), "Landscape" is for [persistence landscapes](http://www.jmlr.org/papers/volume16/bubenik15a/bubenik15a.pdf), "Exponential", "Rational" and "RationalHat" are for [structure elements](http://jmlr.org/beta/papers/v20/18-358.html), "PermutationEquivariant" is for the original DeepSet layer, defined in [this article](https://arxiv.org/abs/1703.06114), "BettiCurve" is for [Betti curves](https://www.jstage.jst.go.jp/article/tjsai/32/3/32_D-G72/_pdf) and "Entropy" is for [entropy](https://arxiv.org/abs/1803.08304). | + | **perm_op** | Either "sum", "mean", "max", "topk". Permutation invariant operation. | + | **keep** | Number of top values to keep. Used only if **perm_op** is "topk". | + | **pweight** | Either "power", "grid", "gmix" or None. Weight function to be applied on persistence diagram points. If "power", this function is a (trainable) coefficient times the distances to the diagonal of the points to a certain power. 
If "grid", this function is piecewise-constant and defined with pixel values of a grid. If "gmix", this function is defined as a mixture of Gaussians. If None, no weighting is applied. | + | **final_model** | A Tensorflow / Keras model used to postprocess the persistence diagrams in each channel. Use "identity" if you don't want to postprocess. | + +Depending on what **pweight** is, the following additional keys are requested: + + * if **pweight** is "power": + + | **name** | **description** | + | --- | --- | + | **pweight_init** | Initializer of the coefficient of the power weight function. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **pweight_power** | Integer used for exponentiating the distances to the diagonal of the persistence diagram points. | + + * if **pweight** is "grid": + + | **name** | **description** | + | --- | --- | + | **pweight_size** | Grid size of the grid weight function. It is a tuple of integer values, such as (10,10). | + | **pweight_bnds** | Grid boundaries of the grid weight function. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). | + | **pweight_init** | Initializer for the pixel values of the grid weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| + + * if **pweight** is "gmix": + + | **name** | **description** | + | --- | --- | + | **pweight_num** | Number of Gaussian functions of the mixture of Gaussians weight function. | + | **pweight_init** | Initializer of the means and variances of the mixture of Gaussians weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + +Depending on what **layer** is, the following additional keys are requested: + + * if **layer** is "PermutationEquivariant": + + | **name** | **description** | + | --- | --- | + | **lpeq** | Sequence of permutation equivariant operations, as defined in [the DeepSet article](). It is a list of tuples of the form (*dim*, *operation*). Each tuple defines a permutation equivariant function of dimension *dim* and second permutation operation *operation* (string, either "max", "min", "sum" or None). Second permutation operation is optional and is not applied if *operation* is set to None. Example: [(150, "max"), (75, None)]. | + | **lweight_init** | Initializer for the weight matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| + | **lbias_init** | Initializer for the biases of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **lgamma_init** | Initializer for the Gamma matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| + + * if **layer** is "Image": + + | **name** | **description** | + | --- | --- | + | **image_size** | Persistence image size. It is a tuple of integer values, such as (10,10). | + | **image_bnds** | Persistence image boundaries. 
It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). | + | **lvariance_init** | Initializer for the bandwidths of the Gaussian functions centered on the persistence image pixels. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 3.). | + + * if **layer** is "Landscape": + + | **name** | **description** | + | --- | --- | + | **lsample_num** | Number of samples of the diagonal that will be evaluated on the persistence landscapes. | + | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + + * if **layer** is "BettiCurve": + + | **name** | **description** | + | --- | --- | + | **lsample_num** | Number of samples of the diagonal that will be evaluated on the Betti curves. | + | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. | + + * if **layer** is "Entropy": + + | **name** | **description** | + | --- | --- | + | **lsample_num** | Number of samples on the diagonal that will be evaluated on the persistence entropies. | + | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. | + + * if **layer** is "Exponential": + + | **name** | **description** | + | --- | --- | + | **lnum** | Number of exponential structure elements that will be evaluated on the persistence diagram points. | + | **lmean_init** | Initializer of the means of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **lvariance_init** | Initializer of the bandwidths of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | + + * if **layer** is "Rational": + + | **name** | **description** | + | --- | --- | + | **lnum** | Number of rational structure elements that will be evaluated on the persistence diagram points. | + | **lmean_init** | Initializer of the means of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **lvariance_init** | Initializer of the bandwidths of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | + | **lalpha_init** | Initializer of the exponents of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). 
| + + * if **layer** is "RationalHat": + + | **name** | **description** | + | --- | --- | + | **lnum** | Number of rational hat structure elements that will be evaluated on the persistence diagram points. | + | **lmean_init** | Initializer of the means of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | + | **lr_init** | Initializer of the threshold of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | + | **q** | Norm parameter. | diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py new file mode 100644 index 00000000..72b02944 --- /dev/null +++ b/src/python/test/test_perslay.py @@ -0,0 +1,57 @@ +import sys +import numpy as np +import tensorflow as tf +import matplotlib.pyplot as plt + +from sklearn.preprocessing import MinMaxScaler +from tensorflow import random_uniform_initializer as rui + +from gudhi.representations import DiagramScaler, Padding, PerslayModel + + +def test_perslay_image(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["pweight"] = None + perslayParameters["perm_op"] = "sum" + perslayParameters["layer"] = "Image" + perslayParameters["layer_train"] = False + perslayParameters["image_size"] = (2,2) + perslayParameters["image_bnds"] = ((-.501, 1.501), (-.501, 1.501)) + perslayParameters["lvariance_init"] = .1 + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + + assert vector.shape == (1,4) + assert np.abs(vector-np.array([[0,0,5.6e-5,3.3668644]])).sum() <= 1e-6 + +def test_perslay_landscape(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["pweight"] = None + perslayParameters["perm_op"] = "topk" + perslayParameters["keep"] = 3 + perslayParameters["layer"] = "Landscape" + perslayParameters["layer_train"] = False + perslayParameters["lsample_num"] = 3 + perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) + perslayParameters["final_model"] = "identity" + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + + assert vector.shape == (1,9) + assert np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6 + -- cgit v1.2.3 From 73be9043e6a3e9541d2c5393634774ef512d4494 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Mon, 7 Dec 2020 18:49:16 +0100 Subject: added more test and improved doc --- src/python/doc/installation.rst | 13 +- src/python/doc/representations.rst | 13 +- src/python/doc/representations_sum.inc | 23 +-- 
src/python/gudhi/representations/perslay.py | 10 +-
 src/python/gudhi/representations/perslay_params.md | 103 ------------
 src/python/test/test_perslay.py | 183 ++++++++++++++++++++-
 6 files changed, 213 insertions(+), 132 deletions(-)
 delete mode 100644 src/python/gudhi/representations/perslay_params.md

diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 66efe45a..9bec6c97 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -372,6 +372,15 @@ PyTorch
 `PyTorch <https://pytorch.org/>`_ is currently only used as a dependency of `PyKeOps`_, and in some tests.
 
+TensorFlow
+----------
+
+`TensorFlow <https://www.tensorflow.org/>`_ is currently only used in some automatic differentiation tests.
+
+:class:`~gudhi.representations.PerslayModel` in the :doc:`persistence representations <representations>` module requires
+`TensorFlow 2 <https://www.tensorflow.org/>`_.
+
+
 Scikit-learn
 ------------
@@ -394,10 +403,6 @@ mathematics, science, and engineering.
 :class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package `SciPy <https://www.scipy.org/>`_ as a backend if explicitly requested.
-TensorFlow
-----------
-
-`TensorFlow <https://www.tensorflow.org/>`_ is currently only used in some automatic differentiation tests.
 
 Bug reports and contributions
 *****************************

diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index b0477197..35517ebb 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -8,7 +8,7 @@ Representations manual
 
 .. include:: representations_sum.inc
 
-This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
+This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains the `PersLay architecture <https://arxiv.org/abs/1904.09378>`_, which is a general neural network architecture for performing deep learning with persistence diagrams. It is implemented in TensorFlow 2.
 
 A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
 
@@ -50,7 +50,7 @@ Machine Learning and Topological Data Analysis
 
 This `notebook `_ explains how to efficiently combine machine learning and topological data analysis with the
-:doc:`representations module`. 
+:doc:`representations module` in a scikit-learn fashion. This `notebook `_ and `this one `_ explain how to use the PersLay architecture. Preprocessing @@ -80,3 +80,12 @@ Metrics :members: :special-members: :show-inheritance: + +Deep Learning +------------- +.. automodule:: gudhi.representations.perslay + :members: + :special-members: + :show-inheritance: + +.. include:: perslay_params.md diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index 4298aea9..430e1c4e 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -1,14 +1,15 @@ .. table:: :widths: 30 40 30 - +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+ - | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer | - | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | | - | | | :Since: GUDHI 3.1.0 | - | | | | - | | | :License: MIT | - | | | | - | | | :Requires: `Scikit-learn `_ | - +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+ - | * :doc:`representations` | - +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+ + +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+ + | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer | + | img/sklearn-tda.png | diagrams, compatible with scikit-learn and tensorflow. | | + | | | :Since: GUDHI 3.1.0 | + | | | | + | | | :License: MIT | + | | | | + | | | :Requires: `Scikit-learn `_, `TensorFlow 2 `_| + | | | | + +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+ + | * :doc:`representations` | + +------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py index f836a616..5e522fad 100644 --- a/src/python/gudhi/representations/perslay.py +++ b/src/python/gudhi/representations/perslay.py @@ -83,7 +83,7 @@ class PerslayModel(tf.keras.Model): Attributes: name (string): name of the layer. Used for naming variables. diagdim (integer): dimension of persistence diagram points. Usually 2 but can handle more. - perslay_parameters (dict): dictionary containing the PersLay parameters. See file perslay_params.md + perslay_parameters (dict): dictionary containing the PersLay parameters. See below. rho (TensorFlow model): layers used to process the learned representations of persistence diagrams (for instance, a fully connected layer that outputs the number of classes). 
Use the string "identity" if you want to output the representations directly. """ def __init__(self, name, diagdim, perslay_parameters, rho): @@ -165,7 +165,7 @@ class PerslayModel(tf.keras.Model): elif layer == "RationalHat": LMinit, LRinit = plp["lmean_init"], plp["lr_init"] LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) - LRiv = LRinit if not callable(LRinit) else LVinit([1]) + LRiv = LRinit if not callable(LRinit) else LRinit([1]) LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) LR = tf.Variable(name=Lname+"-R", initial_value=LRiv, trainable=Ltrain) self.vars[nf].append([LM, LR]) @@ -217,11 +217,11 @@ class PerslayModel(tf.keras.Model): elif plp["layer"] == "Image": tensor_diag = _image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars) elif plp["layer"] == "Exponential": - tensor_diag = _exponential_layer(tensor_diag, **lvars) + tensor_diag = _exponential_layer(tensor_diag, lvars[0], lvars[1]) elif plp["layer"] == "Rational": - tensor_diag = _rational_layer(tensor_diag, **lvars) + tensor_diag = _rational_layer(tensor_diag, lvars[0], lvars[1], lvars[2]) elif plp["layer"] == "RationalHat": - tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], **lvars) + tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], lvars[0], lvars[1]) # Apply weight output_dim = len(tensor_diag.shape) - 2 diff --git a/src/python/gudhi/representations/perslay_params.md b/src/python/gudhi/representations/perslay_params.md deleted file mode 100644 index 7cc9caf3..00000000 --- a/src/python/gudhi/representations/perslay_params.md +++ /dev/null @@ -1,103 +0,0 @@ -In the following description of PersLay parameters, each parameter, or dictionary key, that contains `_init` in its name is optimized and learned by PersLay during training. If you do not want to optimize the vectorization, set the keys **train_vect** and **train_weight** to False. - - * The following keys are mandatory: - - | **name** | **description** | - | --- | --- | - | **layer** | Either "PermutationEquivariant", "Image", "Landscape", "BettiCurve", "Entropy", "Exponential", "Rational" or "RationalHat". Type of the PersLay layer. "Image" is for [persistence images](https://arxiv.org/abs/1507.06217), "Landscape" is for [persistence landscapes](http://www.jmlr.org/papers/volume16/bubenik15a/bubenik15a.pdf), "Exponential", "Rational" and "RationalHat" are for [structure elements](http://jmlr.org/beta/papers/v20/18-358.html), "PermutationEquivariant" is for the original DeepSet layer, defined in [this article](https://arxiv.org/abs/1703.06114), "BettiCurve" is for [Betti curves](https://www.jstage.jst.go.jp/article/tjsai/32/3/32_D-G72/_pdf) and "Entropy" is for [entropy](https://arxiv.org/abs/1803.08304). | - | **perm_op** | Either "sum", "mean", "max", "topk". Permutation invariant operation. | - | **keep** | Number of top values to keep. Used only if **perm_op** is "topk". | - | **pweight** | Either "power", "grid", "gmix" or None. Weight function to be applied on persistence diagram points. If "power", this function is a (trainable) coefficient times the distances to the diagonal of the points to a certain power. If "grid", this function is piecewise-constant and defined with pixel values of a grid. If "gmix", this function is defined as a mixture of Gaussians. If None, no weighting is applied. | - | **final_model** | A Tensorflow / Keras model used to postprocess the persistence diagrams in each channel. Use "identity" if you don't want to postprocess. 
| - -Depending on what **pweight** is, the following additional keys are requested: - - * if **pweight** is "power": - - | **name** | **description** | - | --- | --- | - | **pweight_init** | Initializer of the coefficient of the power weight function. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **pweight_power** | Integer used for exponentiating the distances to the diagonal of the persistence diagram points. | - - * if **pweight** is "grid": - - | **name** | **description** | - | --- | --- | - | **pweight_size** | Grid size of the grid weight function. It is a tuple of integer values, such as (10,10). | - | **pweight_bnds** | Grid boundaries of the grid weight function. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). | - | **pweight_init** | Initializer for the pixel values of the grid weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| - - * if **pweight** is "gmix": - - | **name** | **description** | - | --- | --- | - | **pweight_num** | Number of Gaussian functions of the mixture of Gaussians weight function. | - | **pweight_init** | Initializer of the means and variances of the mixture of Gaussians weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - -Depending on what **layer** is, the following additional keys are requested: - - * if **layer** is "PermutationEquivariant": - - | **name** | **description** | - | --- | --- | - | **lpeq** | Sequence of permutation equivariant operations, as defined in [the DeepSet article](). It is a list of tuples of the form (*dim*, *operation*). Each tuple defines a permutation equivariant function of dimension *dim* and second permutation operation *operation* (string, either "max", "min", "sum" or None). Second permutation operation is optional and is not applied if *operation* is set to None. Example: [(150, "max"), (75, None)]. | - | **lweight_init** | Initializer for the weight matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| - | **lbias_init** | Initializer for the biases of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **lgamma_init** | Initializer for the Gamma matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).| - - * if **layer** is "Image": - - | **name** | **description** | - | --- | --- | - | **image_size** | Persistence image size. It is a tuple of integer values, such as (10,10). | - | **image_bnds** | Persistence image boundaries. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). | - | **lvariance_init** | Initializer for the bandwidths of the Gaussian functions centered on the persistence image pixels. 
It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 3.). | - - * if **layer** is "Landscape": - - | **name** | **description** | - | --- | --- | - | **lsample_num** | Number of samples of the diagonal that will be evaluated on the persistence landscapes. | - | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - - * if **layer** is "BettiCurve": - - | **name** | **description** | - | --- | --- | - | **lsample_num** | Number of samples of the diagonal that will be evaluated on the Betti curves. | - | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. | - - * if **layer** is "Entropy": - - | **name** | **description** | - | --- | --- | - | **lsample_num** | Number of samples on the diagonal that will be evaluated on the persistence entropies. | - | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. | - - * if **layer** is "Exponential": - - | **name** | **description** | - | --- | --- | - | **lnum** | Number of exponential structure elements that will be evaluated on the persistence diagram points. | - | **lmean_init** | Initializer of the means of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **lvariance_init** | Initializer of the bandwidths of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | - - * if **layer** is "Rational": - - | **name** | **description** | - | --- | --- | - | **lnum** | Number of rational structure elements that will be evaluated on the persistence diagram points. | - | **lmean_init** | Initializer of the means of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **lvariance_init** | Initializer of the bandwidths of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | - | **lalpha_init** | Initializer of the exponents of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | - - * if **layer** is "RationalHat": - - | **name** | **description** | - | --- | --- | - | **lnum** | Number of rational hat structure elements that will be evaluated on the persistence diagram points. | - | **lmean_init** | Initializer of the means of the rational hat structure elements. 
It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). | - | **lr_init** | Initializer of the threshold of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). | - | **q** | Norm parameter. | diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py index 72b02944..d20bfe14 100644 --- a/src/python/test/test_perslay.py +++ b/src/python/test/test_perslay.py @@ -6,8 +6,14 @@ import matplotlib.pyplot as plt from sklearn.preprocessing import MinMaxScaler from tensorflow import random_uniform_initializer as rui +my_devices = tf.config.experimental.list_physical_devices(device_type='CPU') +tf.config.experimental.set_visible_devices(devices=my_devices, device_type='CPU') +tf.config.experimental.set_visible_devices([], 'GPU') + from gudhi.representations import DiagramScaler, Padding, PerslayModel +np.random.seed(0) +gauss_init = np.array(np.vstack([np.random.uniform(0.,10.,[2,3]), 1e-5*np.ones([2,3])]), dtype=np.float32) def test_perslay_image(): @@ -18,20 +24,21 @@ def test_perslay_image(): diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) perslayParameters = {} - perslayParameters["pweight"] = None - perslayParameters["perm_op"] = "sum" perslayParameters["layer"] = "Image" perslayParameters["layer_train"] = False perslayParameters["image_size"] = (2,2) perslayParameters["image_bnds"] = ((-.501, 1.501), (-.501, 1.501)) perslayParameters["lvariance_init"] = .1 + + perslayParameters["pweight"] = None + perslayParameters["perm_op"] = "sum" + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,4) assert np.abs(vector-np.array([[0,0,5.6e-5,3.3668644]])).sum() <= 1e-6 - + def test_perslay_landscape(): diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] @@ -41,17 +48,179 @@ def test_perslay_landscape(): diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) perslayParameters = {} + perslayParameters["layer"] = "Landscape" + perslayParameters["layer_train"] = False + perslayParameters["lsample_num"] = 3 + perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) + perslayParameters["final_model"] = "identity" + perslayParameters["pweight"] = None perslayParameters["perm_op"] = "topk" perslayParameters["keep"] = 3 - perslayParameters["layer"] = "Landscape" + + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,9) + assert np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6 + + perslayParameters["pweight"] = "power" + perslayParameters["pweight_power"] = 2 + perslayParameters["pweight_init"] = 1. 
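+    # Fixed (non-trained) "power" weight: w(b, d) = pweight_init * |d - b| ** pweight_power, i.e. persistence squared here.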
+ perslayParameters["pweight_train"] = False + perslayParameters["perm_op"] = "sum" + + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[0., 0.03476562, 0.04531251]])).sum() <= 1e-6 + +def test_perslay_betti(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["layer"] = "BettiCurve" perslayParameters["layer_train"] = False perslayParameters["lsample_num"] = 3 perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) + perslayParameters["theta"] = 1. perslayParameters["final_model"] = "identity" + + perslayParameters["pweight"] = "grid" + perslayParameters["pweight_size"] = [100,100] + perslayParameters["pweight_bnds"] = ((-.001, 10.001), (-.001, 10.001)) + perslayParameters["pweight_init"] = np.tile(np.arange(0.,100.,1, dtype=np.float32)[np.newaxis,:], [100,1]) + perslayParameters["pweight_train"] = False + perslayParameters["perm_op"] = "sum" model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[10.091741, 12.746357, 13.192123]])).sum() <= 1e-6 - assert vector.shape == (1,9) - assert np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6 +def test_perslay_entropy(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["layer"] = "Entropy" + perslayParameters["layer_train"] = False + perslayParameters["lsample_num"] = 3 + perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) + perslayParameters["theta"] = 1. + perslayParameters["final_model"] = "identity" + + perslayParameters["pweight"] = "gmix" + perslayParameters["pweight_num"] = 3 + perslayParameters["pweight_init"] = gauss_init + perslayParameters["pweight_train"] = False + perslayParameters["perm_op"] = "sum" + + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[1.4855406, 1.7884576, 1.6987829]])).sum() <= 1e-6 + +def test_perslay_rational(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["layer"] = "Rational" + perslayParameters["layer_train"] = False + perslayParameters["lnum"] = 3 + perslayParameters["lmean_init"] = gauss_init[:2,:] + perslayParameters["lvariance_init"] = gauss_init[2:,:] + perslayParameters["lalpha_init"] = rui(1., 1.) 
+ + perslayParameters["pweight"] = "power" + perslayParameters["pweight_power"] = 2 + perslayParameters["pweight_init"] = 1. + perslayParameters["pweight_train"] = False + perslayParameters["perm_op"] = "sum" + + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[0.7186792, 0.7186759, 0.718668]])).sum() <= 1e-6 + perslayParameters["layer"] = "RationalHat" + perslayParameters["layer_train"] = False + perslayParameters["lnum"] = 3 + perslayParameters["q"] = 1. + perslayParameters["lmean_init"] = gauss_init[:2,:] + perslayParameters["lr_init"] = rui(1., 1.) + + perslayParameters["pweight"] = "power" + perslayParameters["pweight_power"] = 2 + perslayParameters["pweight_init"] = 1. + perslayParameters["pweight_train"] = False + perslayParameters["perm_op"] = "sum" + + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[-0.00675799, -0.00620097, -0.00510298]])).sum() <= 1e-6 + +def test_perslay_exponential(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["layer"] = "Exponential" + perslayParameters["layer_train"] = False + perslayParameters["lnum"] = 3 + perslayParameters["lmean_init"] = 1e3 * gauss_init[:2,:] + perslayParameters["lvariance_init"] = gauss_init[2:,:] + + perslayParameters["pweight"] = None + perslayParameters["perm_op"] = "max" + + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,3) + assert np.abs(vector-np.array([[0.9940388, 0.99311596, 0.99222755]])).sum() <= 1e-6 + +def test_perslay_peq(): + + diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) + diag = Padding(use=True).fit_transform(diag) + D = np.stack(np.array(diag, dtype=np.float32), 0) + diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) + perslayParameters = {} + + perslayParameters["layer"] = "PermutationEquivariant" + perslayParameters["layer_train"] = False + perslayParameters["lpeq"] = [(5, "sum"), (5, "sum")] + perslayParameters["lweight_init"] = rui(1e-1, 1e-1) + perslayParameters["lbias_init"] = rui(0.1, 0.1) + perslayParameters["lgamma_init"] = rui(1e-1, 1e-1) + + perslayParameters["pweight"] = None + perslayParameters["perm_op"] = "topk" + perslayParameters["keep"] = 3 + + perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) + model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") + vector = model([diagrams, empty_feats]).numpy() + assert vector.shape == (1,15) + assert np.abs(vector-np.array([[0.4375, 0.41875, 
0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375]])).sum() <= 1e-6
-- cgit v1.2.3


From c8ede8a74c5b936a3d509aae91b483bddfdae292 Mon Sep 17 00:00:00 2001
From: MathieuCarriere
Date: Mon, 7 Dec 2020 20:19:37 +0100
Subject: added missing file

---
 src/python/doc/perslay_params.md | 95 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 src/python/doc/perslay_params.md

diff --git a/src/python/doc/perslay_params.md b/src/python/doc/perslay_params.md
new file mode 100644
index 00000000..58537939
--- /dev/null
+++ b/src/python/doc/perslay_params.md
@@ -0,0 +1,95 @@
+PersLay parameters
+------------------
+
+In the following description of PersLay parameters, each parameter, or dictionary key, that contains `_init` in its name is optimized and learned by PersLay during training. If you do not want to optimize the vectorization, set the keys **layer_train** and **pweight_train** to False.
+
+* The following keys are mandatory:
+
+  layer
+    Either "PermutationEquivariant", "Image", "Landscape", "BettiCurve", "Entropy", "Exponential", "Rational" or "RationalHat". Type of the PersLay layer. "Image" is for `persistence images `_, "Landscape" is for `persistence landscapes `_, "Exponential", "Rational" and "RationalHat" are for `structure elements `_, "PermutationEquivariant" is for the original DeepSet layer, defined in `this article `_, "BettiCurve" is for `Betti curves `_ and "Entropy" is for `entropy `_.
+
+  perm_op
+    Either "sum", "mean", "max", "topk". Permutation invariant operation.
+
+  keep
+    Number of top values to keep. Used only if **perm_op** is "topk".
+
+  pweight
+    Either "power", "grid", "gmix" or None. Weight function to be applied on persistence diagram points. If "power", this function is a (trainable) coefficient times the distances to the diagonal of the points to a certain power. If "grid", this function is piecewise-constant and defined with pixel values of a grid. If "gmix", this function is defined as a mixture of Gaussians. If None, no weighting is applied.
+
+  final_model
+    A Tensorflow / Keras model used to postprocess the learned representations of the persistence diagrams in each channel. Use "identity" if you don't want to postprocess.
+* Depending on what **pweight** is, the following additional keys are requested:
+
+  if **pweight** is "power":
+    - pweight_init
+      Initializer of the coefficient of the power weight function. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).
+    - pweight_power
+      Integer used for exponentiating the distances to the diagonal of the persistence diagram points.
+
+  if **pweight** is "grid":
+    - pweight_size
+      Grid size of the grid weight function. It is a tuple of integer values, such as (10,10).
+    - pweight_bnds
+      Grid boundaries of the grid weight function. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)).
+    - pweight_init
+      Initializer for the pixel values of the grid weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).
+
+  if **pweight** is "gmix":
+    - pweight_num
+      Number of Gaussian functions of the mixture of Gaussians weight function.
+    - pweight_init
+      Initializer of the means and variances of the mixture of Gaussians weight function.
It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). +* Depending on what **layer** is, the following additional keys are requested: + + if **layer** is "PermutationEquivariant": + - lpeq + Sequence of permutation equivariant operations, as defined in [the DeepSet article](). It is a list of tuples of the form (*dim*, *operation*). Each tuple defines a permutation equivariant function of dimension *dim* and second permutation operation *operation* (string, either "max", "min", "sum" or None). Second permutation operation is optional and is not applied if *operation* is set to None. Example: [(150, "max"), (75, None)]. + - lweight_init + Initializer for the weight matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - lbias_init + Initializer for the biases of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - lgamma_init + Initializer for the Gamma matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + + if **layer** is "Image": + - image_size + Persistence image size. It is a tuple of integer values, such as (10,10). + - image_bnds + Persistence image boundaries. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). + - lvariance_init + Initializer for the bandwidths of the Gaussian functions centered on the persistence image pixels. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 3.). + + if **layer** is "Landscape": + - lsample_num + Number of samples of the diagonal that will be evaluated on the persistence landscapes. + - lsample_init + Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + + if **layer** is "BettiCurve": + - lsample_num + Number of samples of the diagonal that will be evaluated on the Betti curves. + - lsample_init + Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - theta + Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. + + if **layer** is "Entropy": + - lsample_num + Number of samples on the diagonal that will be evaluated on the persistence entropies. + - lsample_init + Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - theta + Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. + + if **layer** is "Exponential": + - lnum + Number of exponential structure elements that will be evaluated on the persistence diagram points. + - lmean_init + Initializer of the means of the exponential structure elements. 
It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - lvariance_init + Initializer of the bandwidths of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). + + if **layer** is "Rational": + - lnum + Number of rational structure elements that will be evaluated on the persistence diagram points. + - lmean_init + Initializer of the means of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - lvariance_init + Initializer of the bandwidths of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). + - lalpha_init + Initializer of the exponents of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). + + if **layer** is "RationalHat": + - lnum + Number of rational hat structure elements that will be evaluated on the persistence diagram points. + - lmean_init + Initializer of the means of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). + - lr_init + Initializer of the threshold of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). + - q + Norm parameter. -- cgit v1.2.3 From 2a283846f927f9eb5088587b273d8f59123642d9 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Sat, 10 Jul 2021 23:05:35 +0200 Subject: changed PersLay API --- src/python/gudhi/representations/perslay.py | 409 ++++++++++------------------ src/python/test/test_perslay.py | 293 +++++--------------- 2 files changed, 218 insertions(+), 484 deletions(-) diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py index 5e522fad..b4778496 100644 --- a/src/python/gudhi/representations/perslay.py +++ b/src/python/gudhi/representations/perslay.py @@ -1,268 +1,151 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+# Author(s): Mathieu Carrière +# +# Copyright (C) 2018-2019 Inria +# +# Modification(s): +# - YYYY/MM Author: Description of the modification + import tensorflow as tf -import tensorflow_addons as tfa -from tensorflow import random_uniform_initializer as rui import numpy as np -def _permutation_equivariant_layer(inp, dimension, perm_op, lbda, b, gamma): - """ DeepSet PersLay """ - dimension_before, num_pts = inp.shape[2], inp.shape[1] - b = tf.expand_dims(tf.expand_dims(b, 0), 0) - A = tf.reshape(tf.einsum("ijk,kl->ijl", inp, lbda), [-1, num_pts, dimension]) - if perm_op != None: - if perm_op == "max": - beta = tf.tile(tf.expand_dims(tf.math.reduce_max(inp, axis=1), 1), [1, num_pts, 1]) - elif perm_op == "min": - beta = tf.tile(tf.expand_dims(tf.math.reduce_min(inp, axis=1), 1), [1, num_pts, 1]) - elif perm_op == "sum": - beta = tf.tile(tf.expand_dims(tf.math.reduce_sum(inp, axis=1), 1), [1, num_pts, 1]) +class GridPerslayWeight(tf.keras.layers.Layer): + + def __init__(self, grid, grid_bnds, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.grid = tf.Variable(initial_value=grid, trainable=True) + self.grid_bnds = grid_bnds + + def build(self, input_shape): + return self + + def call(self, diagrams): + grid_shape = self.grid.shape + indices = [] + for dim in range(2): + [m,M] = self.grid_bnds[dim] + coords = tf.expand_dims(diagrams[:,:,dim],-1) + ids = grid_shape[dim]*(coords-m)/(M-m) + indices.append(tf.cast(ids, tf.int32)) + weight = tf.gather_nd(params=self.grid, indices=tf.concat(indices, axis=2)) + return weight + +class GaussianMixturePerslayWeight(tf.keras.layers.Layer): + + def __init__(self, gaussians, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.W = tf.Variable(initial_value=gaussians, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + means = tf.expand_dims(tf.expand_dims(self.W[:2,:],0),0) + variances = tf.expand_dims(tf.expand_dims(self.W[2:,:],0),0) + diagrams = tf.expand_dims(diagrams, -1) + dists = tf.math.multiply(tf.math.square(diagrams-means), 1/tf.math.square(variances)) + weight = tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-dists, axis=2)), axis=2) + return weight + +class PowerPerslayWeight(tf.keras.layers.Layer): + + def __init__(self, constant, power, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.constant = tf.Variable(initial_value=constant, trainable=True) + self.power = power + + def build(self, input_shape): + return self + + def call(self, diagrams): + weight = self.constant * tf.math.pow(tf.math.abs(diagrams[:,:,1]-diagrams[:,:,0]), self.power) + return weight + + +class GaussianPerslayPhi(tf.keras.layers.Layer): + + def __init__(self, image_size, image_bnds, variance, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.image_size = image_size + self.image_bnds = image_bnds + self.variance = tf.Variable(initial_value=variance, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + diagrams_d = tf.concat([diagrams[:,:,0:1], diagrams[:,:,1:2]-diagrams[:,:,0:1]], axis=2) + step = [(self.image_bnds[i][1]-self.image_bnds[i][0])/self.image_size[i] for i in range(2)] + coords = [tf.range(self.image_bnds[i][0], self.image_bnds[i][1], step[i]) for i in range(2)] + M = tf.meshgrid(*coords) + mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0) + for _ in range(2): + diagrams_d = tf.expand_dims(diagrams_d,-1) + dists = tf.math.square(diagrams_d-mu) / (2*tf.math.square(self.variance)) + gauss = 
tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*np.pi*tf.math.square(self.variance)) + return tf.expand_dims(gauss,-1), M[0].shape + tuple([1]) + +class TentPerslayPhi(tf.keras.layers.Layer): + + def __init__(self, samples, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.samples = tf.Variable(initial_value=samples, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0) + xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2] + tent = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), np.array([0.])) + return tent, self.samples.shape + +class FlatPerslayPhi(tf.keras.layers.Layer): + + def __init__(self, samples, theta, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.samples = tf.Variable(initial_value=samples, trainable=True) + self.theta = tf.Variable(initial_value=theta, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0) + xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2] + flat = 1./(1.+tf.math.exp(-self.theta*(.5*(ys-xs)-tf.math.abs(samples_d-.5*(ys+xs))))) + return flat, self.samples.shape + +class Perslay(tf.keras.layers.Layer): + + def __init__(self, weight, phi, perm_op, rho, **kwargs): + super().__init__(dynamic=True, **kwargs) + self.weight = weight + self.phi = phi + self.pop = perm_op + self.rho = rho + + def build(self, input_shape): + return self + + def call(self, diagrams): + + vector, dim = self.phi(diagrams) + weight = self.weight(diagrams) + for _ in range(len(dim)): + weight = tf.expand_dims(weight, -1) + vector = tf.math.multiply(vector, weight) + + permop = self.pop + if type(permop) == str and permop[:3] == 'top': + k = int(permop[3:]) + vector = vector.to_tensor(default_value=-1e10) + vector = tf.math.top_k(tf.transpose(vector, perm=[0, 2, 1]), k=k).values + vector = tf.reshape(vector, [-1,k*dim[0]]) else: - raise Exception("perm_op should be min, max or sum") - B = tf.reshape(tf.einsum("ijk,kl->ijl", beta, gamma), [-1, num_pts, dimension]) - return A - B + b - else: - return A + b - -def _rational_hat_layer(inp, q, mu, r): - """ Rational Hat PersLay """ - mu, r = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(r, 0), 0) - dimension_before, num_pts = inp.shape[2], inp.shape[1] - bc_inp = tf.expand_dims(inp, -1) - norms = tf.norm(bc_inp - mu, ord=q, axis=2) - return 1/(1 + norms) - 1/(1 + tf.math.abs(tf.math.abs(r)-norms)) - -def _rational_layer(inp, mu, sg, al): - """ Rational PersLay """ - mu, sg, al = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0), tf.expand_dims(tf.expand_dims(al, 0), 0) - dimension_before, num_pts = inp.shape[2], inp.shape[1] - bc_inp = tf.expand_dims(inp, -1) - return 1/tf.math.pow(1+tf.math.reduce_sum(tf.math.multiply(tf.math.abs(bc_inp - mu), tf.math.abs(sg)), axis=2), al) - -def _exponential_layer(inp, mu, sg): - """ Exponential PersLay """ - mu, sg = tf.expand_dims(tf.expand_dims(mu, 0), 0), tf.expand_dims(tf.expand_dims(sg, 0), 0) - dimension_before, num_pts = inp.shape[2], inp.shape[1] - bc_inp = tf.expand_dims(inp, -1) - return tf.math.exp(tf.math.reduce_sum(-tf.math.multiply(tf.math.square(bc_inp - mu), tf.math.square(sg)), axis=2)) - -def _landscape_layer(inp, sp): - """ Landscape PersLay """ - sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) - return tf.math.maximum( .5 * (inp[:, :, 1:2] - inp[:, :, 0:1]) - tf.math.abs(sp - .5 * (inp[:, :, 1:2] 
+ inp[:, :, 0:1])), np.array([0])) - -def _betti_layer(inp, theta, sp): - """ Betti PersLay """ - sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) - X, Y = inp[:, :, 0:1], inp[:, :, 1:2] - return 1. / ( 1. + tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) ) - -def _entropy_layer(inp, theta, sp): - """ Entropy PersLay - WARNING: this function assumes that padding values are zero - """ - sp = tf.expand_dims(tf.expand_dims(sp, 0), 0) - bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) - L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2] - LN = tf.math.multiply(L, 1. / tf.expand_dims(tf.linalg.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1)) - entropy_terms = tf.where(LN > 0., -tf.math.multiply(LN, tf.math.log(LN)), LN) - return tf.math.multiply(entropy_terms, 1. / ( 1. + tf.math.exp( -theta * (.5*(Y-X) - tf.math.abs(sp - .5*(Y+X))) ) )) - -def _image_layer(inp, image_size, image_bnds, sg): - """ Persistence Image PersLay """ - bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32))) - dimension_before, num_pts = inp.shape[2], inp.shape[1] - coords = [tf.range(start=image_bnds[i][0], limit=image_bnds[i][1], delta=(image_bnds[i][1] - image_bnds[i][0]) / image_size[i]) for i in range(dimension_before)] - M = tf.meshgrid(*coords) - mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0) - bc_inp = tf.reshape(bp_inp, [-1, num_pts, dimension_before] + [1 for _ in range(dimension_before)]) - return tf.expand_dims(tf.math.exp(tf.math.reduce_sum( -tf.math.square(bc_inp-mu) / (2*tf.math.square(sg)), axis=2)) / (2*np.pi*tf.math.square(sg)), -1) - -class PerslayModel(tf.keras.Model): - """ - TensorFlow model implementing PersLay. - - Attributes: - name (string): name of the layer. Used for naming variables. - diagdim (integer): dimension of persistence diagram points. Usually 2 but can handle more. - perslay_parameters (dict): dictionary containing the PersLay parameters. See below. - rho (TensorFlow model): layers used to process the learned representations of persistence diagrams (for instance, a fully connected layer that outputs the number of classes). Use the string "identity" if you want to output the representations directly. 
- """ - def __init__(self, name, diagdim, perslay_parameters, rho): - super(PerslayModel, self).__init__() - self.namemodel = name - self.diagdim = diagdim - self.perslay_parameters = perslay_parameters - self.rho = rho - - self.vars = [[] for _ in range(len(self.perslay_parameters))] - for nf, plp in enumerate(self.perslay_parameters): - - weight = plp["pweight"] - if weight != None: - Winit, Wtrain, Wname = plp["pweight_init"], plp["pweight_train"], self.namemodel + "-pweight-" + str(nf) - if not callable(Winit): - W = tf.Variable(name=Wname, initial_value=Winit, trainable=Wtrain) - else: - if weight == "power": - W = tf.Variable(name=Wname, initial_value=Winit([1]), trainable=Wtrain) - elif weight == "grid": - Wshape = plp["pweight_size"] - W = tf.Variable(name=Wname, initial_value=Winit(Wshape), trainable=Wtrain) - elif weight == "gmix": - ngs = plp["pweight_num"] - W = tf.Variable(name=Wname, initial_value=Winit([4,ngs]), trainable=Wtrain) - else: - W = 0 - self.vars[nf].append(W) - - layer, Ltrain, Lname = plp["layer"], plp["layer_train"], self.namemodel + "-" + str(nf) - - if layer == "PermutationEquivariant": - Lpeq, LWinit, LBinit, LGinit = plp["lpeq"], plp["lweight_init"], plp["lbias_init"], plp["lgamma_init"] - LW, LB, LG = [], [], [] - for idx, (dim, pop) in enumerate(Lpeq): - dim_before = self.diagdim if idx == 0 else Lpeq[idx-1][0] - LWiv = LWinit([dim_before, dim]) if callable(LWinit) else LWinit - LBiv = LBinit([dim]) if callable(LBinit) else LBinit - LW.append( tf.Variable(name=Lname+"-W", initial_value=LWiv, trainable=Ltrain)) - LB.append( tf.Variable(name=Lname+"-B", initial_value=LBiv, trainable=Ltrain)) - if pop != None: - LGiv = LGinit([dim_before, dim]) if callable(LGinit) else LGinit - LG.append( tf.Variable(name=Lname+"-G", initial_value=LGiv, trainable=Ltrain)) - else: - LG.append([]) - self.vars[nf].append([LW, LB, LG]) - - elif layer == "Landscape" or layer == "BettiCurve" or layer == "Entropy": - LSinit = plp["lsample_init"] - LSiv = LSinit if not callable(LSinit) else LSinit([plp["lsample_num"]]) - LS = tf.Variable(name=Lname+"-S", initial_value=LSiv, trainable=Ltrain) - self.vars[nf].append(LS) - - elif layer == "Image": - LVinit = plp["lvariance_init"] - LViv = LVinit if not callable(LVinit) else LVinit([1]) - LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) - self.vars[nf].append(LV) - - elif layer == "Exponential": - LMinit, LVinit = plp["lmean_init"], plp["lvariance_init"] - LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) - LViv = LVinit if not callable(LVinit) else LVinit([self.diagdim, plp["lnum"]]) - LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) - LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) - self.vars[nf].append([LM, LV]) - - elif layer == "Rational": - LMinit, LVinit, LAinit = plp["lmean_init"], plp["lvariance_init"], plp["lalpha_init"] - LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]]) - LViv = LVinit if not callable(LVinit) else LVinit([self.diagdim, plp["lnum"]]) - LAiv = LAinit if not callable(LAinit) else LAinit([plp["lnum"]]) - LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) - LV = tf.Variable(name=Lname+"-V", initial_value=LViv, trainable=Ltrain) - LA = tf.Variable(name=Lname+"-A", initial_value=LAiv, trainable=Ltrain) - self.vars[nf].append([LM, LV, LA]) - - elif layer == "RationalHat": - LMinit, LRinit = plp["lmean_init"], plp["lr_init"] - LMiv = LMinit if not callable(LMinit) else 
LMinit([self.diagdim, plp["lnum"]]) - LRiv = LRinit if not callable(LRinit) else LRinit([1]) - LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain) - LR = tf.Variable(name=Lname+"-R", initial_value=LRiv, trainable=Ltrain) - self.vars[nf].append([LM, LR]) - - def compute_representations(self, diags, training=False): - - list_v = [] - - for nf, plp in enumerate(self.perslay_parameters): - - diag = diags[nf] - - N, dimension_diag = diag.shape[1], diag.shape[2] - tensor_mask = diag[:, :, dimension_diag - 1] - tensor_diag = diag[:, :, :dimension_diag - 1] - - W = self.vars[nf][0] - - if plp["pweight"] == "power": - p = plp["pweight_power"] - weight = W * tf.math.pow(tf.math.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1]), p) - - elif plp["pweight"] == "grid": - grid_shape = W.shape - indices = [] - for dim in range(dimension_diag-1): - [m, M] = plp["pweight_bnds"][dim] - coords = tf.slice(tensor_diag, [0, 0, dim], [-1, -1, 1]) - ids = grid_shape[dim] * (coords - m)/(M - m) - indices.append(tf.cast(ids, tf.int32)) - weight = tf.expand_dims(tf.gather_nd(params=W, indices=tf.concat(indices, axis=2)), -1) - - elif plp["pweight"] == "gmix": - M, V = tf.expand_dims(tf.expand_dims(W[:2,:], 0), 0), tf.expand_dims(tf.expand_dims(W[2:,:], 0), 0) - bc_inp = tf.expand_dims(tensor_diag, -1) - weight = tf.expand_dims(tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-tf.math.multiply(tf.math.square(bc_inp-M), tf.math.square(V)), axis=2)), axis=2), -1) - - - lvars = self.vars[nf][1] - if plp["layer"] == "PermutationEquivariant": - for idx, (dim, pop) in enumerate(plp["lpeq"]): - tensor_diag = _permutation_equivariant_layer(tensor_diag, dim, pop, lvars[0][idx], lvars[1][idx], lvars[2][idx]) - elif plp["layer"] == "Landscape": - tensor_diag = _landscape_layer(tensor_diag, lvars) - elif plp["layer"] == "BettiCurve": - tensor_diag = _betti_layer(tensor_diag, plp["theta"], lvars) - elif plp["layer"] == "Entropy": - tensor_diag = _entropy_layer(tensor_diag, plp["theta"], lvars) - elif plp["layer"] == "Image": - tensor_diag = _image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars) - elif plp["layer"] == "Exponential": - tensor_diag = _exponential_layer(tensor_diag, lvars[0], lvars[1]) - elif plp["layer"] == "Rational": - tensor_diag = _rational_layer(tensor_diag, lvars[0], lvars[1], lvars[2]) - elif plp["layer"] == "RationalHat": - tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], lvars[0], lvars[1]) - - # Apply weight - output_dim = len(tensor_diag.shape) - 2 - if plp["pweight"] != None: - for _ in range(output_dim-1): - weight = tf.expand_dims(weight, -1) - tiled_weight = tf.tile(weight, [1, 1] + tensor_diag.shape[2:]) - tensor_diag = tf.math.multiply(tensor_diag, tiled_weight) - - # Apply mask - for _ in range(output_dim): - tensor_mask = tf.expand_dims(tensor_mask, -1) - tiled_mask = tf.tile(tensor_mask, [1, 1] + tensor_diag.shape[2:]) - masked_layer = tf.math.multiply(tensor_diag, tiled_mask) - - # Permutation invariant operation - if plp["perm_op"] == "topk" and output_dim == 1: # k first values - masked_layer_t = tf.transpose(masked_layer, perm=[0, 2, 1]) - values, indices = tf.math.top_k(masked_layer_t, k=plp["keep"]) - vector = tf.reshape(values, [-1, plp["keep"] * tensor_diag.shape[2]]) - elif plp["perm_op"] == "sum": # sum - vector = tf.math.reduce_sum(masked_layer, axis=1) - elif plp["perm_op"] == "max": # maximum - vector = tf.math.reduce_max(masked_layer, axis=1) - elif plp["perm_op"] == "mean": # minimum - vector = tf.math.reduce_mean(masked_layer, axis=1) - 
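# Note on the "topk" branch above: tf.math.top_k keeps the plp["keep"] largest values per feature dimension, and the reshape flattens them into a vector of length keep * num_features.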
- # Second layer of channel - vector = plp["final_model"].call(vector, training=training) if plp["final_model"] != "identity" else vector - list_v.append(vector) - - # Concatenate all channels and add other features - representations = tf.concat(values=list_v, axis=1) - return representations - - def call(self, inputs, training=False): - - diags, feats = inputs[0], inputs[1] - representations = self.compute_representations(diags, training) - concat_representations = tf.concat(values=[representations, feats], axis=1) - final_representations = self.rho(concat_representations) if self.rho != "identity" else concat_representations - - return final_representations + vector = permop(vector, axis=1) + vector = self.rho(vector) + + return vector diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py index d20bfe14..c0e363ec 100644 --- a/src/python/test/test_perslay.py +++ b/src/python/test/test_perslay.py @@ -2,225 +2,76 @@ import sys import numpy as np import tensorflow as tf import matplotlib.pyplot as plt - from sklearn.preprocessing import MinMaxScaler -from tensorflow import random_uniform_initializer as rui - -my_devices = tf.config.experimental.list_physical_devices(device_type='CPU') -tf.config.experimental.set_visible_devices(devices=my_devices, device_type='CPU') -tf.config.experimental.set_visible_devices([], 'GPU') - -from gudhi.representations import DiagramScaler, Padding, PerslayModel - -np.random.seed(0) -gauss_init = np.array(np.vstack([np.random.uniform(0.,10.,[2,3]), 1e-5*np.ones([2,3])]), dtype=np.float32) - -def test_perslay_image(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "Image" - perslayParameters["layer_train"] = False - perslayParameters["image_size"] = (2,2) - perslayParameters["image_bnds"] = ((-.501, 1.501), (-.501, 1.501)) - perslayParameters["lvariance_init"] = .1 - - perslayParameters["pweight"] = None - perslayParameters["perm_op"] = "sum" - - perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,4) - assert np.abs(vector-np.array([[0,0,5.6e-5,3.3668644]])).sum() <= 1e-6 - -def test_perslay_landscape(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "Landscape" - perslayParameters["layer_train"] = False - perslayParameters["lsample_num"] = 3 - perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) - perslayParameters["final_model"] = "identity" - - perslayParameters["pweight"] = None - perslayParameters["perm_op"] = "topk" - perslayParameters["keep"] = 3 - - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,9) - assert 
np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6 - - perslayParameters["pweight"] = "power" - perslayParameters["pweight_power"] = 2 - perslayParameters["pweight_init"] = 1. - perslayParameters["pweight_train"] = False - perslayParameters["perm_op"] = "sum" - - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[0., 0.03476562, 0.04531251]])).sum() <= 1e-6 - -def test_perslay_betti(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "BettiCurve" - perslayParameters["layer_train"] = False - perslayParameters["lsample_num"] = 3 - perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) - perslayParameters["theta"] = 1. - perslayParameters["final_model"] = "identity" - - perslayParameters["pweight"] = "grid" - perslayParameters["pweight_size"] = [100,100] - perslayParameters["pweight_bnds"] = ((-.001, 10.001), (-.001, 10.001)) - perslayParameters["pweight_init"] = np.tile(np.arange(0.,100.,1, dtype=np.float32)[np.newaxis,:], [100,1]) - perslayParameters["pweight_train"] = False - perslayParameters["perm_op"] = "sum" - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[10.091741, 12.746357, 13.192123]])).sum() <= 1e-6 - -def test_perslay_entropy(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "Entropy" - perslayParameters["layer_train"] = False - perslayParameters["lsample_num"] = 3 - perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32) - perslayParameters["theta"] = 1. 
- perslayParameters["final_model"] = "identity" - - perslayParameters["pweight"] = "gmix" - perslayParameters["pweight_num"] = 3 - perslayParameters["pweight_init"] = gauss_init - perslayParameters["pweight_train"] = False - perslayParameters["perm_op"] = "sum" - - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[1.4855406, 1.7884576, 1.6987829]])).sum() <= 1e-6 - -def test_perslay_rational(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "Rational" - perslayParameters["layer_train"] = False - perslayParameters["lnum"] = 3 - perslayParameters["lmean_init"] = gauss_init[:2,:] - perslayParameters["lvariance_init"] = gauss_init[2:,:] - perslayParameters["lalpha_init"] = rui(1., 1.) - - perslayParameters["pweight"] = "power" - perslayParameters["pweight_power"] = 2 - perslayParameters["pweight_init"] = 1. - perslayParameters["pweight_train"] = False - perslayParameters["perm_op"] = "sum" - - perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[0.7186792, 0.7186759, 0.718668]])).sum() <= 1e-6 - - perslayParameters["layer"] = "RationalHat" - perslayParameters["layer_train"] = False - perslayParameters["lnum"] = 3 - perslayParameters["q"] = 1. - perslayParameters["lmean_init"] = gauss_init[:2,:] - perslayParameters["lr_init"] = rui(1., 1.) - - perslayParameters["pweight"] = "power" - perslayParameters["pweight_power"] = 2 - perslayParameters["pweight_init"] = 1. 
- perslayParameters["pweight_train"] = False - perslayParameters["perm_op"] = "sum" - - perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[-0.00675799, -0.00620097, -0.00510298]])).sum() <= 1e-6 - -def test_perslay_exponential(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "Exponential" - perslayParameters["layer_train"] = False - perslayParameters["lnum"] = 3 - perslayParameters["lmean_init"] = 1e3 * gauss_init[:2,:] - perslayParameters["lvariance_init"] = gauss_init[2:,:] - - perslayParameters["pweight"] = None - perslayParameters["perm_op"] = "max" - - perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,3) - assert np.abs(vector-np.array([[0.9940388, 0.99311596, 0.99222755]])).sum() <= 1e-6 - -def test_perslay_peq(): - - diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] - diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag) - diag = Padding(use=True).fit_transform(diag) - D = np.stack(np.array(diag, dtype=np.float32), 0) - diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32) - perslayParameters = {} - - perslayParameters["layer"] = "PermutationEquivariant" - perslayParameters["layer_train"] = False - perslayParameters["lpeq"] = [(5, "sum"), (5, "sum")] - perslayParameters["lweight_init"] = rui(1e-1, 1e-1) - perslayParameters["lbias_init"] = rui(0.1, 0.1) - perslayParameters["lgamma_init"] = rui(1e-1, 1e-1) - - perslayParameters["pweight"] = None - perslayParameters["perm_op"] = "topk" - perslayParameters["keep"] = 3 - - perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()]) - model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity") - vector = model([diagrams, empty_feats]).numpy() - assert vector.shape == (1,15) - assert np.abs(vector-np.array([[0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375]])).sum() <= 1e-6 +import gudhi.representations as gdr + +def test_gaussian_perslay(): + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1) + weight = gdr.PowerPerslayWeight(1.,0.) 
+ perm_op = tf.math.reduce_sum + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + +def test_tent_perslay(): + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.TentPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32)) + weight = gdr.PowerPerslayWeight(1.,0.) + perm_op = 'top3' + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + +def test_flat_perslay(): + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.) + weight = gdr.PowerPerslayWeight(1.,0.) + perm_op = tf.math.reduce_sum + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + +def test_gmix_weight(): + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.) + weight = gdr.GaussianMixturePerslayWeight(np.array([[.5],[.5],[5],[5]], dtype=np.float32)) + perm_op = tf.math.reduce_sum + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + +def test_grid_weight(): + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.) 
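# GridPerslayWeight below looks up each point's weight on a fixed grid of values: coordinates are rescaled to grid indices over the given bounds, floored by the cast to int32, and gathered with tf.gather_nd.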
+ weight = gdr.GridPerslayWeight(np.array(np.random.uniform(size=[100,100]),dtype=np.float32),((-0.01, 1.01),(-0.01, 1.01))) + perm_op = tf.math.reduce_sum + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + -- cgit v1.2.3 From d78acf9c6c71e896b2fdbd1b16715f420f6250e8 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Mon, 1 Nov 2021 22:36:03 +0100 Subject: update doc --- src/python/CMakeLists.txt | 2 + src/python/doc/installation.rst | 7 +- src/python/doc/representations.rst | 75 +++++-- src/python/doc/representations_sum.inc | 3 +- src/python/gudhi/representations/__init__.py | 3 +- src/python/gudhi/representations/perslay.py | 151 -------------- src/python/gudhi/tensorflow/__init__.py | 3 + src/python/gudhi/tensorflow/perslay.py | 284 +++++++++++++++++++++++++++ src/python/test/test_perslay.py | 106 ++++++++-- 9 files changed, 446 insertions(+), 188 deletions(-) delete mode 100644 src/python/gudhi/representations/perslay.py create mode 100644 src/python/gudhi/tensorflow/__init__.py create mode 100644 src/python/gudhi/tensorflow/perslay.py diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 96107cfe..ddd2d93f 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -67,6 +67,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'euclidean_strong_witness_complex', ") # Modules that should not be auto-imported in __init__.py set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'representations', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'tensorflow', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'wasserstein', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'point_cloud', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'weighted_rips_complex', ") @@ -269,6 +270,7 @@ if(PYTHONINTERP_FOUND) # Other .py files file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") + file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py") diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index aed542c1..1789f3c1 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -371,11 +371,10 @@ PyTorch TensorFlow ---------- -`TensorFlow `_ is currently only used in some automatic differentiation tests. - -:class:`~gudhi.representations.PerslayModel` in the :doc:`persistence representations ` module requires -`TensorFlow 2 `_. +:class:`~gudhi.tensorflow.perslay` from the :doc:`persistence representations ` module +requires `TensorFlow `_. +`TensorFlow `_ is also used in some automatic differentiation tests. Scikit-learn ------------ diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index 35517ebb..52e95230 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -8,10 +8,16 @@ Representations manual .. 
include:: representations_sum.inc -This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains the `PersLay architecture `_, which is a general neural network architecture for performing deep learning with persistence diagrams. It is implemented in TensorFlow 2. +This module aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains `PersLay `_, which is a general neural network layer for performing deep learning with persistence diagrams, implemented in TensorFlow. A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes. +This `notebook `_ explains how to +efficiently combine machine learning and topological data analysis with the +:doc:`representations module` in a scikit-learn fashion. This `notebook `_ +and `this one `_ explain how to use PersLay. + + Examples -------- @@ -30,8 +36,6 @@ This example computes the first two Landscapes associated to a persistence diagr l=Landscape(num_landscapes=2,resolution=10).fit_transform(diags) print(l) -The output is: - .. testoutput:: [[1.02851895 2.05703791 2.57129739 1.54277843 0.89995409 1.92847304 @@ -45,13 +49,61 @@ Various kernels This small example is also provided :download:`diagram_vectorizations_distances_kernels.py <../example/diagram_vectorizations_distances_kernels.py>` -Machine Learning and Topological Data Analysis -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +PersLay +^^^^^^^ -This `notebook `_ explains how to -efficiently combine machine learning and topological data analysis with the -:doc:`representations module` in a scikit-learn fashion. This `notebook `_ and `this one `_ explain how to use the PersLay architecture. +.. 
testcode:: + import numpy as np + import tensorflow as tf + from sklearn.preprocessing import MinMaxScaler + import gudhi.representations as gdr + + diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] + diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) + diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) + + rho = tf.identity + phi = gdr.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1) + weight = gdr.PowerPerslayWeight(1.,0.) + perm_op = tf.math.reduce_sum + + perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + vectors = perslay(diagrams) + print(vectors) + +.. testoutput:: + + tf.Tensor( + [[[[1.7266072e-16] + [4.1706043e-09] + [1.1336876e-08] + [8.5738821e-12] + [2.1243891e-14]] + + [[4.1715076e-09] + [1.0074080e-01] + [2.7384272e-01] + [3.0724244e-02] + [7.6157507e-05]] + + [[8.0382870e-06] + [1.5802664e+00] + [8.2997030e-01] + [1.2395413e+01] + [3.0724116e-02]] + + [[8.0269419e-06] + [1.3065740e+00] + [9.0923014e+00] + [6.1664842e-02] + [1.3949171e-06]] + + [[9.0331329e-13] + [1.4954816e-07] + [1.5145997e-04] + [1.0205092e-06] + [7.8093526e-16]]]], shape=(1, 5, 5, 1), dtype=float32) Preprocessing ------------- @@ -81,11 +133,10 @@ Metrics :special-members: :show-inheritance: -Deep Learning -------------- -.. automodule:: gudhi.representations.perslay +PersLay +------- +.. automodule:: gudhi.tensorflow.perslay :members: :special-members: :show-inheritance: -.. include:: perslay_params.md diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc index 430e1c4e..cce91975 100644 --- a/src/python/doc/representations_sum.inc +++ b/src/python/doc/representations_sum.inc @@ -8,8 +8,9 @@ | | | | | | | :License: MIT | | | | | - | | | :Requires: `Scikit-learn `_, `TensorFlow 2 `_| + | | | :Requires: `Scikit-learn `_, `TensorFlow `_ | | | | | +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+ | * :doc:`representations` | +------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + diff --git a/src/python/gudhi/representations/__init__.py b/src/python/gudhi/representations/__init__.py index b8ed7293..f020248d 100644 --- a/src/python/gudhi/representations/__init__.py +++ b/src/python/gudhi/representations/__init__.py @@ -2,6 +2,5 @@ from .kernel_methods import * from .metrics import * from .preprocessing import * from .vector_methods import * -from .perslay import * -__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods", "perslay"] +__all__ = ["kernel_methods", "metrics", "preprocessing", "vector_methods"] diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py deleted file mode 100644 index b4778496..00000000 --- a/src/python/gudhi/representations/perslay.py +++ /dev/null @@ -1,151 +0,0 @@ -# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. -# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
-# Author(s): Mathieu Carrière -# -# Copyright (C) 2018-2019 Inria -# -# Modification(s): -# - YYYY/MM Author: Description of the modification - -import tensorflow as tf -import numpy as np - -class GridPerslayWeight(tf.keras.layers.Layer): - - def __init__(self, grid, grid_bnds, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.grid = tf.Variable(initial_value=grid, trainable=True) - self.grid_bnds = grid_bnds - - def build(self, input_shape): - return self - - def call(self, diagrams): - grid_shape = self.grid.shape - indices = [] - for dim in range(2): - [m,M] = self.grid_bnds[dim] - coords = tf.expand_dims(diagrams[:,:,dim],-1) - ids = grid_shape[dim]*(coords-m)/(M-m) - indices.append(tf.cast(ids, tf.int32)) - weight = tf.gather_nd(params=self.grid, indices=tf.concat(indices, axis=2)) - return weight - -class GaussianMixturePerslayWeight(tf.keras.layers.Layer): - - def __init__(self, gaussians, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.W = tf.Variable(initial_value=gaussians, trainable=True) - - def build(self, input_shape): - return self - - def call(self, diagrams): - means = tf.expand_dims(tf.expand_dims(self.W[:2,:],0),0) - variances = tf.expand_dims(tf.expand_dims(self.W[2:,:],0),0) - diagrams = tf.expand_dims(diagrams, -1) - dists = tf.math.multiply(tf.math.square(diagrams-means), 1/tf.math.square(variances)) - weight = tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-dists, axis=2)), axis=2) - return weight - -class PowerPerslayWeight(tf.keras.layers.Layer): - - def __init__(self, constant, power, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.constant = tf.Variable(initial_value=constant, trainable=True) - self.power = power - - def build(self, input_shape): - return self - - def call(self, diagrams): - weight = self.constant * tf.math.pow(tf.math.abs(diagrams[:,:,1]-diagrams[:,:,0]), self.power) - return weight - - -class GaussianPerslayPhi(tf.keras.layers.Layer): - - def __init__(self, image_size, image_bnds, variance, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.image_size = image_size - self.image_bnds = image_bnds - self.variance = tf.Variable(initial_value=variance, trainable=True) - - def build(self, input_shape): - return self - - def call(self, diagrams): - diagrams_d = tf.concat([diagrams[:,:,0:1], diagrams[:,:,1:2]-diagrams[:,:,0:1]], axis=2) - step = [(self.image_bnds[i][1]-self.image_bnds[i][0])/self.image_size[i] for i in range(2)] - coords = [tf.range(self.image_bnds[i][0], self.image_bnds[i][1], step[i]) for i in range(2)] - M = tf.meshgrid(*coords) - mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0) - for _ in range(2): - diagrams_d = tf.expand_dims(diagrams_d,-1) - dists = tf.math.square(diagrams_d-mu) / (2*tf.math.square(self.variance)) - gauss = tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*np.pi*tf.math.square(self.variance)) - return tf.expand_dims(gauss,-1), M[0].shape + tuple([1]) - -class TentPerslayPhi(tf.keras.layers.Layer): - - def __init__(self, samples, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.samples = tf.Variable(initial_value=samples, trainable=True) - - def build(self, input_shape): - return self - - def call(self, diagrams): - samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0) - xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2] - tent = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), np.array([0.])) - return tent, self.samples.shape - -class FlatPerslayPhi(tf.keras.layers.Layer): - - def __init__(self, samples, theta, 
**kwargs): - super().__init__(dynamic=True, **kwargs) - self.samples = tf.Variable(initial_value=samples, trainable=True) - self.theta = tf.Variable(initial_value=theta, trainable=True) - - def build(self, input_shape): - return self - - def call(self, diagrams): - samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0) - xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2] - flat = 1./(1.+tf.math.exp(-self.theta*(.5*(ys-xs)-tf.math.abs(samples_d-.5*(ys+xs))))) - return flat, self.samples.shape - -class Perslay(tf.keras.layers.Layer): - - def __init__(self, weight, phi, perm_op, rho, **kwargs): - super().__init__(dynamic=True, **kwargs) - self.weight = weight - self.phi = phi - self.pop = perm_op - self.rho = rho - - def build(self, input_shape): - return self - - def call(self, diagrams): - - vector, dim = self.phi(diagrams) - weight = self.weight(diagrams) - for _ in range(len(dim)): - weight = tf.expand_dims(weight, -1) - vector = tf.math.multiply(vector, weight) - - permop = self.pop - if type(permop) == str and permop[:3] == 'top': - k = int(permop[3:]) - vector = vector.to_tensor(default_value=-1e10) - vector = tf.math.top_k(tf.transpose(vector, perm=[0, 2, 1]), k=k).values - vector = tf.reshape(vector, [-1,k*dim[0]]) - else: - vector = permop(vector, axis=1) - - vector = self.rho(vector) - - return vector diff --git a/src/python/gudhi/tensorflow/__init__.py b/src/python/gudhi/tensorflow/__init__.py new file mode 100644 index 00000000..e98d5a31 --- /dev/null +++ b/src/python/gudhi/tensorflow/__init__.py @@ -0,0 +1,3 @@ +from .perslay import * + +__all__ = ["Perslay", "GridPerslayWeight", "GaussianMixturePerslayWeight", "PowerPerslayWeight", "GaussianPerslayPhi", "TentPerslayPhi", "FlatPerslayPhi"] diff --git a/src/python/gudhi/tensorflow/perslay.py b/src/python/gudhi/tensorflow/perslay.py new file mode 100644 index 00000000..69acc529 --- /dev/null +++ b/src/python/gudhi/tensorflow/perslay.py @@ -0,0 +1,284 @@ +# This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. +# See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. +# Author(s): Mathieu Carrière +# +# Copyright (C) 2021 Inria +# +# Modification(s): +# - YYYY/MM Author: Description of the modification + +import tensorflow as tf +import numpy as np + +class GridPerslayWeight(tf.keras.layers.Layer): + """ + This is a class for computing a differentiable weight function for persistence diagram points. This function is defined from an array that contains its values on a 2D grid. + """ + def __init__(self, grid, grid_bnds, **kwargs): + """ + Constructor for the GridPerslayWeight class. + + Parameters: + grid (n x n numpy array): grid of values. + grid_bnds (2 x 2 numpy array): boundaries of the grid, of the form [[min_x, max_x], [min_y, max_y]]. + """ + super().__init__(dynamic=True, **kwargs) + self.grid = tf.Variable(initial_value=grid, trainable=True) + self.grid_bnds = grid_bnds + + def build(self, input_shape): + return self + + def call(self, diagrams): + """ + Apply GridPerslayWeight on a ragged tensor containing a list of persistence diagrams. + + Parameters: + diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + + Returns: + weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. 
+ """ + grid_shape = self.grid.shape + indices = [] + for dim in range(2): + [m,M] = self.grid_bnds[dim] + coords = tf.expand_dims(diagrams[:,:,dim],-1) + ids = grid_shape[dim]*(coords-m)/(M-m) + indices.append(tf.cast(ids, tf.int32)) + weight = tf.gather_nd(params=self.grid, indices=tf.concat(indices, axis=2)) + return weight + +class GaussianMixturePerslayWeight(tf.keras.layers.Layer): + """ + This is a class for computing a differentiable weight function for persistence diagram points. This function is defined from a mixture of Gaussian functions. + """ + def __init__(self, gaussians, **kwargs): + """ + Constructor for the GridPerslayWeight class. + + Parameters: + gaussians (4 x n numpy array): parameters of the n Gaussian functions, of the form transpose([[mu_x^1, mu_y^1, sigma_x^1, sigma_y^1], ..., [mu_x^n, mu_y^n, sigma_x^n, sigma_y^n]]). + """ + super().__init__(dynamic=True, **kwargs) + self.W = tf.Variable(initial_value=gaussians, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + """ + Apply GaussianMixturePerslayWeight on a ragged tensor containing a list of persistence diagrams. + + Parameters: + diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + + Returns: + weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + """ + means = tf.expand_dims(tf.expand_dims(self.W[:2,:],0),0) + variances = tf.expand_dims(tf.expand_dims(self.W[2:,:],0),0) + diagrams = tf.expand_dims(diagrams, -1) + dists = tf.math.multiply(tf.math.square(diagrams-means), 1/tf.math.square(variances)) + weight = tf.math.reduce_sum(tf.math.exp(tf.math.reduce_sum(-dists, axis=2)), axis=2) + return weight + +class PowerPerslayWeight(tf.keras.layers.Layer): + """ + This is a class for computing a differentiable weight function for persistence diagram points. This function is defined as a constant multiplied by the distance to the diagonal of the persistence diagram point raised to some power. + """ + def __init__(self, constant, power, **kwargs): + """ + Constructor for the PowerPerslayWeight class. + + Parameters: + constant (float): constant value. + power (float): power applied to the distance to the diagonal. + """ + super().__init__(dynamic=True, **kwargs) + self.constant = tf.Variable(initial_value=constant, trainable=True) + self.power = power + + def build(self, input_shape): + return self + + def call(self, diagrams): + """ + Apply PowerPerslayWeight on a ragged tensor containing a list of persistence diagrams. + + Parameters: + diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + + Returns: + weight (n x None): ragged tensor containing the weights of the points in the n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + """ + weight = self.constant * tf.math.pow(tf.math.abs(diagrams[:,:,1]-diagrams[:,:,0]), self.power) + return weight + + +class GaussianPerslayPhi(tf.keras.layers.Layer): + """ + This is a class for computing a transformation function for persistence diagram points. 
This function turns persistence diagram points into 2D Gaussian functions centered on the points, that are then evaluated on a regular 2D grid. + """ + def __init__(self, image_size, image_bnds, variance, **kwargs): + """ + Constructor for the GaussianPerslayPhi class. + + Parameters: + image_size (int numpy array): number of grid elements on each grid axis, of the form [n_x, n_y]. + image_bnds (2 x 2 numpy array): boundaries of the grid, of the form [[min_x, max_x], [min_y, max_y]]. + variance (float): variance of the Gaussian functions. + """ + super().__init__(dynamic=True, **kwargs) + self.image_size = image_size + self.image_bnds = image_bnds + self.variance = tf.Variable(initial_value=variance, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + """ + Apply GaussianPerslayPhi on a ragged tensor containing a list of persistence diagrams. + + Parameters: + diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. + + Returns: + output (n x None x image_size x image_size x 1): ragged tensor containing the evaluations on the 2D grid of the 2D Gaussian functions corresponding to the persistence diagram points, in the form of a 2D image with 1 channel that can be processed with, e.g., convolutional layers. The second dimension is ragged since persistence diagrams can have different numbers of points. + output_shape (int numpy array): shape of the output tensor. + """ + diagrams_d = tf.concat([diagrams[:,:,0:1], diagrams[:,:,1:2]-diagrams[:,:,0:1]], axis=2) + step = [(self.image_bnds[i][1]-self.image_bnds[i][0])/self.image_size[i] for i in range(2)] + coords = [tf.range(self.image_bnds[i][0], self.image_bnds[i][1], step[i]) for i in range(2)] + M = tf.meshgrid(*coords) + mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0) + for _ in range(2): + diagrams_d = tf.expand_dims(diagrams_d,-1) + dists = tf.math.square(diagrams_d-mu) / (2*tf.math.square(self.variance)) + gauss = tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*np.pi*tf.math.square(self.variance)) + output = tf.expand_dims(gauss,-1) + output_shape = M[0].shape + tuple([1]) + return output, output_shape + +class TentPerslayPhi(tf.keras.layers.Layer): + """ + This is a class for computing a transformation function for persistence diagram points. This function turns persistence diagram points into 1D tent functions (linearly increasing on the first half of the bar corresponding to the point from zero to half of the bar length, linearly decreasing on the second half and zero elsewhere) centered on the points, that are then evaluated on a regular 1D grid. + """ + def __init__(self, samples, **kwargs): + """ + Constructor for the GaussianPerslayPhi class. + + Parameters: + samples (float numpy array): grid elements on which to evaluate the tent functions, of the form [x_1, ..., x_n]. + """ + super().__init__(dynamic=True, **kwargs) + self.samples = tf.Variable(initial_value=samples, trainable=True) + + def build(self, input_shape): + return self + + def call(self, diagrams): + """ + Apply TentPerslayPhi on a ragged tensor containing a list of persistence diagrams. + + Parameters: + diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points. 
+
+        Returns:
+            output (n x None x num_samples): ragged tensor containing the evaluations on the 1D grid of the 1D tent functions corresponding to the persistence diagram points. The second dimension is ragged since persistence diagrams can have different numbers of points.
+            output_shape (int numpy array): shape of the output tensor.
+        """
+        samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0)
+        xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2]
+        output = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), np.array([0.]))
+        output_shape = self.samples.shape
+        return output, output_shape
+
+class FlatPerslayPhi(tf.keras.layers.Layer):
+    """
+    This is a class for computing a transformation function for persistence diagram points. This function turns persistence diagram points into 1D constant functions (that evaluate to half of the bar length on the bar corresponding to the point and zero elsewhere), that are then evaluated on a regular 1D grid.
+    """
+    def __init__(self, samples, theta, **kwargs):
+        """
+        Constructor for the FlatPerslayPhi class.
+
+        Parameters:
+            samples (float numpy array): grid elements on which to evaluate the constant functions, of the form [x_1, ..., x_n].
+            theta (float): sigmoid parameter used to approximate the constant function with a differentiable sigmoid function. The larger theta is, the closer the output will be to a true constant function.
+        """
+        super().__init__(dynamic=True, **kwargs)
+        self.samples = tf.Variable(initial_value=samples, trainable=True)
+        self.theta = tf.Variable(initial_value=theta, trainable=True)
+
+    def build(self, input_shape):
+        return self
+
+    def call(self, diagrams):
+        """
+        Apply FlatPerslayPhi on a ragged tensor containing a list of persistence diagrams.
+
+        Parameters:
+            diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+        Returns:
+            output (n x None x num_samples): ragged tensor containing the evaluations on the 1D grid of the 1D constant functions corresponding to the persistence diagram points. The second dimension is ragged since persistence diagrams can have different numbers of points.
+            output_shape (int numpy array): shape of the output tensor.
+        """
+        samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0)
+        xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2]
+        output = 1./(1.+tf.math.exp(-self.theta*(.5*(ys-xs)-tf.math.abs(samples_d-.5*(ys+xs)))))
+        output_shape = self.samples.shape
+        return output, output_shape
+
+class Perslay(tf.keras.layers.Layer):
+    """
+    This is a TensorFlow layer for vectorizing persistence diagrams in a differentiable way within a neural network. This layer implements the PersLay equation, see `the corresponding article `_.
+    """
+    def __init__(self, weight, phi, perm_op, rho, **kwargs):
+        """
+        Constructor for the Perslay class.
+
+        Parameters:
+            weight (function): weight function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.GridPerslayWeight`, :class:`~gudhi.tensorflow.GaussianMixturePerslayWeight`, :class:`~gudhi.tensorflow.PowerPerslayWeight`, or a custom function.
+            phi (function): transformation function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.GaussianPerslayPhi`, :class:`~gudhi.tensorflow.TentPerslayPhi`, :class:`~gudhi.tensorflow.FlatPerslayPhi`, or a custom function.
+            perm_op (function): permutation invariant function, such as `tf.math.reduce_sum`, `tf.math.reduce_mean`, `tf.math.reduce_max`, `tf.math.reduce_min`, or a custom function. If perm_op is the string "topk" (where k is a number), this function will be computed as `tf.math.top_k` with parameter `int(k)`.
+            rho (function): postprocessing function that is applied after the permutation invariant operation. Can be any TensorFlow layer.
+        """
+        super().__init__(dynamic=True, **kwargs)
+        self.weight = weight
+        self.phi = phi
+        self.pop = perm_op
+        self.rho = rho
+
+    def build(self, input_shape):
+        return self
+
+    def call(self, diagrams):
+        """
+        Apply Perslay on a ragged tensor containing a list of persistence diagrams.
+
+        Parameters:
+            diagrams (n x None x 2): ragged tensor containing n persistence diagrams. The second dimension is ragged since persistence diagrams can have different numbers of points.
+
+        Returns:
+            vector (n x output_shape): tensor containing the vectorizations of the persistence diagrams.
+        """
+        vector, dim = self.phi(diagrams)
+        weight = self.weight(diagrams)
+        for _ in range(len(dim)):
+            weight = tf.expand_dims(weight, -1)
+        vector = tf.math.multiply(vector, weight)
+
+        permop = self.pop
+        if type(permop) == str and permop[:3] == 'top':
+            k = int(permop[3:])
+            vector = vector.to_tensor(default_value=-1e10)
+            vector = tf.math.top_k(tf.transpose(vector, perm=[0, 2, 1]), k=k).values
+            vector = tf.reshape(vector, [-1,k*dim[0]])
+        else:
+            vector = permop(vector, axis=1)
+
+        vector = self.rho(vector)
+
+        return vector
diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py
index c0e363ec..bf0d54af 100644
--- a/src/python/test/test_perslay.py
+++ b/src/python/test/test_perslay.py
@@ -1,9 +1,8 @@
-import sys
 import numpy as np
 import tensorflow as tf
-import matplotlib.pyplot as plt
 from sklearn.preprocessing import MinMaxScaler
-import gudhi.representations as gdr
+from gudhi.tensorflow import *
+import gudhi.representations as gdr
 
 def test_gaussian_perslay():
 
@@ -12,13 +11,48 @@
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1)
-    weight = gdr.PowerPerslayWeight(1.,0.)
+    phi = GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1)
+    weight = PowerPerslayWeight(1.,0.)
     perm_op = tf.math.reduce_sum
 
-    perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+    perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
     vectors = perslay(diagrams)
+
+    assert np.linalg.norm(vectors.numpy() - np.array(
+[[[[1.7266072e-16],
+  [4.1706043e-09],
+  [1.1336876e-08],
+  [8.5738821e-12],
+  [2.1243891e-14]],
+
+ [[4.1715076e-09],
+  [1.0074080e-01],
+  [2.7384272e-01],
+  [3.0724244e-02],
+  [7.6157507e-05]],
+
+ [[8.0382870e-06],
+  [1.5802664e+00],
+  [8.2997030e-01],
+  [1.2395413e+01],
+  [3.0724116e-02]],
+
+ [[8.0269419e-06],
+  [1.3065740e+00],
+  [9.0923014e+00],
+  [6.1664842e-02],
+  [1.3949171e-06]],
+
+ [[9.0331329e-13],
+  [1.4954816e-07],
+  [1.5145997e-04],
+  [1.0205092e-06],
+  [7.8093526e-16]]]])) <= 1e-7
+
 def test_tent_perslay():
 
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
@@ -26,13 +60,29 @@
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.TentPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32))
-    weight = gdr.PowerPerslayWeight(1.,0.)
+    phi = TentPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32))
+    weight = PowerPerslayWeight(1.,0.)
     perm_op = 'top3'
 
-    perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+    perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
     vectors = perslay(diagrams)
+
+    assert np.linalg.norm(vectors-np.array([[0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0.09999999, 0., 0.,
+ 0.2, 0.05, 0., 0.19999999, 0., 0.,
+ 0.09999999, 0.02500001, 0., 0.125, 0., 0.,
+ 0.22500002, 0., 0., 0.3, 0., 0.,
+ 0.19999999, 0.05000001, 0., 0.10000002, 0.10000002, 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 0., 0., 0. ]])) <= 1e-7
+
 def test_flat_perslay():
 
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
@@ -40,13 +90,20 @@
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.)
-    weight = gdr.PowerPerslayWeight(1.,0.)
+    phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+    weight = PowerPerslayWeight(1.,0.)
     perm_op = tf.math.reduce_sum
 
-    perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+    perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
     vectors = perslay(diagrams)
+
+    assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.8048651e-35, 3.9754645e-31, 8.7565101e-27,
+ 1.9287571e-22, 4.2483860e-18, 9.3576392e-14, 2.0611652e-09, 4.5398087e-05,
+ 5.0000376e-01, 1.0758128e+00, 1.9933071e+00, 1.0072457e+00, 1.9240967e+00,
+ 1.4999963e+00, 1.0000458e+00, 1.0066929e+00, 1.9933071e+00, 1.9999092e+00,
+ 1.0000000e+00, 9.0795562e-05, 4.1222914e-09, 1.8715316e-13, 8.4967405e-18,
+ 3.8574998e-22, 1.7512956e-26, 7.9508388e-31, 3.6097302e-35, 0.0000000e+00]])) <= 1e-7
+
 def test_gmix_weight():
 
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
@@ -54,13 +111,20 @@
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.)
-    weight = gdr.GaussianMixturePerslayWeight(np.array([[.5],[.5],[5],[5]], dtype=np.float32))
+    phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+    weight = GaussianMixturePerslayWeight(np.array([[.5],[.5],[5],[5]], dtype=np.float32))
     perm_op = tf.math.reduce_sum
 
-    perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+    perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
     vectors = perslay(diagrams)
+
+    assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.7869064e-35, 3.9359080e-31, 8.6693818e-27,
+ 1.9095656e-22, 4.2061142e-18, 9.2645292e-14, 2.0406561e-09, 4.4946366e-05,
+ 4.9502861e-01, 1.0652492e+00, 1.9753191e+00, 9.9723548e-01, 1.9043801e+00,
+ 1.4844525e+00, 9.8947650e-01, 9.9604094e-01, 1.9703994e+00, 1.9769192e+00,
+ 9.8850453e-01, 8.9751818e-05, 4.0749040e-09, 1.8500175e-13, 8.3990662e-18,
+ 3.8131562e-22, 1.7311636e-26, 7.8594399e-31, 3.5682349e-35, 0.0000000e+00]])) <= 1e-7
+
 def test_grid_weight():
 
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
@@ -68,10 +132,16 @@
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.FlatPerslayPhi(np.array(np.arange(-1.,2.,.001), dtype=np.float32), 100.)
-    weight = gdr.GridPerslayWeight(np.array(np.random.uniform(size=[100,100]),dtype=np.float32),((-0.01, 1.01),(-0.01, 1.01)))
+    phi = FlatPerslayPhi(np.array(np.arange(-1.,2.,.1), dtype=np.float32), 100.)
+    weight = GridPerslayWeight(np.array(np.random.uniform(size=[100,100]),dtype=np.float32),((-0.01, 1.01),(-0.01, 1.01)))
     perm_op = tf.math.reduce_sum
 
-    perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
+    perslay = Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho)
     vectors = perslay(diagrams)
+
+    assert np.linalg.norm(vectors-np.array([[0.0000000e+00, 0.0000000e+00, 1.5124093e-37, 3.3314498e-33, 7.3379791e-29,
+ 1.6163036e-24, 3.5601592e-20, 7.8417273e-16, 1.7272621e-11, 3.8043717e-07,
+ 4.1902456e-03, 1.7198652e-02, 1.2386327e-01, 9.2694648e-03, 1.9515079e-01,
+ 2.0629172e-01, 2.0210314e-01, 2.0442720e-01, 5.4709727e-01, 5.4939687e-01,
+ 2.7471092e-01, 2.4942532e-05, 1.1324385e-09, 5.1413016e-14, 2.3341474e-18,
+ 1.0596973e-22, 4.8110000e-27, 2.1841823e-31, 9.9163230e-36, 0.0000000e+00]])) <= 1e-7
--
cgit v1.2.3

From fb49c0274418a6186eafc07c946bd681f4bb7112 Mon Sep 17 00:00:00 2001
From: MathieuCarriere
Date: Wed, 2 Feb 2022 22:27:04 +0100
Subject: fix bug in doc

---
 ext/gudhi-deploy                   | 2 +-
 src/python/doc/representations.rst | 7 ++++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/ext/gudhi-deploy b/ext/gudhi-deploy
index 290ade10..975d1bff 160000
--- a/ext/gudhi-deploy
+++ b/ext/gudhi-deploy
@@ -1 +1 @@
-Subproject commit 290ade1086bedbc96a35df886cadecabbf4072e6
+Subproject commit 975d1bffb317f3b84bf1a3d576cdfdbf7b45861c

diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index 52e95230..2d66fa68 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -58,17 +58,18 @@ PersLay
     import tensorflow as tf
     from sklearn.preprocessing import MinMaxScaler
     import gudhi.representations as gdr
+    import gudhi.tensorflow as gdtf
 
     diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
     diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams)
     diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))
 
     rho = tf.identity
-    phi = gdr.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1)
-    weight = gdr.PowerPerslayWeight(1.,0.)
+ phi = gdtf.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1) + weight = gdtf.PowerPerslayWeight(1.,0.) perm_op = tf.math.reduce_sum - perslay = gdr.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + perslay = gdtf.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) vectors = perslay(diagrams) print(vectors) -- cgit v1.2.3 From 1348e3cc9b42b7624e41862d30d7b6cc474d427b Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Wed, 29 Jun 2022 17:35:10 +0200 Subject: fix CMake --- src/python/CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index fb4ef6a4..5a0c9d78 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -290,13 +290,8 @@ if(PYTHONINTERP_FOUND) # Other .py files file(COPY "gudhi/persistence_graphical_tools.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/representations" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") -<<<<<<< HEAD file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/") file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") -======= - file(COPY "gudhi/wasserstein" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") - file(COPY "gudhi/tensorflow" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") ->>>>>>> 3e0e47b81ba488f6893933d8685fc1e7eec0e501 file(COPY "gudhi/point_cloud" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/clustering" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py") file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") -- cgit v1.2.3 From 85a93e6432771b7439ea7e2403dc702a66481033 Mon Sep 17 00:00:00 2001 From: MathieuCarriere Date: Sat, 5 Nov 2022 22:33:21 +0100 Subject: added a few comments in the doc --- src/python/doc/installation.rst | 3 --- src/python/gudhi/tensorflow/perslay.py | 18 +++++++++--------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 8df3f73e..50ddabfe 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -401,8 +401,6 @@ mathematics, science, and engineering. :class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package `SciPy `_ as a backend if explicitly requested. -<<<<<<< HEAD -======= TensorFlow ---------- @@ -411,7 +409,6 @@ and :doc:`Rips complex ` modules require `TensorFlo for incorporating them in neural nets. `TensorFlow `_ is also used in some automatic differentiation tests. 
->>>>>>> 3e0e47b81ba488f6893933d8685fc1e7eec0e501 Bug reports and contributions ***************************** diff --git a/src/python/gudhi/tensorflow/perslay.py b/src/python/gudhi/tensorflow/perslay.py index 69acc529..9976c5f3 100644 --- a/src/python/gudhi/tensorflow/perslay.py +++ b/src/python/gudhi/tensorflow/perslay.py @@ -8,7 +8,7 @@ # - YYYY/MM Author: Description of the modification import tensorflow as tf -import numpy as np +import math class GridPerslayWeight(tf.keras.layers.Layer): """ @@ -156,7 +156,7 @@ class GaussianPerslayPhi(tf.keras.layers.Layer): for _ in range(2): diagrams_d = tf.expand_dims(diagrams_d,-1) dists = tf.math.square(diagrams_d-mu) / (2*tf.math.square(self.variance)) - gauss = tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*np.pi*tf.math.square(self.variance)) + gauss = tf.math.exp(tf.math.reduce_sum(-dists, axis=2)) / (2*math.pi*tf.math.square(self.variance)) output = tf.expand_dims(gauss,-1) output_shape = M[0].shape + tuple([1]) return output, output_shape @@ -191,7 +191,7 @@ class TentPerslayPhi(tf.keras.layers.Layer): """ samples_d = tf.expand_dims(tf.expand_dims(self.samples,0),0) xs, ys = diagrams[:,:,0:1], diagrams[:,:,1:2] - output = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), np.array([0.])) + output = tf.math.maximum(.5*(ys-xs) - tf.math.abs(samples_d-.5*(ys+xs)), tf.constant([0.])) output_shape = self.samples.shape return output, output_shape @@ -238,17 +238,17 @@ class Perslay(tf.keras.layers.Layer): def __init__(self, weight, phi, perm_op, rho, **kwargs): """ Constructor for the Perslay class. - + Parameters: - weight (function): weight function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.GridPerslayWeight`, :class:`~gudhi.tensorflow.GaussianMixturePerslayWeight`, :class:`~gudhi.tensorflow.PowerPerslayWeight`, or a custom function. - phi (function): transformation function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.GaussianPerslayPhi`, :class:`~gudhi.tensorflow.TentPerslayPhi`, :class:`~gudhi.tensorflow.FlatPerslayPhi`, or a custom function. - perm_op (function): permutation invariant function, such as `tf.math.reduce_sum`, `tf.math.reduce_mean`, `tf.math.reduce_max`, `tf.math.reduce_min`, or a custom function. If perm_op is the string "topk" (where k is a number), this function will be computed as `tf.math.top_k` with parameter `int(k)`. + weight (function): weight function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.perslay.GridPerslayWeight`, :class:`~gudhi.tensorflow.perslay.GaussianMixturePerslayWeight`, :class:`~gudhi.tensorflow.perslay.PowerPerslayWeight`, or a custom TensorFlow function that takes persistence diagrams as argument (represented as an (n x None x 2) ragged tensor, where n is the number of diagrams). + phi (function): transformation function for the persistence diagram points. Can be either :class:`~gudhi.tensorflow.perslay.GaussianPerslayPhi`, :class:`~gudhi.tensorflow.perslay.TentPerslayPhi`, :class:`~gudhi.tensorflow.perslay.FlatPerslayPhi`, or a custom TensorFlow class (that can have trainable parameters) with a method `call` that takes persistence diagrams as argument (represented as an (n x None x 2) ragged tensor, where n is the number of diagrams). 
+ perm_op (function): permutation invariant function, such as `tf.math.reduce_sum`, `tf.math.reduce_mean`, `tf.math.reduce_max`, `tf.math.reduce_min`, or a custom TensorFlow function that takes two arguments: a tensor and an axis on which to apply the permutation invariant operation. If perm_op is the string "topk" (where k is a number), this function will be computed as `tf.math.top_k` with parameter `int(k)`. rho (function): postprocessing function that is applied after the permutation invariant operation. Can be any TensorFlow layer. """ super().__init__(dynamic=True, **kwargs) self.weight = weight self.phi = phi - self.pop = perm_op + self.perm_op = perm_op self.rho = rho def build(self, input_shape): @@ -270,7 +270,7 @@ class Perslay(tf.keras.layers.Layer): weight = tf.expand_dims(weight, -1) vector = tf.math.multiply(vector, weight) - permop = self.pop + permop = self.perm_op if type(permop) == str and permop[:3] == 'top': k = int(permop[3:]) vector = vector.to_tensor(default_value=-1e10) -- cgit v1.2.3 From 224b13d5884598c5d4613ca2c8fc321e39153165 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 18 Nov 2022 13:20:14 +0100 Subject: Organize doc + link syntax --- src/python/doc/installation.rst | 14 ++++-------- src/python/doc/representations.rst | 47 ++++++++++++++++++++++++++++++++------ 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index c0077e89..7200b2f0 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -371,14 +371,6 @@ PyTorch `PyTorch `_ is currently only used as a dependency of `PyKeOps`_, and in some tests. -TensorFlow ----------- - -:class:`~gudhi.tensorflow.perslay` from the :doc:`persistence representations ` module -requires `TensorFlow `_. - -`TensorFlow `_ is also used in some automatic differentiation tests. - Scikit-learn ------------ @@ -404,11 +396,13 @@ mathematics, science, and engineering. TensorFlow ---------- +:class:`~gudhi.tensorflow.perslay.Perslay` from the :doc:`persistence representations ` module +requires `TensorFlow `_. The :doc:`cubical complex `, :doc:`simplex tree ` -and :doc:`Rips complex ` modules require `TensorFlow `_ +and :doc:`Rips complex ` modules require `TensorFlow`_ for incorporating them in neural nets. -`TensorFlow `_ is also used in some automatic differentiation tests. +`TensorFlow`_ is also used in some automatic differentiation tests. Bug reports and contributions ***************************** diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index 2d66fa68..c7be1555 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -8,14 +8,14 @@ Representations manual .. include:: representations_sum.inc -This module aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains `PersLay `_, which is a general neural network layer for performing deep learning with persistence diagrams, implemented in TensorFlow. 
+This module aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module also contains `PersLay `_, which is a general neural network layer for performing deep learning with persistence diagrams, implemented in TensorFlow. A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes. -This `notebook `_ explains how to +This `notebook `__ explains how to efficiently combine machine learning and topological data analysis with the -:doc:`representations module` in a scikit-learn fashion. This `notebook `_ -and `this one `_ explain how to use PersLay. +:doc:`representations module` in a scikit-learn fashion. This `notebook `__ +and `this one `__ explain how to use PersLay. Examples @@ -64,11 +64,11 @@ PersLay diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) - rho = tf.identity + rho = tf.identity phi = gdtf.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1) weight = gdtf.PowerPerslayWeight(1.,0.) perm_op = tf.math.reduce_sum - + perslay = gdtf.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) vectors = perslay(diagrams) print(vectors) @@ -136,8 +136,41 @@ Metrics PersLay ------- -.. automodule:: gudhi.tensorflow.perslay +.. autoclass:: gudhi.tensorflow.Perslay + :members: + :special-members: + :show-inheritance: + +Weight functions +^^^^^^^^^^^^^^^^ +.. autoclass:: gudhi.tensorflow.GaussianMixturePerslayWeight + :members: + :special-members: + :show-inheritance: + +.. autoclass:: gudhi.tensorflow.GridPerslayWeight + :members: + :special-members: + :show-inheritance: + +.. autoclass:: gudhi.tensorflow.PowerPerslayWeight + :members: + :special-members: + :show-inheritance: + +Phi functions +^^^^^^^^^^^^^ +.. autoclass:: gudhi.tensorflow.FlatPerslayPhi + :members: + :special-members: + :show-inheritance: + +.. autoclass:: gudhi.tensorflow.GaussianPerslayPhi :members: :special-members: :show-inheritance: +.. autoclass:: gudhi.tensorflow.TentPerslayPhi + :members: + :special-members: + :show-inheritance: -- cgit v1.2.3 From 59f971d72003b358591f6ae1686823d43ee925fe Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 18 Nov 2022 17:16:02 +0100 Subject: Fix doc test There was a mismatch between the size requested and the output. Hopefully, with this precision, the results should match on different computers... 
--- src/python/doc/representations.rst | 61 +++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index c7be1555..7eede2df 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -65,46 +65,47 @@ PersLay diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) rho = tf.identity - phi = gdtf.GaussianPerslayPhi((100, 100), ((-.5, 1.5), (-.5, 1.5)), .1) + phi = gdtf.GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1) weight = gdtf.PowerPerslayWeight(1.,0.) perm_op = tf.math.reduce_sum perslay = gdtf.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) vectors = perslay(diagrams) + np.set_printoptions(precision=5) print(vectors) .. testoutput:: tf.Tensor( - [[[[1.7266072e-16] - [4.1706043e-09] - [1.1336876e-08] - [8.5738821e-12] - [2.1243891e-14]] - - [[4.1715076e-09] - [1.0074080e-01] - [2.7384272e-01] - [3.0724244e-02] - [7.6157507e-05]] - - [[8.0382870e-06] - [1.5802664e+00] - [8.2997030e-01] - [1.2395413e+01] - [3.0724116e-02]] - - [[8.0269419e-06] - [1.3065740e+00] - [9.0923014e+00] - [6.1664842e-02] - [1.3949171e-06]] - - [[9.0331329e-13] - [1.4954816e-07] - [1.5145997e-04] - [1.0205092e-06] - [7.8093526e-16]]]], shape=(1, 5, 5, 1), dtype=float32) + [[[[1.72661e-16] + [4.17060e-09] + [1.13369e-08] + [8.57388e-12] + [2.12439e-14]] + + [[4.17151e-09] + [1.00741e-01] + [2.73843e-01] + [3.07242e-02] + [7.61575e-05]] + + [[8.03829e-06] + [1.58027e+00] + [8.29970e-01] + [1.23954e+01] + [3.07241e-02]] + + [[8.02694e-06] + [1.30657e+00] + [9.09230e+00] + [6.16648e-02] + [1.39492e-06]] + + [[9.03313e-13] + [1.49548e-07] + [1.51460e-04] + [1.02051e-06] + [7.80935e-16]]]], shape=(1, 5, 5, 1), dtype=float32) Preprocessing ------------- -- cgit v1.2.3 From dd07d34e91cdfecd539ddc6afa22992fa12d38c6 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Fri, 18 Nov 2022 20:22:50 +0100 Subject: Reset print precision after this example otherwise it also affects other random examples --- src/python/doc/representations.rst | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index 7eede2df..37d3aa53 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -52,7 +52,12 @@ This small example is also provided PersLay ^^^^^^^ -.. testcode:: +.. testsetup:: perslay + + import numpy + numpy.set_printoptions(precision=5) + +.. testcode:: perslay import numpy as np import tensorflow as tf @@ -71,10 +76,13 @@ PersLay perslay = gdtf.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) vectors = perslay(diagrams) - np.set_printoptions(precision=5) print(vectors) -.. testoutput:: +.. testcleanup:: perslay + + numpy.set_printoptions(precision=8) + +.. 
testoutput:: perslay tf.Tensor( [[[[1.72661e-16] -- cgit v1.2.3 From d43293fc7ce82315496bcecaa9359dd333bc0745 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sun, 20 Nov 2022 22:44:38 +0100 Subject: Keep perslay-related stuff under gudhi.tensorflow.perslay --- src/python/doc/representations.rst | 22 +++++++++++----------- src/python/gudhi/tensorflow/__init__.py | 3 +-- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst index 37d3aa53..5686974a 100644 --- a/src/python/doc/representations.rst +++ b/src/python/doc/representations.rst @@ -63,18 +63,18 @@ PersLay import tensorflow as tf from sklearn.preprocessing import MinMaxScaler import gudhi.representations as gdr - import gudhi.tensorflow as gdtf + import gudhi.tensorflow.perslay as prsl diagrams = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])] diagrams = gdr.DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diagrams) diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32)) rho = tf.identity - phi = gdtf.GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1) - weight = gdtf.PowerPerslayWeight(1.,0.) + phi = prsl.GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1) + weight = prsl.PowerPerslayWeight(1.,0.) perm_op = tf.math.reduce_sum - perslay = gdtf.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) + perslay = prsl.Perslay(phi=phi, weight=weight, perm_op=perm_op, rho=rho) vectors = perslay(diagrams) print(vectors) @@ -145,41 +145,41 @@ Metrics PersLay ------- -.. autoclass:: gudhi.tensorflow.Perslay +.. autoclass:: gudhi.tensorflow.perslay.Perslay :members: :special-members: :show-inheritance: Weight functions ^^^^^^^^^^^^^^^^ -.. autoclass:: gudhi.tensorflow.GaussianMixturePerslayWeight +.. autoclass:: gudhi.tensorflow.perslay.GaussianMixturePerslayWeight :members: :special-members: :show-inheritance: -.. autoclass:: gudhi.tensorflow.GridPerslayWeight +.. autoclass:: gudhi.tensorflow.perslay.GridPerslayWeight :members: :special-members: :show-inheritance: -.. autoclass:: gudhi.tensorflow.PowerPerslayWeight +.. autoclass:: gudhi.tensorflow.perslay.PowerPerslayWeight :members: :special-members: :show-inheritance: Phi functions ^^^^^^^^^^^^^ -.. autoclass:: gudhi.tensorflow.FlatPerslayPhi +.. autoclass:: gudhi.tensorflow.perslay.FlatPerslayPhi :members: :special-members: :show-inheritance: -.. autoclass:: gudhi.tensorflow.GaussianPerslayPhi +.. autoclass:: gudhi.tensorflow.perslay.GaussianPerslayPhi :members: :special-members: :show-inheritance: -.. autoclass:: gudhi.tensorflow.TentPerslayPhi +.. 
autoclass:: gudhi.tensorflow.perslay.TentPerslayPhi :members: :special-members: :show-inheritance: diff --git a/src/python/gudhi/tensorflow/__init__.py b/src/python/gudhi/tensorflow/__init__.py index fe01b9cc..1599cf52 100644 --- a/src/python/gudhi/tensorflow/__init__.py +++ b/src/python/gudhi/tensorflow/__init__.py @@ -1,6 +1,5 @@ from .cubical_layer import CubicalLayer from .lower_star_simplex_tree_layer import LowerStarSimplexTreeLayer from .rips_layer import RipsLayer -from .perslay import * -__all__ = ["Perslay", "GridPerslayWeight", "GaussianMixturePerslayWeight", "PowerPerslayWeight", "GaussianPerslayPhi", "TentPerslayPhi", "FlatPerslayPhi", "LowerStarSimplexTreeLayer", "RipsLayer", "CubicalLayer"] +__all__ = ["LowerStarSimplexTreeLayer", "RipsLayer", "CubicalLayer"] -- cgit v1.2.3 From 0f755b861c3702b8516dc31eef0ad66528485778 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Mon, 21 Nov 2022 01:11:30 +0100 Subject: Also update the test --- src/python/CMakeLists.txt | 5 +++++ src/python/test/test_perslay.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 344e988f..f6750a30 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -572,6 +572,11 @@ if(PYTHONINTERP_FOUND) add_gudhi_py_test(test_diff) endif() + # Perslay + if(TENSORFLOW_FOUND AND SKLEARN_FOUND) + add_gudhi_py_test(test_perslay) + endif() + # Betti curves if(SKLEARN_FOUND AND SCIPY_FOUND) add_gudhi_py_test(test_betti_curve_representations) diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py index bf0d54af..06497712 100644 --- a/src/python/test/test_perslay.py +++ b/src/python/test/test_perslay.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf from sklearn.preprocessing import MinMaxScaler -from gudhi.tensorflow import * +from gudhi.tensorflow.perslay import * import gudhi.representations as gdr def test_gaussian_perslay(): -- cgit v1.2.3
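
For reading convenience, the map that `Perslay.call` evaluates in every version of the code above can be written in one line: with D a persistence diagram, w the weight function, phi the point transformation, op the permutation invariant operation, and rho the postprocessing function,

    \mathrm{PersLay}(D) = \rho\Big(\mathop{\mathrm{op}}_{p \in D}\; w(p)\,\phi(p)\Big)

where the "topk" string variant replaces op by keeping the k largest values per output coordinate. This is a restatement of the code, not an additional feature.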
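The API that this series converges to can be exercised with a short standalone script. The sketch below simply mirrors the documented testcode, assuming a GUDHI build that contains these commits, with TensorFlow and scikit-learn installed:

    import numpy as np
    import tensorflow as tf
    from sklearn.preprocessing import MinMaxScaler
    import gudhi.representations as gdr
    import gudhi.tensorflow.perslay as prsl

    # One diagram with four (birth, death) pairs, rescaled to the unit square.
    diagrams = [np.array([[0., 4.], [1., 2.], [3., 8.], [6., 8.]])]
    diagrams = gdr.DiagramScaler(use=True, scalers=[([0, 1], MinMaxScaler())]).fit_transform(diagrams)
    diagrams = tf.RaggedTensor.from_tensor(tf.constant(diagrams, dtype=tf.float32))

    # Persistence-image-like vectorization: 2D Gaussians evaluated on a 5x5 grid,
    # constant weight 1 (power 0), summed over the diagram points.
    phi = prsl.GaussianPerslayPhi((5, 5), ((-.5, 1.5), (-.5, 1.5)), .1)
    weight = prsl.PowerPerslayWeight(1., 0.)
    perslay = prsl.Perslay(phi=phi, weight=weight, perm_op=tf.math.reduce_sum, rho=tf.identity)

    vectors = perslay(diagrams)
    print(vectors.shape)  # expected: (1, 5, 5, 1), matching the doctest output above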
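The Perslay constructor docstring also allows custom `weight` and `perm_op` functions. A minimal sketch of that contract follows; the name `unit_weight` and the diagram values are illustrative and not part of GUDHI, the only requirements being the signatures stated in the docstring:

    import numpy as np
    import tensorflow as tf
    import gudhi.tensorflow.perslay as prsl

    # Hypothetical custom weight: receives the (n x None x 2) ragged tensor of
    # diagrams and returns one weight per point; here every point gets weight 1.
    def unit_weight(diagrams):
        return tf.ones_like(diagrams[:, :, 0])

    diagrams = tf.RaggedTensor.from_tensor(
        tf.constant([[[0., 1.], [.25, .75]]], dtype=tf.float32))

    phi = prsl.FlatPerslayPhi(np.array(np.arange(-1., 2., .1), dtype=np.float32), 100.)
    # perm_op can be any function taking (tensor, axis); here a mean over points
    # instead of the built-in sum/max or the 'topk' string.
    perslay = prsl.Perslay(phi=phi, weight=unit_weight, perm_op=tf.math.reduce_mean, rho=tf.identity)
    print(perslay(diagrams).shape)  # one 30-dimensional vector per diagram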