author     MathieuCarriere <mathieu.carriere3@gmail.com>  2020-12-07 18:49:16 +0100
committer  MathieuCarriere <mathieu.carriere3@gmail.com>  2020-12-07 18:49:16 +0100
commit     73be9043e6a3e9541d2c5393634774ef512d4494 (patch)
tree       5ba4d6953451e9ea0cb9e8c0d1026cdc4a24cef2
parent     c7c6b2cae3a5c1cfa41954100740ea7a63da1189 (diff)
added more tests and improved doc
-rw-r--r--  src/python/doc/installation.rst                      13
-rw-r--r--  src/python/doc/representations.rst                   13
-rw-r--r--  src/python/doc/representations_sum.inc               23
-rw-r--r--  src/python/gudhi/representations/perslay.py          10
-rw-r--r--  src/python/gudhi/representations/perslay_params.md  103
-rw-r--r--  src/python/test/test_perslay.py                     183
6 files changed, 213 insertions, 132 deletions
diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst
index 66efe45a..9bec6c97 100644
--- a/src/python/doc/installation.rst
+++ b/src/python/doc/installation.rst
@@ -372,6 +372,15 @@ PyTorch
`PyTorch <https://pytorch.org/>`_ is currently only used as a dependency of
`PyKeOps`_, and in some tests.
+TensorFlow
+----------
+
+`TensorFlow <https://www.tensorflow.org>`_ is currently used in some automatic differentiation tests.
+
+:class:`~gudhi.representations.PerslayModel` in the :doc:`persistence representations </representations>` module requires
+`TensorFlow 2 <https://www.tensorflow.org/install/>`_.
+
+
Scikit-learn
------------
@@ -394,10 +403,6 @@ mathematics, science, and engineering.
:class:`~gudhi.point_cloud.knn.KNearestNeighbors` can use the Python package
`SciPy <http://scipy.org>`_ as a backend if explicitly requested.
-TensorFlow
-----------
-
-`TensorFlow <https://www.tensorflow.org>`_ is currently only used in some automatic differentiation tests.
Bug reports and contributions
*****************************
diff --git a/src/python/doc/representations.rst b/src/python/doc/representations.rst
index b0477197..35517ebb 100644
--- a/src/python/doc/representations.rst
+++ b/src/python/doc/representations.rst
@@ -8,7 +8,7 @@ Representations manual
.. include:: representations_sum.inc
-This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space.
+This module, originally available at https://github.com/MathieuCarriere/sklearn-tda and named sklearn_tda, aims at bridging the gap between persistence diagrams and machine learning, by providing implementations of most of the vector representations for persistence diagrams in the literature, in a scikit-learn format. More specifically, it provides tools, using the scikit-learn standard interface, to compute distances and kernels on persistence diagrams, and to convert these diagrams into vectors in Euclidean space. Moreover, this module contains the `PersLay architecture <http://proceedings.mlr.press/v108/carriere20a.html>`_, a general neural network architecture for performing deep learning with persistence diagrams. It is implemented in TensorFlow 2.
A diagram is represented as a numpy array of shape (n,2), as can be obtained from :func:`~gudhi.SimplexTree.persistence_intervals_in_dimension` for instance. Points at infinity are represented as a numpy array of shape (n,1), storing only the birth time. The classes in this module can handle several persistence diagrams at once. In that case, the diagrams are provided as a list of numpy arrays. Note that it is not necessary for the diagrams to have the same number of points, i.e., for the corresponding arrays to have the same number of rows: all classes can handle arrays with different shapes.
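For instance, a list of such diagrams can be fed directly to any of the vectorization classes. A minimal sketch, assuming `gudhi` and `numpy` are installed (the `Landscape` parameters are illustrative):

```python
import numpy as np
from gudhi.representations import Landscape

# Two diagrams with different numbers of points, each a numpy array of shape (n,2).
diags = [
    np.array([[0., 4.], [1., 2.], [3., 8.]]),
    np.array([[0., 1.], [2., 5.]]),
]

# Vectorization classes follow the scikit-learn fit/transform interface.
LS = Landscape(num_landscapes=2, resolution=10)
vectors = LS.fit_transform(diags)  # one row per diagram, shape (2, 2 * 10)
```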
@@ -50,7 +50,7 @@ Machine Learning and Topological Data Analysis
This `notebook <https://github.com/GUDHI/TDA-tutorial/blob/master/Tuto-GUDHI-representations.ipynb>`_ explains how to
efficiently combine machine learning and topological data analysis with the
-:doc:`representations module<representations>`.
+:doc:`representations module<representations>` in a scikit-learn fashion. This `notebook <https://github.com/MathieuCarriere/tda-tutorials/blob/perslay/Tuto-GUDHI-perslay-expe.ipynb>`_ and `this one <https://github.com/MathieuCarriere/tda-tutorials/blob/perslay/Tuto-GUDHI-perslay-visu.ipynb>`_ explain how to use the PersLay architecture.
Preprocessing
@@ -80,3 +80,12 @@ Metrics
:members:
:special-members:
:show-inheritance:
+
+Deep Learning
+-------------
+.. automodule:: gudhi.representations.perslay
+ :members:
+ :special-members:
+ :show-inheritance:
+
+.. include:: perslay_params.md
diff --git a/src/python/doc/representations_sum.inc b/src/python/doc/representations_sum.inc
index 4298aea9..430e1c4e 100644
--- a/src/python/doc/representations_sum.inc
+++ b/src/python/doc/representations_sum.inc
@@ -1,14 +1,15 @@
.. table::
:widths: 30 40 30
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer |
- | img/sklearn-tda.png | diagrams, compatible with scikit-learn. | |
- | | | :Since: GUDHI 3.1.0 |
- | | | |
- | | | :License: MIT |
- | | | |
- | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_ |
- +------------------------------------------------------------------+----------------------------------------------------------------+-------------------------------------------------------------+
- | * :doc:`representations` |
- +------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------+
+ +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+
+ | .. figure:: | Vectorizations, distances and kernels that work on persistence | :Author: Mathieu Carrière, Martin Royer |
+ | img/sklearn-tda.png | diagrams, compatible with scikit-learn and tensorflow. | |
+ | | | :Since: GUDHI 3.1.0 |
+ | | | |
+ | | | :License: MIT |
+ | | | |
+ | | | :Requires: `Scikit-learn <installation.html#scikit-learn>`_, `TensorFlow 2 <installation.html#tensorflow>`_|
+ | | | |
+ +------------------------------------------------------------------+----------------------------------------------------------------+------------------------------------------------------------------------------------------------------------+
+ | * :doc:`representations` |
+ +------------------------------------------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/python/gudhi/representations/perslay.py b/src/python/gudhi/representations/perslay.py
index f836a616..5e522fad 100644
--- a/src/python/gudhi/representations/perslay.py
+++ b/src/python/gudhi/representations/perslay.py
@@ -83,7 +83,7 @@ class PerslayModel(tf.keras.Model):
Attributes:
name (string): name of the layer. Used for naming variables.
diagdim (integer): dimension of persistence diagram points. Usually 2 but can handle more.
- perslay_parameters (dict): dictionary containing the PersLay parameters. See file perslay_params.md
+ perslay_parameters (dict): dictionary containing the PersLay parameters. See below.
rho (TensorFlow model): layers used to process the learned representations of persistence diagrams (for instance, a fully connected layer that outputs the number of classes). Use the string "identity" if you want to output the representations directly.
"""
def __init__(self, name, diagdim, perslay_parameters, rho):
@@ -165,7 +165,7 @@ class PerslayModel(tf.keras.Model):
elif layer == "RationalHat":
LMinit, LRinit = plp["lmean_init"], plp["lr_init"]
LMiv = LMinit if not callable(LMinit) else LMinit([self.diagdim, plp["lnum"]])
- LRiv = LRinit if not callable(LRinit) else LVinit([1])
+ LRiv = LRinit if not callable(LRinit) else LRinit([1])
LM = tf.Variable(name=Lname+"-M", initial_value=LMiv, trainable=Ltrain)
LR = tf.Variable(name=Lname+"-R", initial_value=LRiv, trainable=Ltrain)
self.vars[nf].append([LM, LR])
@@ -217,11 +217,11 @@ class PerslayModel(tf.keras.Model):
elif plp["layer"] == "Image":
tensor_diag = _image_layer(tensor_diag, plp["image_size"], plp["image_bnds"], lvars)
elif plp["layer"] == "Exponential":
- tensor_diag = _exponential_layer(tensor_diag, **lvars)
+ tensor_diag = _exponential_layer(tensor_diag, lvars[0], lvars[1])
elif plp["layer"] == "Rational":
- tensor_diag = _rational_layer(tensor_diag, **lvars)
+ tensor_diag = _rational_layer(tensor_diag, lvars[0], lvars[1], lvars[2])
elif plp["layer"] == "RationalHat":
- tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], **lvars)
+ tensor_diag = _rational_hat_layer(tensor_diag, plp["q"], lvars[0], lvars[1])
# Apply weight
output_dim = len(tensor_diag.shape) - 2
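
The switch from `**lvars` to explicit indexing in the hunk above is needed because `self.vars[nf]` stores each layer's variables as a Python list (e.g. `[LM, LR]`), and `**` unpacking requires a mapping. A minimal illustration, with a hypothetical function `f`:

```python
# Hypothetical two-argument function, standing in for e.g. _rational_hat_layer.
def f(x, y):
    return x + y

lvars = [1, 2]          # variables stored as a list, as in self.vars[nf]
f(lvars[0], lvars[1])   # works: pass the list entries positionally
# f(**lvars)            # TypeError: ** unpacking requires a mapping, not a list
```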
diff --git a/src/python/gudhi/representations/perslay_params.md b/src/python/gudhi/representations/perslay_params.md
deleted file mode 100644
index 7cc9caf3..00000000
--- a/src/python/gudhi/representations/perslay_params.md
+++ /dev/null
@@ -1,103 +0,0 @@
-In the following description of PersLay parameters, each parameter, or dictionary key, that contains `_init` in its name is optimized and learned by PersLay during training. If you do not want to optimize the vectorization, set the keys **layer_train** and **pweight_train** to False.
-
- * The following keys are mandatory:
-
- | **name** | **description** |
- | --- | --- |
- | **layer** | Either "PermutationEquivariant", "Image", "Landscape", "BettiCurve", "Entropy", "Exponential", "Rational" or "RationalHat". Type of the PersLay layer. "Image" is for [persistence images](https://arxiv.org/abs/1507.06217), "Landscape" is for [persistence landscapes](http://www.jmlr.org/papers/volume16/bubenik15a/bubenik15a.pdf), "Exponential", "Rational" and "RationalHat" are for [structure elements](http://jmlr.org/beta/papers/v20/18-358.html), "PermutationEquivariant" is for the original DeepSet layer, defined in [this article](https://arxiv.org/abs/1703.06114), "BettiCurve" is for [Betti curves](https://www.jstage.jst.go.jp/article/tjsai/32/3/32_D-G72/_pdf) and "Entropy" is for [entropy](https://arxiv.org/abs/1803.08304). |
- | **perm_op** | Either "sum", "mean", "max", "topk". Permutation invariant operation. |
- | **keep** | Number of top values to keep. Used only if **perm_op** is "topk". |
- | **pweight** | Either "power", "grid", "gmix" or None. Weight function to be applied on persistence diagram points. If "power", this function is a (trainable) coefficient times the distances to the diagonal of the points to a certain power. If "grid", this function is piecewise-constant and defined with pixel values of a grid. If "gmix", this function is defined as a mixture of Gaussians. If None, no weighting is applied. |
- | **final_model** | A Tensorflow / Keras model used to postprocess the persistence diagrams in each channel. Use "identity" if you don't want to postprocess. |
-
-Depending on what **pweight** is, the following additional keys are requested:
-
- * if **pweight** is "power":
-
- | **name** | **description** |
- | --- | --- |
- | **pweight_init** | Initializer of the coefficient of the power weight function. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **pweight_power** | Integer used for exponentiating the distances to the diagonal of the persistence diagram points. |
-
- * if **pweight** is "grid":
-
- | **name** | **description** |
- | --- | --- |
- | **pweight_size** | Grid size of the grid weight function. It is a tuple of integer values, such as (10,10). |
- | **pweight_bnds** | Grid boundaries of the grid weight function. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). |
- | **pweight_init** | Initializer for the pixel values of the grid weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).|
-
- * if **pweight** is "gmix":
-
- | **name** | **description** |
- | --- | --- |
- | **pweight_num** | Number of Gaussian functions of the mixture of Gaussians weight function. |
- | **pweight_init** | Initializer of the means and variances of the mixture of Gaussians weight function. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
-
-Depending on what **layer** is, the following additional keys are requested:
-
- * if **layer** is "PermutationEquivariant":
-
- | **name** | **description** |
- | --- | --- |
- | **lpeq** | Sequence of permutation equivariant operations, as defined in [the DeepSet article](https://arxiv.org/abs/1703.06114). It is a list of tuples of the form (*dim*, *operation*). Each tuple defines a permutation equivariant function of dimension *dim* and second permutation operation *operation* (string, either "max", "min", "sum" or None). Second permutation operation is optional and is not applied if *operation* is set to None. Example: [(150, "max"), (75, None)]. |
- | **lweight_init** | Initializer for the weight matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).|
- | **lbias_init** | Initializer for the biases of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **lgamma_init** | Initializer for the Gamma matrices of the permutation equivariant operations. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.).|
-
- * if **layer** is "Image":
-
- | **name** | **description** |
- | --- | --- |
- | **image_size** | Persistence image size. It is a tuple of integer values, such as (10,10). |
- | **image_bnds** | Persistence image boundaries. It is a tuple containing two tuples, each containing the minimum and maximum values of each axis of the plane. Example: ((-0.01, 1.01), (-0.01, 1.01)). |
- | **lvariance_init** | Initializer for the bandwidths of the Gaussian functions centered on the persistence image pixels. It can be either a single value, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 3.). |
-
- * if **layer** is "Landscape":
-
- | **name** | **description** |
- | --- | --- |
- | **lsample_num** | Number of samples of the diagonal that will be evaluated on the persistence landscapes. |
- | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
-
- * if **layer** is "BettiCurve":
-
- | **name** | **description** |
- | --- | --- |
- | **lsample_num** | Number of samples of the diagonal that will be evaluated on the Betti curves. |
- | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. |
-
- * if **layer** is "Entropy":
-
- | **name** | **description** |
- | --- | --- |
- | **lsample_num** | Number of samples of the diagonal that will be evaluated on the persistence entropies. |
- | **lsample_init** | Initializer of the samples of the diagonal. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **theta** | Sigmoid parameter used for approximating the piecewise constant functions associated to the persistence diagram points. |
-
- * if **layer** is "Exponential":
-
- | **name** | **description** |
- | --- | --- |
- | **lnum** | Number of exponential structure elements that will be evaluated on the persistence diagram points. |
- | **lmean_init** | Initializer of the means of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **lvariance_init** | Initializer of the bandwidths of the exponential structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). |
-
- * if **layer** is "Rational":
-
- | **name** | **description** |
- | --- | --- |
- | **lnum** | Number of rational structure elements that will be evaluated on the persistence diagram points. |
- | **lmean_init** | Initializer of the means of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **lvariance_init** | Initializer of the bandwidths of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). |
- | **lalpha_init** | Initializer of the exponents of the rational structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). |
-
- * if **layer** is "RationalHat":
-
- | **name** | **description** |
- | --- | --- |
- | **lnum** | Number of rational hat structure elements that will be evaluated on the persistence diagram points. |
- | **lmean_init** | Initializer of the means of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(0., 1.). |
- | **lr_init** | Initializer of the threshold of the rational hat structure elements. It can be either a numpy array of values, or a random initializer from tensorflow, such as tensorflow.random_uniform_initializer(3., 3.). |
- | **q** | Norm parameter. |
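
Putting these keys together, a complete `perslay_parameters` dictionary for the "Landscape" layer looks as follows. This is a sketch whose values mirror `test_perslay.py` below:

```python
import numpy as np
from gudhi.representations import PerslayModel

perslay_parameters = {
    "layer": "Landscape",        # type of PersLay layer
    "layer_train": False,        # freeze the layer variables
    "lsample_num": 3,            # number of diagonal samples
    "lsample_init": np.array(np.arange(-.1, 1.1, .5), dtype=np.float32),
    "pweight": None,             # no weight function on diagram points
    "perm_op": "topk",           # permutation-invariant operation
    "keep": 3,                   # number of top values kept by "topk"
    "final_model": "identity",   # no postprocessing of the representation
}

# One dictionary per diagram channel; rho="identity" returns the representations directly.
model = PerslayModel(name="perslay", diagdim=2,
                     perslay_parameters=[perslay_parameters], rho="identity")
```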
diff --git a/src/python/test/test_perslay.py b/src/python/test/test_perslay.py
index 72b02944..d20bfe14 100644
--- a/src/python/test/test_perslay.py
+++ b/src/python/test/test_perslay.py
@@ -6,8 +6,14 @@ import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow import random_uniform_initializer as rui
+my_devices = tf.config.experimental.list_physical_devices(device_type='CPU')
+tf.config.experimental.set_visible_devices(devices=my_devices, device_type='CPU')
+tf.config.experimental.set_visible_devices([], 'GPU')
+
from gudhi.representations import DiagramScaler, Padding, PerslayModel
+np.random.seed(0)
+gauss_init = np.array(np.vstack([np.random.uniform(0.,10.,[2,3]), 1e-5*np.ones([2,3])]), dtype=np.float32)
def test_perslay_image():
@@ -18,20 +24,21 @@ def test_perslay_image():
diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
perslayParameters = {}
- perslayParameters["pweight"] = None
- perslayParameters["perm_op"] = "sum"
perslayParameters["layer"] = "Image"
perslayParameters["layer_train"] = False
perslayParameters["image_size"] = (2,2)
perslayParameters["image_bnds"] = ((-.501, 1.501), (-.501, 1.501))
perslayParameters["lvariance_init"] = .1
+
+ perslayParameters["pweight"] = None
+ perslayParameters["perm_op"] = "sum"
+
perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()])
model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
vector = model([diagrams, empty_feats]).numpy()
-
assert vector.shape == (1,4)
assert np.abs(vector-np.array([[0,0,5.6e-5,3.3668644]])).sum() <= 1e-6
-
+
def test_perslay_landscape():
diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
@@ -41,17 +48,179 @@ def test_perslay_landscape():
diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
perslayParameters = {}
+ perslayParameters["layer"] = "Landscape"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lsample_num"] = 3
+ perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32)
+ perslayParameters["final_model"] = "identity"
+
perslayParameters["pweight"] = None
perslayParameters["perm_op"] = "topk"
perslayParameters["keep"] = 3
- perslayParameters["layer"] = "Landscape"
+
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,9)
+ assert np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6
+
+ perslayParameters["pweight"] = "power"
+ perslayParameters["pweight_power"] = 2
+ perslayParameters["pweight_init"] = 1.
+ perslayParameters["pweight_train"] = False
+ perslayParameters["perm_op"] = "sum"
+
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[0., 0.03476562, 0.04531251]])).sum() <= 1e-6
+
+def test_perslay_betti():
+
+ diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag)
+ diag = Padding(use=True).fit_transform(diag)
+ D = np.stack(np.array(diag, dtype=np.float32), 0)
+ diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
+ perslayParameters = {}
+
+ perslayParameters["layer"] = "BettiCurve"
perslayParameters["layer_train"] = False
perslayParameters["lsample_num"] = 3
perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32)
+ perslayParameters["theta"] = 1.
perslayParameters["final_model"] = "identity"
+
+ perslayParameters["pweight"] = "grid"
+ perslayParameters["pweight_size"] = [100,100]
+ perslayParameters["pweight_bnds"] = ((-.001, 10.001), (-.001, 10.001))
+ perslayParameters["pweight_init"] = np.tile(np.arange(0.,100.,1, dtype=np.float32)[np.newaxis,:], [100,1])
+ perslayParameters["pweight_train"] = False
+ perslayParameters["perm_op"] = "sum"
model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[10.091741, 12.746357, 13.192123]])).sum() <= 1e-6
- assert vector.shape == (1,9)
- assert np.abs(vector-np.array([[0.,0.,0.,0.1,0.025,0.,0.1,0.1,0.]])).sum() <= 1e-6
+
+def test_perslay_entropy():
+
+ diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag)
+ diag = Padding(use=True).fit_transform(diag)
+ D = np.stack(np.array(diag, dtype=np.float32), 0)
+ diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
+ perslayParameters = {}
+
+ perslayParameters["layer"] = "Entropy"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lsample_num"] = 3
+ perslayParameters["lsample_init"] = np.array(np.arange(-.1,1.1,.5), dtype=np.float32)
+ perslayParameters["theta"] = 1.
+ perslayParameters["final_model"] = "identity"
+
+ perslayParameters["pweight"] = "gmix"
+ perslayParameters["pweight_num"] = 3
+ perslayParameters["pweight_init"] = gauss_init
+ perslayParameters["pweight_train"] = False
+ perslayParameters["perm_op"] = "sum"
+
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[1.4855406, 1.7884576, 1.6987829]])).sum() <= 1e-6
+
+def test_perslay_rational():
+
+ diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag)
+ diag = Padding(use=True).fit_transform(diag)
+ D = np.stack(np.array(diag, dtype=np.float32), 0)
+ diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
+ perslayParameters = {}
+
+ perslayParameters["layer"] = "Rational"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lnum"] = 3
+ perslayParameters["lmean_init"] = gauss_init[:2,:]
+ perslayParameters["lvariance_init"] = gauss_init[2:,:]
+ perslayParameters["lalpha_init"] = rui(1., 1.)
+
+ perslayParameters["pweight"] = "power"
+ perslayParameters["pweight_power"] = 2
+ perslayParameters["pweight_init"] = 1.
+ perslayParameters["pweight_train"] = False
+ perslayParameters["perm_op"] = "sum"
+
+ perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()])
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[0.7186792, 0.7186759, 0.718668]])).sum() <= 1e-6
+
+ perslayParameters["layer"] = "RationalHat"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lnum"] = 3
+ perslayParameters["q"] = 1.
+ perslayParameters["lmean_init"] = gauss_init[:2,:]
+ perslayParameters["lr_init"] = rui(1., 1.)
+
+ perslayParameters["pweight"] = "power"
+ perslayParameters["pweight_power"] = 2
+ perslayParameters["pweight_init"] = 1.
+ perslayParameters["pweight_train"] = False
+ perslayParameters["perm_op"] = "sum"
+
+ perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()])
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[-0.00675799, -0.00620097, -0.00510298]])).sum() <= 1e-6
+
+def test_perslay_exponential():
+
+ diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag)
+ diag = Padding(use=True).fit_transform(diag)
+ D = np.stack(np.array(diag, dtype=np.float32), 0)
+ diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
+ perslayParameters = {}
+
+ perslayParameters["layer"] = "Exponential"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lnum"] = 3
+ perslayParameters["lmean_init"] = 1e3 * gauss_init[:2,:]
+ perslayParameters["lvariance_init"] = gauss_init[2:,:]
+
+ perslayParameters["pweight"] = None
+ perslayParameters["perm_op"] = "max"
+
+ perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()])
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,3)
+ assert np.abs(vector-np.array([[0.9940388, 0.99311596, 0.99222755]])).sum() <= 1e-6
+
+def test_perslay_peq():
+
+ diag = [np.array([[0.,4.],[1.,2.],[3.,8.],[6.,8.]])]
+ diag = DiagramScaler(use=True, scalers=[([0,1], MinMaxScaler())]).fit_transform(diag)
+ diag = Padding(use=True).fit_transform(diag)
+ D = np.stack(np.array(diag, dtype=np.float32), 0)
+ diagrams, empty_feats = [D], np.empty([1,0], dtype=np.float32)
+ perslayParameters = {}
+
+ perslayParameters["layer"] = "PermutationEquivariant"
+ perslayParameters["layer_train"] = False
+ perslayParameters["lpeq"] = [(5, "sum"), (5, "sum")]
+ perslayParameters["lweight_init"] = rui(1e-1, 1e-1)
+ perslayParameters["lbias_init"] = rui(0.1, 0.1)
+ perslayParameters["lgamma_init"] = rui(1e-1, 1e-1)
+
+ perslayParameters["pweight"] = None
+ perslayParameters["perm_op"] = "topk"
+ perslayParameters["keep"] = 3
+
+ perslayParameters["final_model"] = tf.keras.Sequential([tf.keras.layers.Flatten()])
+ model = PerslayModel(name="perslay", diagdim=2, perslay_parameters=[perslayParameters], rho="identity")
+ vector = model([diagrams, empty_feats]).numpy()
+ assert vector.shape == (1,15)
+ assert np.abs(vector-np.array([[0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375, 0.4375, 0.41875, 0.375]])).sum() <= 1e-6