From 951e23a37eb12eaa0e804c7d3d5b4e135c415691 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Mon, 6 Jul 2020 17:35:55 +0200
Subject: adding essential parts management in wasserstein distance

---
 src/python/gudhi/wasserstein/wasserstein.py | 146 ++++++++++++++++++++++++++--
 1 file changed, 138 insertions(+), 8 deletions(-)

(limited to 'src')

diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index b37d30bb..283ecd9d 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -79,6 +79,9 @@ def _perstot(X, order, internal_p, enable_autodiff):
                            transparent to automatic differentiation.
     :type enable_autodiff: bool
     :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram).
+
+    .. note::
+        Can be +infty if the diagram has an essential part (points with infinite coordinates).
     '''
     if enable_autodiff:
         import eagerpy as ep
@@ -88,32 +91,136 @@ def _perstot(X, order, internal_p, enable_autodiff):
         return np.linalg.norm(_dist_to_diag(X, internal_p), ord=order)
 
 
-def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False):
+def _get_essential_parts(a):
+    '''
+    :param a: (n x 2) numpy.array (point of a diagram)
+    :retuns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite
+             coordinates that can occur in a diagram, namely:
+             type0 : (-inf, finite)
+             type1 : (finite, +inf)
+             type2 : (-inf, +inf)
+             type3 : (-inf, -inf)
+             type4 : (+inf, +inf)
+    .. note::
+        For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x.
+    '''
+    if len(a):
+        ess_first_type  = np.where(np.isfinite(a[:,1]) & (a[:,0] == -np.inf))[0]  # coord (-inf, x)
+        ess_second_type = np.where(np.isfinite(a[:,0]) & (a[:,1] == np.inf))[0]   # coord (x, +inf)
+        ess_third_type  = np.where((a[:,0] == -np.inf) & (a[:,1] == np.inf))[0]   # coord (-inf, +inf)
+        ess_fourth_type = np.where((a[:,0] == -np.inf) & (a[:,1] == -np.inf))[0]  # coord (-inf, -inf)
+        ess_fifth_type  = np.where((a[:,0] == np.inf) & (a[:,1] == np.inf))[0]    # coord (+inf, +inf)
+        return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type
+    else:
+        return [], [], [], [], []
+
+
+def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis):
+    '''
+    :param X: (n x 2) numpy.array (dgm points)
+    :param Y: (n x 2) numpy.array (dgm points)
+    :param idX: indices to consider for this one dimensional OT problem (in X)
+    :param idY: indices to consider for this one dimensional OT problem (in Y)
+    :param order: exponent for Wasserstein distance computation
+    :param axis: must be 0 or 1, corresponds to the coordinate which is finite.
+    :returns: cost (float) and match for points with *one* infinite coordinate.
+
+    .. note::
+        Assume idX, idY come from calling _handle_essential_parts, and thus have the same length.
     '''
-    :param X: (n x 2) numpy.array encoding the (finite points of the) first diagram. Must not contain essential points
-              (i.e. with infinite coordinate).
+    u = X[idX, axis]
+    v = Y[idY, axis]
+
+    cost = np.sum(np.abs(np.sort(u) - np.sort(v))**(order))  # OT cost in 1D
+
+    sortidX = idX[np.argsort(u)]
+    sortidY = idY[np.argsort(v)]
+    # We return [i,j] sorted per value, and then [i, -1] (or [-1, j]) to account for essential points matched to the diagonal
+    match = list(zip(sortidX, sortidY))
+
+    return cost, match
+
+
+def _handle_essential_parts(X, Y, order):
+    '''
+    :param X: (n x 2) numpy array, first diagram.
+    :param Y: (n x 2) numpy array, second diagram.
+    :param order: Wasserstein order for cost computation.
+    :returns: cost and matching due to essential parts. If cost is +inf, matching will be set to None.
+    '''
+    c = 0
+    m = []
+
+    ess_parts_X = _get_essential_parts(X)
+    ess_parts_Y = _get_essential_parts(Y)
+
+    # Treats the case of infinite cost (cardinalities of essential parts differ).
+    for u, v in zip(ess_parts_X, ess_parts_Y):
+        if len(u) != len(v):
+            return np.inf, None
+
+    # Now we know each essential part has the same number of points in both diagrams.
+    # Handle type 0 and type 1 essential parts (those with one finite coordinate)
+    c1, m1 = _cost_and_match_essential_parts(X, Y, ess_parts_X[0], ess_parts_Y[0], axis=1, order=order)
+    c2, m2 = _cost_and_match_essential_parts(X, Y, ess_parts_X[1], ess_parts_Y[1], axis=0, order=order)
+
+    c += c1 + c2
+    m += m1 + m2
+
+    # Handle type >= 2 (both coordinates are infinite, so we essentially just align points)
+    for u, v in zip(ess_parts_X[2:], ess_parts_Y[2:]):
+        m += list(zip(u, v))  # cost is 0
+
+    return c, np.array(m)
+
+
+def _offdiag(X):
+    '''
+    :param X: (n x 2) numpy array encoding a persistence diagram.
+    :returns: The off-diagonal part of a diagram `X` (points with finite coordinates).
+    '''
+    return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
+
+
+def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
+                         keep_essential_parts=True):
+    '''
+    :param X: (n x 2) numpy.array encoding the first diagram. Can now contain essential parts (points with infinite
+              coordinates).
     :param Y: (m x 2) numpy.array encoding the second diagram.
     :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
                      a (n x 2) np.array  [...[i,j]...], meaning the i-th point in X is matched to
                      the j-th point in Y, with the convention (-1) represents the diagonal.
+                     Note that if the cost is +inf (essential parts have different number of points),
+                     then the optimal matching will be set to `None`.
     :param order: exponent for Wasserstein; Default value is 1.
     :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
-                       Default value is `np.inf`.
+                       default value is `np.inf`.
     :param enable_autodiff: If X and Y are torch.tensor, tensorflow.Tensor or jax.numpy.ndarray, make the computation
         transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
-        with `matching=True`.
+        with `matching=True` and with `keep_essential_parts=True`.
 
         .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
             and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
     :type enable_autodiff: bool
+    :param keep_essential_parts: If False, only considers the off-diagonal points in the diagrams.
+                                 Otherwise, computes the distance between the essential parts separately.
+    :type keep_essential_parts: bool
     :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
               respect to the internal_p-norm as ground metric.
               If matching is set to True, also returns the optimal matching between X and Y.
+              If cost is +inf, any matching is optimal and thus it returns `None` instead.
     '''
+    # Zeroth step: check compatibility of arguments
+    if keep_essential_parts and enable_autodiff:
+        import warnings
+        warnings.warn("enable_autodiff does not handle essential parts yet. These will be ignored in the following computations")
+        keep_essential_parts = False
+
+    # First step: handle empty diagrams
     n = len(X)
     m = len(Y)
 
-    # handle empty diagrams
     if n == 0:
         if m == 0:
             if not matching:
@@ -132,6 +239,25 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
         else:
             return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)])
 
+
+    # Second step: handle essential parts
+    if keep_essential_parts:
+        essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
+        if (essential_cost == np.inf):
+            if matching:
+                return np.inf, None
+            else:
+                return np.inf  # avoid computing off-diagonal transport cost if essential parts do not match (saves time)
+
+    else:
+        essential_cost = 0
+        essential_matching = None
+
+    X, Y = _offdiag(X), _offdiag(Y)
+    n = len(X)
+    m = len(Y)
+
+    # Now the standard pipeline for off-diagonal parts
     if enable_autodiff:
         import eagerpy as ep
 
@@ -139,6 +265,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
         Y_orig = ep.astensor(Y)
         X = X_orig.numpy()
         Y = Y_orig.numpy()
+
     M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p)
     a = np.ones(n+1)  # weight vector of the input diagram. Uniform here.
     a[-1] = m
@@ -154,7 +281,10 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
         # Now we turn to -1 points encoding the diagonal
         match[:,0][match[:,0] >= n] = -1
         match[:,1][match[:,1] >= m] = -1
-        return ot_cost ** (1./order) , match
+        # Finally incorporate the essential part matching
+        if essential_matching is not None:
+            match = np.concatenate([match, essential_matching]) if essential_matching.size else match
+        return (ot_cost + essential_cost) ** (1./order) , match
 
     if enable_autodiff:
         P = ot.emd(a=a, b=b, M=M, numItermax=2000000)
@@ -178,4 +308,4 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
     # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value?
     ot_cost = ot.emd2(a, b, M, numItermax=2000000)
 
-    return ot_cost ** (1./order)
+    return (ot_cost + essential_cost) ** (1./order)
-- 
cgit v1.2.3
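As a reading aid between the commits (not part of the patch; the variable names below are illustrative), the essential-part bookkeeping introduced above boils down to boolean index masks plus a one-dimensional optimal transport problem, which for any order >= 1 is solved optimally by matching sorted coordinates:

    import numpy as np

    dgm = np.array([[0., 1.],            # finite point
                    [-np.inf, 2.],       # type0: (-inf, x)
                    [3., np.inf],        # type1: (x, +inf)
                    [-np.inf, np.inf],   # type2
                    [-np.inf, -np.inf],  # type3
                    [np.inf, np.inf]])   # type4

    # Same masks as _get_essential_parts, re-derived here for types 0 and 1.
    type0 = np.where(np.isfinite(dgm[:, 1]) & (dgm[:, 0] == -np.inf))[0]  # -> [1]
    type1 = np.where(np.isfinite(dgm[:, 0]) & (dgm[:, 1] == np.inf))[0]   # -> [2]

    # 1D OT between two equal-size essential parts: sort the finite coordinates.
    u, v = np.array([3., 5.]), np.array([4., 4.5])
    order = 1.
    cost = np.sum(np.abs(np.sort(u) - np.sort(v)) ** order)  # |3-4| + |5-4.5| = 1.5

The monotone (sorted) matching being optimal in dimension one is what lets `_cost_and_match_essential_parts` avoid calling a general OT solver for these points.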
From 91a9d77ed48847a8859e6bdd759390001910d411 Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Mon, 6 Jul 2020 17:52:47 +0200
Subject: update doc (examples) with essential parts

---
 src/python/doc/wasserstein_distance_user.rst | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

(limited to 'src')

diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst
index 96ec7872..d747344b 100644
--- a/src/python/doc/wasserstein_distance_user.rst
+++ b/src/python/doc/wasserstein_distance_user.rst
@@ -44,7 +44,7 @@ Basic example
 *************
 
 This example computes the 1-Wasserstein distance from 2 persistence diagrams with Euclidean ground metric.
-Note that persistence diagrams must be submitted as (n x 2) numpy arrays and must not contain inf values.
+Note that persistence diagrams must be submitted as (n x 2) numpy arrays.
 
 .. testcode::
 
     import gudhi.wasserstein
     import numpy as np
@@ -67,14 +67,16 @@ We can also have access to the optimal matching by letting `matching=True`.
 It is encoded as a list of indices (i,j), meaning that the i-th point in X
 is mapped to the j-th point in Y.
 An index of -1 represents the diagonal.
+It handles essential parts (points with infinite coordinates). However, if the cardinalities of the essential parts differ,
+any matching has a cost +inf and thus can be considered to be optimal. In such a case, the function returns `(np.inf, None)`.
 
 .. testcode::
 
     import gudhi.wasserstein
     import numpy as np
 
-    dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974]])
-    dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1]])
+    dgm1 = np.array([[2.7, 3.7],[9.6, 14.],[34.2, 34.974], [3, np.inf]])
+    dgm2 = np.array([[2.8, 4.45], [5, 6], [9.5, 14.1], [4, np.inf]])
     cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm1, dgm2, matching=True, order=1, internal_p=2)
 
     message_cost = "Wasserstein distance value = %.2f" %cost
@@ -90,16 +92,30 @@ An index of -1 represents the diagonal.
     for j in dgm2_to_diagonal:
         print("point %s in dgm2 is matched to the diagonal" %j)
 
+    dgm3 = np.array([[1, 2], [0, np.inf]])
+    dgm4 = np.array([[1, 2], [0, np.inf], [1, np.inf]])
+    cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm3, dgm4, matching=True, order=1, internal_p=2)
+    print("\nSecond example:")
+    print("cost:", cost)
+    print("matchings:", matchings)
+
+
 The output is:
 
 .. testoutput::
 
-    Wasserstein distance value = 2.15
+    Wasserstein distance value = 3.15
     point 0 in dgm1 is matched to point 0 in dgm2
     point 1 in dgm1 is matched to point 2 in dgm2
+    point 3 in dgm1 is matched to point 3 in dgm2
     point 2 in dgm1 is matched to the diagonal
     point 1 in dgm2 is matched to the diagonal
 
+    Second example:
+    cost: inf
+    matchings: None
+
+
 Barycenters
 -----------
-- 
cgit v1.2.3

From fe3e6a3a47828841ba3cb4a0721e5d8c16ab126f Mon Sep 17 00:00:00 2001
From: tlacombe
Date: Mon, 6 Jul 2020 18:27:52 +0200
Subject: update test including essential parts

---
 src/python/gudhi/wasserstein/wasserstein.py  | 18 +++++--
 src/python/test/test_wasserstein_distance.py | 72 +++++++++++++++++++++++++---
 2 files changed, 78 insertions(+), 12 deletions(-)

(limited to 'src')

diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index 283ecd9d..2a1dee7a 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -105,10 +105,10 @@ def _get_essential_parts(a):
         For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x.
''' if len(a): - ess_first_type = np.where(np.isfinite(a[:,1]) & (a[:,0] == -np.inf))[0] # coord (-inf, x) + ess_first_type = np.where(np.isfinite(a[:,1]) & (a[:,0] == -np.inf))[0] # coord (-inf, x) ess_second_type = np.where(np.isfinite(a[:,0]) & (a[:,1] == np.inf))[0] # coord (x, +inf) - ess_third_type = np.where((a[:,0] == -np.inf) & (a[:,1] == np.inf))[0] # coord (-inf, +inf) - ess_fourth_type = np.where((a[:,0] == -np.inf) & (a[:,1] == -np.inf))[0] # coord (-inf, -inf) + ess_third_type = np.where((a[:,0] == -np.inf) & (a[:,1] == np.inf))[0] # coord (-inf, +inf) + ess_fourth_type = np.where((a[:,0] == -np.inf) & (a[:,1] == -np.inf))[0] # coord (-inf, -inf) ess_fifth_type = np.where((a[:,0] == np.inf) & (a[:,1] == np.inf))[0] # coord (+inf, +inf) return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type else: @@ -232,12 +232,20 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab if not matching: return _perstot(Y, order, internal_p, enable_autodiff) else: - return _perstot(Y, order, internal_p, enable_autodiff), np.array([[-1, j] for j in range(m)]) + cost = _perstot(Y, order, internal_p, enable_autodiff) + if cost == np.inf: # We had some essential part here. + return cost, None + else: + return cost, np.array([[-1, j] for j in range(m)]) elif m == 0: if not matching: return _perstot(X, order, internal_p, enable_autodiff) else: - return _perstot(X, order, internal_p, enable_autodiff), np.array([[i, -1] for i in range(n)]) + cost = _perstot(X, order, internal_p, enable_autodiff) + if cost == np.inf: + return cost, None + else: + return np.array([[i, -1] for i in range(n)]) # Second step: handle essential parts diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 90d26809..24be228b 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -5,10 +5,11 @@ Copyright (C) 2019 Inria Modification(s): + - 2020/07 Théo Lacombe: Added tests about handling essential parts in diagrams. 
- YYYY/MM Author: Description of the modification """ -from gudhi.wasserstein.wasserstein import _proj_on_diag +from gudhi.wasserstein.wasserstein import _proj_on_diag, _offdiag, _handle_essential_parts from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np @@ -18,12 +19,62 @@ __author__ = "Theo Lacombe" __copyright__ = "Copyright (C) 2019 Inria" __license__ = "MIT" + def test_proj_on_diag(): dgm = np.array([[1., 1.], [1., 2.], [3., 5.]]) assert np.array_equal(_proj_on_diag(dgm), [[1., 1.], [1.5, 1.5], [4., 4.]]) empty = np.empty((0, 2)) assert np.array_equal(_proj_on_diag(empty), empty) + +def test_offdiag(): + diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], + [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) + assert np.array_equal(_offdiag(diag), [[0, 1], [3, 5]]) + + +def test_handle_essential_parts(): + diag1 = np.array([[0, 1], [3, 5], + [2, np.inf], [3, np.inf], + [-np.inf, 8], [-np.inf, 12], + [-np.inf, -np.inf], + [np.inf, np.inf], + [-np.inf, np.inf], [-np.inf, np.inf]]) + + diag2 = np.array([[0, 2], [3, 5], + [2, np.inf], [4, np.inf], + [-np.inf, 8], [-np.inf, 11], + [-np.inf, -np.inf], + [np.inf, np.inf], + [-np.inf, np.inf], [-np.inf, np.inf]]) + + diag3 = np.array([[0, 2], [3, 5], + [2, np.inf], [4, np.inf], + [-np.inf, 8], [-np.inf, 11], + [-np.inf, -np.inf], [-np.inf, -np.inf], + [np.inf, np.inf], + [-np.inf, np.inf], [-np.inf, np.inf]]) + + c, m = _handle_essential_parts(diag1, diag2, matching=True, order=1) + assert c == pytest.approx(3, 0.0001) + assert np.array_equal(m, [[0,0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9]]) + c, m = _handle_essential_parts(diag1, diag3, matching=True, order=1) + assert c == np.inf + assert (m is None) + + +def test_get_essential_parts(): + diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], + [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) + + res = _get_essential_parts(diag) + assert res[0] = [4, 5] + assert res[1] = [2, 3] + assert res[2] = [8, 9] + assert res[3] = [6] + assert res[4] = [7] + + def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]) diag2 = np.array([[2.8, 4.45], [9.5, 14.1]]) @@ -64,7 +115,7 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat assert wasserstein_distance(diag4, diag5) == np.inf assert wasserstein_distance(diag5, diag6, order=1, internal_p=np.inf) == approx(4.) 
- + assert wasserstein_distance(diag5, emptydiag) == np.inf if test_matching: match = wasserstein_distance(emptydiag, emptydiag, matching=True, internal_p=1., order=2)[1] @@ -78,6 +129,13 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat match = wasserstein_distance(diag1, diag2, matching=True, internal_p=2., order=2.)[1] assert np.array_equal(match, [[0, 0], [1, 1], [2, -1]]) + if test_matching and test_infinity: + diag7 = np.array([[0, 3], [4, np.inf], [5, np.inf]]) + + match = wasserstein_distance(diag5, diag6, matching=True, internal_p=2., order=2.)[1] + assert np.array_equal(match, [[0, -1], [-1,0], [-1, 1], [1, 2]]) + match = wasserstein_distance(diag5, diag7, matching=True, internal_p=2., order=2.)[1] + assert (match is None) def hera_wrap(**extra): @@ -92,7 +150,7 @@ def pot_wrap(**extra): def test_wasserstein_distance_pot(): _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) - _basic_wasserstein(pot_wrap(enable_autodiff=True), 1e-15, test_infinity=False, test_matching=False) + _basic_wasserstein(pot_wrap(enable_autodiff=True, keep_essential_parts=False), 1e-15, test_infinity=False, test_matching=False) def test_wasserstein_distance_hera(): _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False) @@ -105,19 +163,19 @@ def test_wasserstein_distance_grad(): diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) assert diag1.grad is None and diag2.grad is None and diag3.grad is None - dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True) - dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True) + dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False) + dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False) dist12.backward() dist30.backward() assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any() diag4 = torch.tensor([[0., 10.]], requires_grad=True) diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True) - dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True) + dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True, keep_essential_parts=False) assert dist45 == 3. 
dist45.backward() assert np.array_equal(diag4.grad, [[-1., -1.]]) assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]]) diag6 = torch.tensor([[5., 10.]], requires_grad=True) - pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True).backward() + pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False).backward() # https://github.com/jonasrauber/eagerpy/issues/6 # assert np.array_equal(diag6.grad, [[0., 0.]]) -- cgit v1.2.3 From e0eba14109e02676825f8c24563872a5b49c6120 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 7 Jul 2020 11:52:35 +0200 Subject: correction typo in test wdist --- src/python/gudhi/wasserstein/wasserstein.py | 2 +- src/python/test/test_wasserstein_distance.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 2a1dee7a..009c1bf7 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -245,7 +245,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab if cost == np.inf: return cost, None else: - return np.array([[i, -1] for i in range(n)]) + return cost, np.array([[i, -1] for i in range(n)]) # Second step: handle essential parts diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 24be228b..e50091e9 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -55,10 +55,10 @@ def test_handle_essential_parts(): [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) - c, m = _handle_essential_parts(diag1, diag2, matching=True, order=1) + c, m = _handle_essential_parts(diag1, diag2, order=1) assert c == pytest.approx(3, 0.0001) assert np.array_equal(m, [[0,0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9]]) - c, m = _handle_essential_parts(diag1, diag3, matching=True, order=1) + c, m = _handle_essential_parts(diag1, diag3, order=1) assert c == np.inf assert (m is None) @@ -68,11 +68,11 @@ def test_get_essential_parts(): [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) res = _get_essential_parts(diag) - assert res[0] = [4, 5] - assert res[1] = [2, 3] - assert res[2] = [8, 9] - assert res[3] = [6] - assert res[4] = [7] + assert res[0] == [4, 5] + assert res[1] == [2, 3] + assert res[2] == [8, 9] + assert res[3] == [6] + assert res[4] == [7] def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): -- cgit v1.2.3 From 42a399c273fde7c76ec23d2993957fcbb492ee79 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 7 Jul 2020 12:37:51 +0200 Subject: correction mistake in tests --- src/python/gudhi/wasserstein/wasserstein.py | 4 ++-- src/python/test/test_wasserstein_distance.py | 19 +++++++++++-------- 2 files changed, 13 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 009c1bf7..981bbf08 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -214,7 +214,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab # Zeroth step: check compatibility of arguments if keep_essential_parts and enable_autodiff: import warnings - warnings.warn("enable_autodiff does not handle essential parts yet. 
These will be ignored in the following computations") + warnings.warn("enable_autodiff does not handle essential parts yet. keep_essential_parts set to False.") keep_essential_parts = False # First step: handle empty diagrams @@ -256,11 +256,11 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab return np.inf, None else: return np.inf # avoid computing off-diagonal transport cost if essential parts do not match (saves time) - else: essential_cost = 0 essential_matching = None + # Extract off-diaognal points of the diagrams. X, Y = _offdiag(X), _offdiag(Y) n = len(X) m = len(Y) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index e50091e9..285b95c9 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -9,12 +9,13 @@ - YYYY/MM Author: Description of the modification """ -from gudhi.wasserstein.wasserstein import _proj_on_diag, _offdiag, _handle_essential_parts +from gudhi.wasserstein.wasserstein import _proj_on_diag, _offdiag, _handle_essential_parts, _get_essential_parts from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np import pytest + __author__ = "Theo Lacombe" __copyright__ = "Copyright (C) 2019 Inria" __license__ = "MIT" @@ -56,8 +57,10 @@ def test_handle_essential_parts(): [-np.inf, np.inf], [-np.inf, np.inf]]) c, m = _handle_essential_parts(diag1, diag2, order=1) - assert c == pytest.approx(3, 0.0001) - assert np.array_equal(m, [[0,0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6], [7, 7], [8, 8], [9, 9]]) + assert c == pytest.approx(2, 0.0001) # Note: here c is only the cost due to essential part (thus 2, not 3) + # Similarly, the matching only corresponds to essential parts. + assert np.array_equal(m, [[4, 4], [5, 5], [2, 2], [3, 3], [8, 8], [9, 9], [6, 6], [7, 7]]) + c, m = _handle_essential_parts(diag1, diag3, order=1) assert c == np.inf assert (m is None) @@ -68,11 +71,11 @@ def test_get_essential_parts(): [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) res = _get_essential_parts(diag) - assert res[0] == [4, 5] - assert res[1] == [2, 3] - assert res[2] == [8, 9] - assert res[3] == [6] - assert res[4] == [7] + assert np.array_equal(res[0], [4, 5]) + assert np.array_equal(res[1], [2, 3]) + assert np.array_equal(res[2], [8, 9]) + assert np.array_equal(res[3], [6] ) + assert np.array_equal(res[4], [7] ) def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): -- cgit v1.2.3 From 107f8e6668509f5fd36e179f9a538b460d3941a9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 7 Jul 2020 18:15:17 +0200 Subject: added enable autodiff management in _offdiag utils function --- src/python/gudhi/wasserstein/wasserstein.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 981bbf08..495142c4 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -70,6 +70,7 @@ def _perstot_autodiff(X, order, internal_p): ''' return _dist_to_diag(X, internal_p).norms.lp(order) + def _perstot(X, order, internal_p, enable_autodiff): ''' :param X: (n x 2) numpy.array (points of a given diagram). 
@@ -174,12 +175,18 @@ def _handle_essential_parts(X, Y, order): return c, np.array(m) -def _offdiag(X): +def _offdiag(X, enable_autodiff): ''' :param X: (n x 2) numpy array encoding a persistence diagram. :returns: The off-diagonal part of a diagram `X` (points with finite coordinates). ''' - return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))] + if enable_autodiff: + import eagerpy as ep + + return ep.astensor(X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]) + + else: + return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))] def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False, @@ -261,7 +268,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab essential_matching = None # Extract off-diaognal points of the diagrams. - X, Y = _offdiag(X), _offdiag(Y) + X, Y = _offdiag(X, enable_autodiff), _offdiag(Y, enable_autodiff) n = len(X) m = len(Y) -- cgit v1.2.3 From e94892f972357283e70c7534f84662dfaa21cc3e Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 20 Jul 2020 11:41:13 +0200 Subject: update test enable_autodiff and _offdiag --- src/python/gudhi/wasserstein/wasserstein.py | 16 ++++++---------- src/python/test/test_wasserstein_distance.py | 2 +- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 495142c4..142385b1 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -178,13 +178,13 @@ def _handle_essential_parts(X, Y, order): def _offdiag(X, enable_autodiff): ''' :param X: (n x 2) numpy array encoding a persistence diagram. + :param enable_autodiff: boolean, to handle the case where X is a eagerpy tensor. :returns: The off-diagonal part of a diagram `X` (points with finite coordinates). ''' if enable_autodiff: - import eagerpy as ep - - return ep.astensor(X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]) - + # Assumes the diagrams only have finite coordinates. Thus, return X directly. + # TODO improve this to get rid of essential parts if there are any. + return X else: return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))] @@ -218,11 +218,6 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab If matching is set to True, also returns the optimal matching between X and Y. If cost is +inf, any matching is optimal and thus it returns `None` instead. ''' - # Zeroth step: check compatibility of arguments - if keep_essential_parts and enable_autodiff: - import warnings - warnings.warn("enable_autodiff does not handle essential parts yet. keep_essential_parts set to False.") - keep_essential_parts = False # First step: handle empty diagrams n = len(X) @@ -267,7 +262,8 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab essential_cost = 0 essential_matching = None - # Extract off-diaognal points of the diagrams. + # Extract off-diaognal points of the diagrams. Note that if enable_autodiff is True, nothing is done here (X,Y are + # assumed to be tensors with only finite coordinates). 
X, Y = _offdiag(X, enable_autodiff), _offdiag(Y, enable_autodiff) n = len(X) m = len(Y) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 285b95c9..6701c7ba 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -31,7 +31,7 @@ def test_proj_on_diag(): def test_offdiag(): diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) - assert np.array_equal(_offdiag(diag), [[0, 1], [3, 5]]) + assert np.array_equal(_offdiag(diag, enable_autodiff=False), [[0, 1], [3, 5]]) def test_handle_essential_parts(): -- cgit v1.2.3 From 89bb3d11064de40f2b4fda958aa2e2e8cfa5b489 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 10:45:32 +0200 Subject: change name _offdiag to _finite_part --- src/python/gudhi/wasserstein/wasserstein.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 572d4249..d64d433e 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -175,11 +175,11 @@ def _handle_essential_parts(X, Y, order): return c, np.array(m) -def _offdiag(X, enable_autodiff): +def _finite_part(X, enable_autodiff): ''' :param X: (n x 2) numpy array encoding a persistence diagram. :param enable_autodiff: boolean, to handle the case where X is a eagerpy tensor. - :returns: The off-diagonal part of a diagram `X` (points with finite coordinates). + :returns: The finite part of a diagram `X` (points with finite coordinates). ''' if enable_autodiff: # Assumes the diagrams only have finite coordinates. Thus, return X directly. @@ -262,13 +262,13 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab essential_cost = 0 essential_matching = None - # Extract off-diaognal points of the diagrams. Note that if enable_autodiff is True, nothing is done here (X,Y are + # Extract finite points of the diagrams. Note that if enable_autodiff is True, nothing is done here (X,Y are # assumed to be tensors with only finite coordinates). 
- X, Y = _offdiag(X, enable_autodiff), _offdiag(Y, enable_autodiff) + X, Y = _finite_part(X, enable_autodiff), _finite_part(Y, enable_autodiff) n = len(X) m = len(Y) - # Now the standard pipeline for off-diagonal parts + # Now the standard pipeline for finite parts if enable_autodiff: import eagerpy as ep -- cgit v1.2.3 From 01bd9eef85b0d93eb1629f1a0c5a28a359e4e7b9 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 10:47:18 +0200 Subject: change name _offdiag to _finite_part in test file --- src/python/test/test_wasserstein_distance.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 6701c7ba..12bf71df 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -9,7 +9,7 @@ - YYYY/MM Author: Description of the modification """ -from gudhi.wasserstein.wasserstein import _proj_on_diag, _offdiag, _handle_essential_parts, _get_essential_parts +from gudhi.wasserstein.wasserstein import _proj_on_diag, _finite_part, _handle_essential_parts, _get_essential_parts from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np @@ -28,10 +28,10 @@ def test_proj_on_diag(): assert np.array_equal(_proj_on_diag(empty), empty) -def test_offdiag(): +def test_finite_part(): diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) - assert np.array_equal(_offdiag(diag, enable_autodiff=False), [[0, 1], [3, 5]]) + assert np.array_equal(_finite_part(diag, enable_autodiff=False), [[0, 1], [3, 5]]) def test_handle_essential_parts(): -- cgit v1.2.3 From 777522b82bde16b55f15c21471bad06038849fd1 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 15:52:36 +0200 Subject: improved essential part and enable autodiff management --- src/python/gudhi/wasserstein/wasserstein.py | 75 ++++++++++++++++------------- 1 file changed, 41 insertions(+), 34 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index d64d433e..2911f826 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -95,7 +95,7 @@ def _perstot(X, order, internal_p, enable_autodiff): def _get_essential_parts(a): ''' :param a: (n x 2) numpy.array (point of a diagram) - :retuns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite + :returns: five lists of indices (between 0 and len(a)) accounting for the five types of points with infinite coordinates that can occur in a diagram, namely: type0 : (-inf, finite) type1 : (finite, +inf) @@ -104,13 +104,20 @@ def _get_essential_parts(a): type4 : (+inf, +inf) .. note:: For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x. + Note also that points with (+inf, -inf) are not handled (points (x,y) in dgm satisfy by assumption (y >= x)). 
    '''
     if len(a):
-        ess_first_type  = np.where(np.isfinite(a[:,1]) & (a[:,0] == -np.inf))[0]  # coord (-inf, x)
-        ess_second_type = np.where(np.isfinite(a[:,0]) & (a[:,1] == np.inf))[0]   # coord (x, +inf)
-        ess_third_type  = np.where((a[:,0] == -np.inf) & (a[:,1] == np.inf))[0]   # coord (-inf, +inf)
-        ess_fourth_type = np.where((a[:,0] == -np.inf) & (a[:,1] == -np.inf))[0]  # coord (-inf, -inf)
-        ess_fifth_type  = np.where((a[:,0] == np.inf) & (a[:,1] == np.inf))[0]    # coord (+inf, +inf)
+        first_coord_finite = np.isfinite(a[:,0])
+        second_coord_finite = np.isfinite(a[:,1])
+        first_coord_infinite_positive = (a[:,0] == np.inf)
+        second_coord_infinite_positive = (a[:,1] == np.inf)
+        first_coord_infinite_negative = (a[:,0] == -np.inf)
+        second_coord_infinite_negative = (a[:,1] == -np.inf)
+
+        ess_first_type  = np.where(second_coord_finite & first_coord_infinite_negative)[0]  # coord (-inf, x)
+        ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0]  # coord (x, +inf)
+        ess_third_type  = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0]  # coord (-inf, +inf)
+        ess_fourth_type = np.where(first_coord_infinite_negative & second_coord_infinite_negative)[0]  # coord (-inf, -inf)
+        ess_fifth_type  = np.where(first_coord_infinite_positive & second_coord_infinite_positive)[0]  # coord (+inf, +inf)
         return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type
     else:
         return [], [], [], [], []
@@ -136,7 +143,7 @@ def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis):
 
     sortidX = idX[np.argsort(u)]
     sortidY = idY[np.argsort(v)]
-    # We return [i,j] sorted per value, and then [i, -1] (or [-1, j]) to account for essential points matched to the diagonal
+    # We return [i,j] sorted per value
     match = list(zip(sortidX, sortidY))
 
     return cost, match
@@ -149,9 +156,6 @@ def _handle_essential_parts(X, Y, order):
     :param order: Wasserstein order for cost computation.
     :returns: cost and matching due to essential parts. If cost is +inf, matching will be set to None.
     '''
-    c = 0
-    m = []
-
     ess_parts_X = _get_essential_parts(X)
     ess_parts_Y = _get_essential_parts(Y)
 
@@ -165,8 +169,8 @@ def _handle_essential_parts(X, Y, order):
     c1, m1 = _cost_and_match_essential_parts(X, Y, ess_parts_X[0], ess_parts_Y[0], axis=1, order=order)
     c2, m2 = _cost_and_match_essential_parts(X, Y, ess_parts_X[1], ess_parts_Y[1], axis=0, order=order)
 
-    c += c1 + c2
-    m += m1 + m2
+    c = c1 + c2
+    m = m1 + m2
 
     # Handle type >= 2 (both coordinates are infinite, so we essentially just align points)
     for u, v in zip(ess_parts_X[2:], ess_parts_Y[2:]):
@@ -175,24 +179,18 @@ def _handle_essential_parts(X, Y, order):
     return c, np.array(m)
 
 
-def _finite_part(X, enable_autodiff):
+def _finite_part(X):
     '''
     :param X: (n x 2) numpy array encoding a persistence diagram.
-    :param enable_autodiff: boolean, to handle the case where X is a eagerpy tensor.
     :returns: The finite part of a diagram `X` (points with finite coordinates).
     '''
-    if enable_autodiff:
-        # Assumes the diagrams only have finite coordinates. Thus, return X directly.
-        # TODO improve this to get rid of essential parts if there are any.
-        return X
-    else:
-        return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
+    return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
 
 
 def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
                          keep_essential_parts=True):
     '''
-    :param X: (n x 2) numpy.array encoding the first diagram.
Can now contain essential parts (points with infinite
+    :param X: (n x 2) numpy.array encoding the first diagram. Can contain essential parts (points with infinite
               coordinates).
     :param Y: (m x 2) numpy.array encoding the second diagram.
     :param matching: if True, computes and returns the optimal matching between X and Y, encoded as
@@ -200,17 +198,17 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
                      the j-th point in Y, with the convention (-1) represents the diagonal.
                      Note that if the cost is +inf (essential parts have different number of points),
                      then the optimal matching will be set to `None`.
-    :param order: exponent for Wasserstein; Default value is 1.
-    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2);
+    :param order: exponent for Wasserstein. Default value is 1.
+    :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
                        Default value is `np.inf`.
     :param enable_autodiff: If X and Y are torch.tensor or tensorflow.Tensor, make the computation
         transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible
         with `matching=True` and with `keep_essential_parts=True`.
 
-        .. note:: This considers the function defined on the coordinates of the off-diagonal points of X and Y
+        .. note:: This considers the function defined on the coordinates of the off-diagonal finite points of X and Y
             and lets the various frameworks compute its gradient. It never pulls new points from the diagonal.
     :type enable_autodiff: bool
-    :param keep_essential_parts: If False, only considers the off-diagonal points in the diagrams.
+    :param keep_essential_parts: If False, only considers the finite points in the diagrams.
                                  Otherwise, computes the distance between the essential parts separately.
     :type keep_essential_parts: bool
     :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with
               respect to the internal_p-norm as ground metric.
               If matching is set to True, also returns the optimal matching between X and Y.
               If cost is +inf, any matching is optimal and thus it returns `None` instead.
     '''
@@ -235,7 +233,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
             return _perstot(Y, order, internal_p, enable_autodiff)
         else:
             cost = _perstot(Y, order, internal_p, enable_autodiff)
-            if cost == np.inf:  # We had some essential part here.
+            if cost == np.inf:  # We had some essential part in Y.
                 return cost, None
             else:
                 return cost, np.array([[-1, j] for j in range(m)])
@@ -250,24 +248,28 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
 
 
-    # Second step: handle essential parts
+    # Check essential part and enable autodiff together
+    if enable_autodiff and keep_essential_parts:
+        import warnings  # should it be done at the top of the file?
+        warnings.warn('''enable_autodiff=True and keep_essential_parts=True are incompatible.
+                      keep_essential_parts is set to False: only points with finite coordinates are considered
+                      in the following.
+                      ''')
+        keep_essential_parts = False
+
+    # Second step: handle essential parts if needed.
     if keep_essential_parts:
         essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
         if (essential_cost == np.inf):
             if matching:
                 return np.inf, None
             else:
-                return np.inf  # avoid computing off-diagonal transport cost if essential parts do not match (saves time)
+                return np.inf  # avoid computing transport cost between the finite parts if essential parts
+                               # cardinalities do not match (saves time)
     else:
         essential_cost = 0
         essential_matching = None
 
-    # Extract finite points of the diagrams.
Note that if enable_autodiff is True, nothing is done here (X,Y are - # assumed to be tensors with only finite coordinates). - X, Y = _finite_part(X, enable_autodiff), _finite_part(Y, enable_autodiff) - n = len(X) - m = len(Y) - # Now the standard pipeline for finite parts if enable_autodiff: import eagerpy as ep @@ -277,6 +279,11 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab X = X_orig.numpy() Y = Y_orig.numpy() + # Extract finite points of the diagrams. + X, Y = _finite_part(X), _finite_part(Y) + n = len(X) + m = len(Y) + M = _build_dist_matrix(X, Y, order=order, internal_p=internal_p) a = np.ones(n+1) # weight vector of the input diagram. Uniform here. a[-1] = m -- cgit v1.2.3 From 2a11e3651c2d66df8371a9aa1d23dff69ffbc31c Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 15:54:26 +0200 Subject: removed test_wasserstein_distance_grad to be consistent with master --- src/python/test/test_wasserstein_distance.py | 23 ----------------------- 1 file changed, 23 deletions(-) (limited to 'src') diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 12bf71df..14d5c2ca 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -159,26 +159,3 @@ def test_wasserstein_distance_hera(): _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False) _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False) -def test_wasserstein_distance_grad(): - import torch - - diag1 = torch.tensor([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]], requires_grad=True) - diag2 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) - diag3 = torch.tensor([[2.8, 4.45], [9.5, 14.1]], requires_grad=True) - assert diag1.grad is None and diag2.grad is None and diag3.grad is None - dist12 = pot(diag1, diag2, internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False) - dist30 = pot(diag3, torch.tensor([]), internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False) - dist12.backward() - dist30.backward() - assert not torch.isnan(diag1.grad).any() and not torch.isnan(diag2.grad).any() and not torch.isnan(diag3.grad).any() - diag4 = torch.tensor([[0., 10.]], requires_grad=True) - diag5 = torch.tensor([[1., 11.], [3., 4.]], requires_grad=True) - dist45 = pot(diag4, diag5, internal_p=1, order=1, enable_autodiff=True, keep_essential_parts=False) - assert dist45 == 3. 
- dist45.backward() - assert np.array_equal(diag4.grad, [[-1., -1.]]) - assert np.array_equal(diag5.grad, [[1., 1.], [-1., 1.]]) - diag6 = torch.tensor([[5., 10.]], requires_grad=True) - pot(diag6, diag6, internal_p=2, order=2, enable_autodiff=True, keep_essential_parts=False).backward() - # https://github.com/jonasrauber/eagerpy/issues/6 - # assert np.array_equal(diag6.grad, [[0., 0.]]) -- cgit v1.2.3 From cdab3c9e32923f83d25d2cdf207f3cddbb3f94f6 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 17:02:34 +0200 Subject: handle essential parts test --- src/python/gudhi/wasserstein/wasserstein.py | 1 + src/python/test/test_wasserstein_distance.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 2911f826..7cb9d5d9 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -113,6 +113,7 @@ def _get_essential_parts(a): second_coord_infinite_positive = (a[:,1] == np.inf) first_coord_infinite_negative = (a[:,0] == -np.inf) second_coord_infinite_negative = (a[:,1] == -np.inf) + ess_first_type = np.where(second_coord_finite & first_coord_infinite_negative)[0] # coord (-inf, x) ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0] # coord (x, +inf) ess_third_type = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0] # coord (-inf, +inf) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 14d5c2ca..df7acc91 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -31,7 +31,7 @@ def test_proj_on_diag(): def test_finite_part(): diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) - assert np.array_equal(_finite_part(diag, enable_autodiff=False), [[0, 1], [3, 5]]) + assert np.array_equal(_finite_part(diag), [[0, 1], [3, 5]]) def test_handle_essential_parts(): -- cgit v1.2.3 From bb0792ed7bfe9d718be3e8039e8fb89af6d160e5 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Mon, 12 Apr 2021 19:48:57 +0200 Subject: added warning when cost is infty and matching is None --- src/python/doc/wasserstein_distance_user.rst | 4 +-- src/python/gudhi/wasserstein/wasserstein.py | 44 ++++++++++++++++++---------- 2 files changed, 30 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index b3d17495..091c9fd9 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -100,7 +100,7 @@ any matching has a cost +inf and thus can be considered to be optimal. In such a print("matchings:", matchings) -The output is: +The output is: .. testoutput:: @@ -197,4 +197,4 @@ Tutorial This `notebook `_ -presents the concept of barycenter, or Fréchet mean, of a family of persistence diagrams. \ No newline at end of file +presents the concept of barycenter, or Fréchet mean, of a family of persistence diagrams. 
diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py
index 7cb9d5d9..8ccbe12e 100644
--- a/src/python/gudhi/wasserstein/wasserstein.py
+++ b/src/python/gudhi/wasserstein/wasserstein.py
@@ -9,6 +9,7 @@
 
 import numpy as np
 import scipy.spatial.distance as sc
+import warnings
 
 try:
     import ot
@@ -188,6 +189,20 @@ def _finite_part(X):
     return X[np.where(np.isfinite(X[:,0]) & np.isfinite(X[:,1]))]
 
 
+def _warn_infty(matching):
+    '''
+    Handle essential parts with different cardinalities. Warn the user about cost being infinite and (if
+    `matching=True`) about the returned matching being `None`.
+    '''
+    if matching:
+        warnings.warn('Cardinality of essential parts differs. Distance (cost) is +infty, and the returned matching is None.')
+        return np.inf, None
+    else:
+        warnings.warn('Cardinality of essential parts differs. Distance (cost) is +infty.')
+        return np.inf
+
+
+
 def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False,
                          keep_essential_parts=True):
     '''
@@ -230,28 +245,27 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
             else:
                 return 0., np.array([])
         else:
-            if not matching:
-                return _perstot(Y, order, internal_p, enable_autodiff)
+            cost = _perstot(Y, order, internal_p, enable_autodiff)
+            if cost == np.inf:
+                return _warn_infty(matching)
             else:
-                cost = _perstot(Y, order, internal_p, enable_autodiff)
-                if cost == np.inf:  # We had some essential part in Y.
-                    return cost, None
+                if not matching:
+                    return cost
                 else:
                     return cost, np.array([[-1, j] for j in range(m)])
     elif m == 0:
-        if not matching:
-            return _perstot(X, order, internal_p, enable_autodiff)
+        cost = _perstot(X, order, internal_p, enable_autodiff)
+        if cost == np.inf:
+            return _warn_infty(matching)
         else:
-            cost = _perstot(X, order, internal_p, enable_autodiff)
-            if cost == np.inf:
-                return cost, None
+            if not matching:
+                return cost
             else:
                 return cost, np.array([[i, -1] for i in range(n)])
 
 
     # Check essential part and enable autodiff together
     if enable_autodiff and keep_essential_parts:
-        import warnings  # should it be done at the top of the file?
         warnings.warn('''enable_autodiff=True and keep_essential_parts=True are incompatible.
                       keep_essential_parts is set to False: only points with finite coordinates are considered
                       in the following.
                       ''')
@@ -276,11 +290,9 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab
     if keep_essential_parts:
         essential_cost, essential_matching = _handle_essential_parts(X, Y, order=order)
         if (essential_cost == np.inf):
-            if matching:
-                return np.inf, None
-            else:
-                return np.inf  # avoid computing transport cost between the finite parts if essential parts
-                               # cardinalities do not match (saves time)
+            return _warn_infty(matching)  # Tells the user that cost is infty and matching (if True) is None.
+                                          # avoid computing transport cost between the finite parts if essential parts
+                                          # cardinalities do not match (saves time)
     else:
         essential_cost = 0
         essential_matching = None
-- 
cgit v1.2.3
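The `_warn_infty` helper added above emits a standard `UserWarning` before returning the infinite cost. A hedged usage sketch (it assumes the patched branch is installed; the toy diagrams mirror the documentation example):

    import warnings
    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    dgm1 = np.array([[1., 2.], [0., np.inf]])
    dgm2 = np.array([[1., 2.], [0., np.inf], [1., np.inf]])

    # The (x, +inf) parts have cardinalities 1 and 2, so the cost is +inf,
    # the matching is None, and the new warning explains why.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        cost, match = wasserstein_distance(dgm1, dgm2, matching=True)
    print(cost, match)        # inf None
    print(caught[0].message)  # the message introduced by this commit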
From 9616e2b8f616366393bf0b74a76029ae8a95d77a Mon Sep 17 00:00:00 2001
From: Marc Glisse
Date: Fri, 16 Apr 2021 22:49:55 +0200
Subject: Rewrite choose_n_farthest_points

Introduce an indirection to help with points with multiplicity.
Limit the use of magic values like 0 and infinity.

---
 .../include/gudhi/choose_n_farthest_points.h       | 55 +++++++++++++++-------
 .../test/test_choose_n_farthest_points.cpp         |  5 +-
 2 files changed, 42 insertions(+), 18 deletions(-)

(limited to 'src')

diff --git a/src/Subsampling/include/gudhi/choose_n_farthest_points.h b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
index e6347d96..44c02df1 100644
--- a/src/Subsampling/include/gudhi/choose_n_farthest_points.h
+++ b/src/Subsampling/include/gudhi/choose_n_farthest_points.h
@@ -42,7 +42,7 @@ enum : std::size_t {
  * The iteration starts with the landmark `starting point` or, if `starting point==random_starting_point`,
  * with a random landmark.
  * It chooses `final_size` points from a random access range
- * `input_pts` (or the number of distinct points if `final_size` is larger)
+ * `input_pts` (or the number of input points if `final_size` is larger)
  * and outputs them in the output iterator `output_it`. It also
  * outputs the distance from each of those points to the set of previous
  * points in `dist_it`.
@@ -88,34 +88,57 @@ void choose_n_farthest_points(Distance dist,
     starting_point = dis(gen);
   }
 
-  std::size_t current_number_of_landmarks = 0;  // counter for landmarks
   // FIXME: don't hard-code the type as double. For Epeck_d, we also want to handle types that do not have an infinity.
-  static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
-  const double infty = std::numeric_limits<double>::infinity();  // infinity (see next entry)
-  std::vector< double > dist_to_L(nb_points, infty);  // vector of current distances to L from input_pts
+  static_assert(std::numeric_limits<double>::has_infinity, "the number type needs to support infinity()");
+
+  *output_it++ = input_pts[starting_point];
+  *dist_it++ = std::numeric_limits<double>::infinity();
+  if (final_size == 1) return;
+
+  std::vector<std::size_t> points(nb_points);  // map from remaining points to indexes in input_pts
+  std::vector< double > dist_to_L(nb_points);  // vector of current distances to L from points
+  for(std::size_t i = 0; i < nb_points; ++i) {
+    points[i] = i;
+    dist_to_L[i] = dist(input_pts[i], input_pts[starting_point]);
+  }
+  // The indirection through points makes the program a bit slower. Some alternatives:
+  // - the original code never removed points and counted on them not
+  //   reappearing because of a self-distance of 0. This causes unnecessary
+  //   computations when final_size is large. It also causes trouble if there are
+  //   input points at distance 0 from each other.
+  // - copy input_pts and update the local copy when removing points.
 
   std::size_t curr_max_w = starting_point;
 
-  for (current_number_of_landmarks = 0; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
-    // curr_max_w at this point is the next landmark
-    *output_it++ = input_pts[curr_max_w];
-    *dist_it++ = dist_to_L[curr_max_w];
+  for (std::size_t current_number_of_landmarks = 1; current_number_of_landmarks != final_size; current_number_of_landmarks++) {
+    std::size_t latest_landmark = points[curr_max_w];
+    // To remove the latest landmark at index curr_max_w, replace it
+    // with the last point and reduce the length of the vector.
+    std::size_t last = points.size() - 1;
+    if (curr_max_w != last) {
+      points[curr_max_w] = points[last];
+      dist_to_L[curr_max_w] = dist_to_L[last];
+    }
+    points.pop_back();
+
+    // Update distances to L.
    std::size_t i = 0;
-    for (auto&& p : input_pts) {
-      double curr_dist = dist(p, input_pts[curr_max_w]);
+    for (auto p : points) {
+      double curr_dist = dist(input_pts[p], input_pts[latest_landmark]);
       if (curr_dist < dist_to_L[i])
         dist_to_L[i] = curr_dist;
       ++i;
     }
-    // choose the next curr_max_w
-    double curr_max_dist = 0;  // used for defining the furhest point from L
-    for (i = 0; i < dist_to_L.size(); i++)
+    // choose the next landmark
+    curr_max_w = 0;
+    double curr_max_dist = dist_to_L[curr_max_w];  // used for defining the furthest point from L
+    for (i = 1; i < points.size(); i++)
       if (dist_to_L[i] > curr_max_dist) {
         curr_max_dist = dist_to_L[i];
         curr_max_w = i;
       }
-    // If all that remains are duplicates of points already taken, stop.
-    if (curr_max_dist == 0) break;
+    *output_it++ = input_pts[points[curr_max_w]];
+    *dist_it++ = dist_to_L[curr_max_w];
   }
 }
 
diff --git a/src/Subsampling/test/test_choose_n_farthest_points.cpp b/src/Subsampling/test/test_choose_n_farthest_points.cpp
index 94793295..c384c61b 100644
--- a/src/Subsampling/test/test_choose_n_farthest_points.cpp
+++ b/src/Subsampling/test/test_choose_n_farthest_points.cpp
@@ -102,11 +102,12 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(test_choose_farthest_point_limits, Kernel, list_of
   BOOST_CHECK(distances[1] == 1);
   landmarks.clear(); distances.clear();
 
-  // Ignore duplicated points
+  // Accept duplicated points
   points.emplace_back(point.begin(), point.end());
   Gudhi::subsampling::choose_n_farthest_points(d, points, -1, -1, std::back_inserter(landmarks), std::back_inserter(distances));
-  BOOST_CHECK(landmarks.size() == 2 && distances.size() == 2);
+  BOOST_CHECK(landmarks.size() == 3 && distances.size() == 3);
   BOOST_CHECK(distances[0] == std::numeric_limits<FT>::infinity());
   BOOST_CHECK(distances[1] == 1);
+  BOOST_CHECK(distances[2] == 0);
   landmarks.clear(); distances.clear();
 }
-- 
cgit v1.2.3
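The rewritten loop is easier to follow as straight-line code. Below is a Python transcription of the patched algorithm (a sketch assuming a Euclidean metric; the function name is made up and this is not the GUDHI API). The key change is the indirection through `points`: a chosen landmark is removed by swapping it with the last remaining candidate, so duplicated input points are kept and eventually reported with distance 0:

    import numpy as np

    def farthest_point_ordering(pts, final_size, starting_point=0):
        pts = np.asarray(pts, dtype=float)
        n = len(pts)
        dist = lambda i, j: np.linalg.norm(pts[i] - pts[j])
        landmarks = [starting_point]
        distances = [np.inf]  # by convention the first landmark gets infinity
        points = [i for i in range(n) if i != starting_point]  # remaining candidates
        dist_to_L = [dist(i, starting_point) for i in points]
        while len(landmarks) < min(final_size, n):
            w = int(np.argmax(dist_to_L))     # furthest remaining point
            landmarks.append(points[w])
            distances.append(dist_to_L[w])
            latest = points[w]
            # swap-with-last removal, as in the patched C++
            points[w], dist_to_L[w] = points[-1], dist_to_L[-1]
            points.pop()
            dist_to_L.pop()
            for idx, p in enumerate(points):  # update distances to the landmark set
                dist_to_L[idx] = min(dist_to_L[idx], dist(p, latest))
        return landmarks, distances

    # Duplicates are no longer skipped: the last one comes out with distance 0,
    # matching the updated test (distances inf, 1, 0).
    print(farthest_point_ordering([[0., 0.], [1., 0.], [1., 0.]], 3))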
* */ @@ -67,6 +69,7 @@ class Sparse_rips_complex { : epsilon_(epsilon) { GUDHI_CHECK(epsilon > 0, "epsilon must be positive"); auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); }; + // TODO: stop choose_n_farthest_points once it reaches mini? Then the graph vertices would not be [0, ..., n-1] which complicates things. subsampling::choose_n_farthest_points(dist_fun, boost::irange(0, boost::size(points)), -1, -1, std::back_inserter(sorted_points), std::back_inserter(params)); compute_sparse_graph(dist_fun, epsilon, mini, maxi); @@ -116,9 +119,9 @@ class Sparse_rips_complex { double cst = epsilon_ * (1 - epsilon_) / 2; auto block = [cst,&complex,&lambda](typename SimplicialComplexForRips::Simplex_handle sh){ auto filt = complex.filtration(sh); - auto mini = filt * cst; + auto min_f = filt * cst; for(auto v : complex.simplex_vertex_range(sh)){ - if(lambda[v] < mini) + if(lambda[v] < min_f) return true; // v died before this simplex could be born } return false; @@ -149,12 +152,22 @@ class Sparse_rips_complex { for (int i = 0; i < n; ++i) { auto&& pi = points[i]; auto li = params[i]; - if (li < mini) break; + // FIXME: see below about mini. It might be ok to uncomment just this one, but it requires a proof. + // if ((li < mini || li <= 0) && i != 0) break; + if (li <= 0 && i != 0) break; + // The parameter of the first point is not very meaningful, it is supposed to be infinite, + // but if the type does not support it... + // Points with multiplicity get connected to their first representative, no need to handle + // the redundant ones in the outer loop. for (int j = i + 1; j < n; ++j) { auto&& pj = points[j]; auto d = dist(pi, pj); auto lj = params[j]; - if (lj < mini) break; + // FIXME: It would make sense to ignore the points with low param completely, but the current graph type we are + // using implicitly inserts all the vertices 0 ... n-1, so this would create isolated vertices, which is bad. + // If we do end up ignoring those points, we should do it early, around choose_n_farthest_points. But be careful + // that the size of lambda should reflect the original number of points then. 
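For readers skimming the hunk above: the renamed min_f implements the pruning rule of the sparse Rips construction, where a simplex born at filtration value f is discarded whenever one of its vertices has an insertion radius lambda[v] below f * epsilon * (1 - epsilon) / 2, i.e. that vertex was already dead at the scale where the simplex appears. A rough Python transcription of the predicate, for illustration only (hypothetical names):

    def make_blocker(lambda_, epsilon):
        cst = epsilon * (1 - epsilon) / 2
        def block(filtration_value, vertices):
            # Block the simplex if some vertex died before this simplex could be born.
            min_f = filtration_value * cst
            return any(lambda_[v] < min_f for v in vertices)
        return block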
+ // if (lj < mini) break; GUDHI_CHECK(lj <= li, "Bad furthest point sorting"); Filtration_value alpha; diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py index b86e7498..cae21435 100755 --- a/src/python/test/test_rips_complex.py +++ b/src/python/test/test_rips_complex.py @@ -133,3 +133,24 @@ def test_filtered_rips_from_distance_matrix(): assert simplex_tree.num_simplices() == 8 assert simplex_tree.num_vertices() == 4 + + +def test_sparse_with_multiplicity(): + points = [ + [3, 4], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [0.1, 2], + [3, 4.1], + ] + rips = RipsComplex(points=points, sparse=0.01) + simplex_tree = rips.create_simplex_tree(max_dimension=2) + assert simplex_tree.num_simplices() == 25 + diag = simplex_tree.persistence() -- cgit v1.2.3 From 21741a3a415d6bc1b552c5b621f02a50db771c22 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 17 Apr 2021 16:54:09 +0200 Subject: Don't qualify calls for the graph concept --- src/Simplex_tree/include/gudhi/Simplex_tree.h | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/Simplex_tree/include/gudhi/Simplex_tree.h b/src/Simplex_tree/include/gudhi/Simplex_tree.h index 85d6c3b0..85790baf 100644 --- a/src/Simplex_tree/include/gudhi/Simplex_tree.h +++ b/src/Simplex_tree/include/gudhi/Simplex_tree.h @@ -1060,8 +1060,8 @@ class Simplex_tree { * * Inserts all vertices and edges given by a OneSkeletonGraph. * OneSkeletonGraph must be a model of - * boost::EdgeListGraph - * and boost::PropertyGraph. + * boost::VertexAndEdgeListGraph + * and boost::PropertyGraph. * * The vertex filtration value is accessible through the property tag * vertex_filtration_t. @@ -1081,7 +1081,10 @@ class Simplex_tree { // the simplex tree must be empty assert(num_simplices() == 0); - if (boost::num_vertices(skel_graph) == 0) { + // is there a better way to let the compiler know that we don't mean Simplex_tree::num_vertices? 
+ using boost::num_vertices; + + if (num_vertices(skel_graph) == 0) { return; } if (num_edges(skel_graph) == 0) { @@ -1090,18 +1093,18 @@ class Simplex_tree { dimension_ = 1; } - root_.members_.reserve(boost::num_vertices(skel_graph)); + root_.members_.reserve(num_vertices(skel_graph)); typename boost::graph_traits::vertex_iterator v_it, v_it_end; - for (std::tie(v_it, v_it_end) = boost::vertices(skel_graph); v_it != v_it_end; + for (std::tie(v_it, v_it_end) = vertices(skel_graph); v_it != v_it_end; ++v_it) { root_.members_.emplace_hint( root_.members_.end(), *v_it, - Node(&root_, boost::get(vertex_filtration_t(), skel_graph, *v_it))); + Node(&root_, get(vertex_filtration_t(), skel_graph, *v_it))); } std::pair::edge_iterator, - typename boost::graph_traits::edge_iterator> boost_edges = boost::edges(skel_graph); + typename boost::graph_traits::edge_iterator> boost_edges = edges(skel_graph); // boost_edges.first is the equivalent to boost_edges.begin() // boost_edges.second is the equivalent to boost_edges.end() for (; boost_edges.first != boost_edges.second; boost_edges.first++) { @@ -1123,7 +1126,7 @@ class Simplex_tree { } sh->second.children()->members().emplace(v, - Node(sh->second.children(), boost::get(edge_filtration_t(), skel_graph, edge))); + Node(sh->second.children(), get(edge_filtration_t(), skel_graph, edge))); } } -- cgit v1.2.3 From 20bee15a2e7dc68deb3141ebab7a30a3edcfb401 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 17 Apr 2021 16:54:38 +0200 Subject: Introduce a custom graph type --- .../include/gudhi/Sparse_rips_complex.h | 76 ++++++++++++++++++---- 1 file changed, 63 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h index d7669dad..30afb1d0 100644 --- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h +++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h @@ -17,9 +17,68 @@ #include #include +#include #include +namespace Gudhi { +namespace rips_complex { +// A custom graph class, because boost::adjacency_list does not conveniently allow to choose vertex descriptors +template +struct Graph { + typedef std::vector VList; + typedef std::vector> EList; + typedef typename VList::const_iterator vertex_iterator; + typedef boost::counting_iterator edge_iterator; + VList vlist; + EList elist; +}; +template +void add_vertex(Vertex_handle v, Graph&g) { g.vlist.push_back(v); } +template +void add_edge(Vertex_handle u, Vertex_handle v, Filtration_value f, Graph&g) { g.elist.emplace_back(u, v, f); } +template +std::size_t num_vertices(Graph const&g) { return g.vlist.size(); } +template +std::size_t num_edges(Graph const&g) { return g.elist.size(); } +template ::vertex_iterator> +std::pair +vertices(Graph const&g) { + return { g.vlist.begin(), g.vlist.end() }; +} +template +std::pair, boost::counting_iterator> +edges(Graph const&g) { + typedef boost::counting_iterator I; + return { I(0), I(g.elist.size()) }; +} +template +std::size_t source(std::size_t e, Graph const&g) { return std::get<0>(g.elist[e]); } +template +std::size_t target(std::size_t e, Graph const&g) { return std::get<1>(g.elist[e]); } +template +Filtration_value get(vertex_filtration_t, Graph const&, Vertex_handle) { return 0; } +template +Filtration_value get(edge_filtration_t, Graph const&g, std::size_t e) { return std::get<2>(g.elist[e]); } +} // namespace rips_complex +} // namespace Gudhi +namespace boost { +template +struct graph_traits> { + typedef Gudhi::rips_complex::Graph G; + 
struct traversal_category : vertex_list_graph_tag, edge_list_graph_tag {}; + typedef Vertex_handle vertex_descriptor; + typedef typename G::vertex_iterator vertex_iterator; + typedef std::size_t vertices_size_type; + typedef std::size_t edge_descriptor; + typedef typename G::edge_iterator edge_iterator; + typedef std::size_t edges_size_type; + typedef directed_tag directed_category; + typedef disallow_parallel_edge_tag edge_parallel_category; +}; +// Etc, since we don't expose this graph to the world, we know we are not going to query property_traits. +} + namespace Gudhi { namespace rips_complex { @@ -45,13 +104,8 @@ template class Sparse_rips_complex { private: // TODO(MG): use a different graph where we know we can safely insert in parallel. - // Use a graph that lets us skip some vertices, for `mini` or redundant points. - typedef typename boost::adjacency_list, - boost::property> - Graph; - typedef int Vertex_handle; + typedef rips_complex::Graph Graph; public: /** \brief Sparse_rips_complex constructor from a list of points. @@ -137,13 +191,9 @@ class Sparse_rips_complex { const int n = boost::size(points); double cst = epsilon * (1 - epsilon) / 2; graph_.~Graph(); - new (&graph_) Graph(n); - // for(auto v : vertices(g)) // doesn't work :-( - typename boost::graph_traits::vertex_iterator v_i, v_e; - for (std::tie(v_i, v_e) = vertices(graph_); v_i != v_e; ++v_i) { - auto v = *v_i; - // This whole loop might not be necessary, leave it until someone investigates if it is safe to remove. - put(vertex_filtration_t(), graph_, v, 0); + new (&graph_) Graph(); + for (int i = 0; i < n; ++i) { + add_vertex(i, graph_); } // TODO(MG): -- cgit v1.2.3 From 71337179d95d1e330902b431907cb07698abcdc9 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Sat, 17 Apr 2021 17:31:23 +0200 Subject: Safely drop some vertices in sparse Rips --- .../include/gudhi/Sparse_rips_complex.h | 48 ++++++++++------------ src/python/test/test_rips_complex.py | 2 +- 2 files changed, 23 insertions(+), 27 deletions(-) (limited to 'src') diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h index 30afb1d0..28031e68 100644 --- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h +++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h @@ -76,7 +76,7 @@ struct graph_traits> typedef directed_tag directed_category; typedef disallow_parallel_edge_tag edge_parallel_category; }; -// Etc, since we don't expose this graph to the world, we know we are not going to query property_traits. +// Etc, since we don't expose this graph to the world, we know we are not going to query property_traits for instance. } namespace Gudhi { @@ -113,8 +113,7 @@ class Sparse_rips_complex { * @param[in] points Range of points. * @param[in] distance Distance function that returns a `Filtration_value` from 2 given points. * @param[in] epsilon Approximation parameter. epsilon must be positive. - * @param[in] mini Not implemented yet, and broken in previous versions. Minimal filtration value. - * Ignore anything below this scale. This is a less efficient version of `Gudhi::subsampling::sparsify_point_set()`. + * @param[in] mini Minimal filtration value. Ignore anything below this scale. This is a less efficient version of `Gudhi::subsampling::sparsify_point_set()`. * @param[in] maxi Maximal filtration value. Ignore anything above this scale. 
* */ @@ -123,7 +122,7 @@ class Sparse_rips_complex { : epsilon_(epsilon) { GUDHI_CHECK(epsilon > 0, "epsilon must be positive"); auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); }; - // TODO: stop choose_n_farthest_points once it reaches mini? Then the graph vertices would not be [0, ..., n-1] which complicates things. + // TODO: stop choose_n_farthest_points once it reaches mini or 0? subsampling::choose_n_farthest_points(dist_fun, boost::irange(0, boost::size(points)), -1, -1, std::back_inserter(sorted_points), std::back_inserter(params)); compute_sparse_graph(dist_fun, epsilon, mini, maxi); @@ -165,10 +164,10 @@ class Sparse_rips_complex { complex.expansion(dim_max); return; } - const int n = boost::size(params); - std::vector lambda(n); + const Vertex_handle n = num_vertices(graph_); + std::vector lambda(max_v + 1); // lambda[original_order]=params[sorted_order] - for(int i=0;i void compute_sparse_graph(Distance& dist, double epsilon, Filtration_value mini, Filtration_value maxi) { const auto& points = sorted_points; // convenience alias - const int n = boost::size(points); + Vertex_handle n = boost::size(points); double cst = epsilon * (1 - epsilon) / 2; - graph_.~Graph(); - new (&graph_) Graph(); - for (int i = 0; i < n; ++i) { - add_vertex(i, graph_); + max_v = -1; // Useful for the size of the map lambda. + for (Vertex_handle i = 0; i < n; ++i) { + if ((params[i] < mini || params[i] <= 0) && i != 0) break; + // The parameter of the first point is not very meaningful, it is supposed to be infinite, + // but if the type does not support it... + // It would be better to do this reduction of the number of points earlier, around choose_n_farthest_points. + add_vertex(points[i], graph_); + max_v = std::max(max_v, points[i]); } + n = num_vertices(graph_); // TODO(MG): // - make it parallel // - only test near-enough neighbors - for (int i = 0; i < n; ++i) { + for (Vertex_handle i = 0; i < n; ++i) { auto&& pi = points[i]; auto li = params[i]; - // FIXME: see below about mini. It might be ok to uncomment just this one, but it requires a proof. - // if ((li < mini || li <= 0) && i != 0) break; - if (li <= 0 && i != 0) break; - // The parameter of the first point is not very meaningful, it is supposed to be infinite, - // but if the type does not support it... - // Points with multiplicity get connected to their first representative, no need to handle - // the redundant ones in the outer loop. - for (int j = i + 1; j < n; ++j) { + // If we inserted all the points, points with multiplicity would get connected to their first representative, + // no need to handle the redundant ones in the outer loop. + // if (li <= 0 && i != 0) break; + for (Vertex_handle j = i + 1; j < n; ++j) { auto&& pj = points[j]; auto d = dist(pi, pj); auto lj = params[j]; - // FIXME: It would make sense to ignore the points with low param completely, but the current graph type we are - // using implicitly inserts all the vertices 0 ... n-1, so this would create isolated vertices, which is bad. - // If we do end up ignoring those points, we should do it early, around choose_n_farthest_points. But be careful - // that the size of lambda should reflect the original number of points then. 
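The dropping rule sketched in this commit relies on choose_n_farthest_points emitting points by non-increasing insertion radius: as soon as one radius falls below mini (or reaches 0, i.e. a duplicate), the whole tail is redundant at that scale and can be cut in one go; only the first point is exempt, since its radius is conventionally infinite (or meaningless when the number type has no infinity). Schematically, in Python (hypothetical names):

    def kept_vertices(sorted_points, params, mini):
        # params[i] is the insertion radius of sorted_points[i], non-increasing in i.
        kept = []
        for i, (v, r) in enumerate(zip(sorted_points, params)):
            if i != 0 and (r < mini or r <= 0):
                break  # everything after this index is redundant at scale mini
            kept.append(v)
        return kept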
- // if (lj < mini) break; GUDHI_CHECK(lj <= li, "Bad furthest point sorting"); Filtration_value alpha; @@ -241,6 +236,7 @@ class Sparse_rips_complex { Graph graph_; double epsilon_; + Vertex_handle max_v; // Because of the arbitrary split between constructor and create_complex // sorted_points[sorted_order]=original_order std::vector sorted_points; diff --git a/src/python/test/test_rips_complex.py b/src/python/test/test_rips_complex.py index cae21435..a2f43a1b 100755 --- a/src/python/test/test_rips_complex.py +++ b/src/python/test/test_rips_complex.py @@ -152,5 +152,5 @@ def test_sparse_with_multiplicity(): ] rips = RipsComplex(points=points, sparse=0.01) simplex_tree = rips.create_simplex_tree(max_dimension=2) - assert simplex_tree.num_simplices() == 25 + assert simplex_tree.num_simplices() == 7 diag = simplex_tree.persistence() -- cgit v1.2.3 From 604b2cde0c7951c81d1c510f3038e2c65c19e6fe Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 20 Apr 2021 19:06:56 +0200 Subject: update doc and tests --- src/python/doc/wasserstein_distance_user.rst | 1 + src/python/test/test_wasserstein_distance.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/python/doc/wasserstein_distance_user.rst b/src/python/doc/wasserstein_distance_user.rst index 091c9fd9..76eb1469 100644 --- a/src/python/doc/wasserstein_distance_user.rst +++ b/src/python/doc/wasserstein_distance_user.rst @@ -92,6 +92,7 @@ any matching has a cost +inf and thus can be considered to be optimal. In such a for j in dgm2_to_diagonal: print("point %s in dgm2 is matched to the diagonal" %j) + # An example where essential part cardinalities differ dgm3 = np.array([[1, 2], [0, np.inf]]) dgm4 = np.array([[1, 2], [0, np.inf], [1, np.inf]]) cost, matchings = gudhi.wasserstein.wasserstein_distance(dgm3, dgm4, matching=True, order=1, internal_p=2) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index df7acc91..121ba065 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -67,16 +67,25 @@ def test_handle_essential_parts(): def test_get_essential_parts(): - diag = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], + diag1 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf], [-np.inf, 8], [-np.inf, 12], [-np.inf, -np.inf], [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) - res = _get_essential_parts(diag) + diag2 = np.array([[0, 1], [3, 5], [2, np.inf], [3, np.inf]]) + + res = _get_essential_parts(diag1) + res2 = _get_essential_parts(diag2) assert np.array_equal(res[0], [4, 5]) assert np.array_equal(res[1], [2, 3]) assert np.array_equal(res[2], [8, 9]) assert np.array_equal(res[3], [6] ) assert np.array_equal(res[4], [7] ) + assert np.array_equal(res2[0], [] ) + assert np.array_equal(res2[1], [2, 3]) + assert np.array_equal(res2[2], [] ) + assert np.array_equal(res2[3], [] ) + assert np.array_equal(res2[4], [] ) + def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]) @@ -152,7 +161,7 @@ def pot_wrap(**extra): return fun def test_wasserstein_distance_pot(): - _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) + _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) # pot with its standard args _basic_wasserstein(pot_wrap(enable_autodiff=True, keep_essential_parts=False), 1e-15, 
test_infinity=False, test_matching=False) def test_wasserstein_distance_hera(): -- cgit v1.2.3 From e3865868cd36f27e57f75be64749429773a1734f Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 20 Apr 2021 21:54:35 +0200 Subject: Targeted include, more const --- src/Rips_complex/include/gudhi/Sparse_rips_complex.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h index 28031e68..9c5993c5 100644 --- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h +++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include @@ -118,7 +118,7 @@ class Sparse_rips_complex { * */ template - Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double epsilon, Filtration_value mini=-std::numeric_limits::infinity(), Filtration_value maxi=std::numeric_limits::infinity()) + Sparse_rips_complex(const RandomAccessPointRange& points, Distance distance, double const epsilon, Filtration_value const mini=-std::numeric_limits::infinity(), Filtration_value const maxi=std::numeric_limits::infinity()) : epsilon_(epsilon) { GUDHI_CHECK(epsilon > 0, "epsilon must be positive"); auto dist_fun = [&](Vertex_handle i, Vertex_handle j) { return distance(points[i], points[j]); }; @@ -139,7 +139,7 @@ class Sparse_rips_complex { * @param[in] maxi Maximal filtration value. Ignore anything above this scale. */ template - Sparse_rips_complex(const DistanceMatrix& distance_matrix, double epsilon, Filtration_value mini=-std::numeric_limits::infinity(), Filtration_value maxi=std::numeric_limits::infinity()) + Sparse_rips_complex(const DistanceMatrix& distance_matrix, double const epsilon, Filtration_value const mini=-std::numeric_limits::infinity(), Filtration_value const maxi=std::numeric_limits::infinity()) : Sparse_rips_complex(boost::irange(0, boost::size(distance_matrix)), [&](Vertex_handle i, Vertex_handle j) { return (i==j) ? 0 : (i - void create_complex(SimplicialComplexForRips& complex, int dim_max) { + void create_complex(SimplicialComplexForRips& complex, int const dim_max) { GUDHI_CHECK(complex.num_vertices() == 0, std::invalid_argument("Sparse_rips_complex::create_complex - simplicial complex is not empty")); @@ -185,7 +185,7 @@ class Sparse_rips_complex { private: // PointRange must be random access. 
template - void compute_sparse_graph(Distance& dist, double epsilon, Filtration_value mini, Filtration_value maxi) { + void compute_sparse_graph(Distance& dist, double const epsilon, Filtration_value const mini, Filtration_value const maxi) { const auto& points = sorted_points; // convenience alias Vertex_handle n = boost::size(points); double cst = epsilon * (1 - epsilon) / 2; -- cgit v1.2.3 From 2ca42207529f0f21c2cb1392ebfd2b5f41882b60 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Tue, 20 Apr 2021 22:39:41 +0200 Subject: Vertex_handle -> size_t --- src/Rips_complex/include/gudhi/Sparse_rips_complex.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h index 9c5993c5..8024f92d 100644 --- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h +++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h @@ -164,10 +164,10 @@ class Sparse_rips_complex { complex.expansion(dim_max); return; } - const Vertex_handle n = num_vertices(graph_); + const std::size_t n = num_vertices(graph_); std::vector lambda(max_v + 1); // lambda[original_order]=params[sorted_order] - for(Vertex_handle i=0;i void compute_sparse_graph(Distance& dist, double const epsilon, Filtration_value const mini, Filtration_value const maxi) { const auto& points = sorted_points; // convenience alias - Vertex_handle n = boost::size(points); + std::size_t n = boost::size(points); double cst = epsilon * (1 - epsilon) / 2; max_v = -1; // Useful for the size of the map lambda. - for (Vertex_handle i = 0; i < n; ++i) { + for (std::size_t i = 0; i < n; ++i) { if ((params[i] < mini || params[i] <= 0) && i != 0) break; // The parameter of the first point is not very meaningful, it is supposed to be infinite, // but if the type does not support it... @@ -203,13 +203,13 @@ class Sparse_rips_complex { // TODO(MG): // - make it parallel // - only test near-enough neighbors - for (Vertex_handle i = 0; i < n; ++i) { + for (std::size_t i = 0; i < n; ++i) { auto&& pi = points[i]; auto li = params[i]; // If we inserted all the points, points with multiplicity would get connected to their first representative, // no need to handle the redundant ones in the outer loop. // if (li <= 0 && i != 0) break; - for (Vertex_handle j = i + 1; j < n; ++j) { + for (std::size_t j = i + 1; j < n; ++j) { auto&& pj = points[j]; auto d = dist(pi, pj); auto lj = params[j]; -- cgit v1.2.3 From 0360f02ec1778daae53b50c50f223049fa294328 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 21 Apr 2021 10:26:05 +0200 Subject: typo corrected --- src/python/gudhi/wasserstein/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 8ccbe12e..a89c7efd 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -198,7 +198,7 @@ def _warn_infty(matching): warnings.warn('Cardinality of essential parts differs. Distance (cost) is +infty, and the returned matching is None.') return np.inf, None else: - warnings.warn('Cardinality of essential parts diffes. Distance (cost) is +infty.') + warnings.warn('Cardinality of essential parts differs. 
Distance (cost) is +infty.') return np.inf -- cgit v1.2.3 From bf4625f877aee83325812c6c479af4df36a2c7e9 Mon Sep 17 00:00:00 2001 From: Hind Date: Thu, 22 Apr 2021 16:14:43 +0200 Subject: Replace hardcoded PI with M_PI from cmath --- src/common/include/gudhi/random_point_generators.h | 39 +++++++++++----------- 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'src') diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h index 9dd88ac4..e02c1ed2 100644 --- a/src/common/include/gudhi/random_point_generators.h +++ b/src/common/include/gudhi/random_point_generators.h @@ -18,6 +18,7 @@ #include // for CGAL_VERSION_NR #include // for vector<> +#include // for M_PI // Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10 #if CGAL_VERSION_NR < 1041101000 @@ -164,11 +165,11 @@ std::vector generate_points_on_torus_3D(std::size_t nu if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 6.2832 * k1 / num_lines; - v = 6.2832 * k2 / num_lines; + u = 2 * M_PI * k1 / num_lines; + v = 2 * M_PI * k2 / num_lines; } else { - u = rng.get_double(0, 6.2832); - v = rng.get_double(0, 6.2832); + u = rng.get_double(0, 2 * M_PI); + v = rng.get_double(0, 2 * M_PI); } Point p = construct_point(k, (R + r * std::cos(u)) * std::cos(v), @@ -200,7 +201,7 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si (100. + radius_noise_percentage) / 100.); } std::vector cp2 = current_point; - double alpha = 6.2832 * slice_idx / num_slices; + double alpha = 2 * M_PI * slice_idx / num_slices; cp2.push_back(radius_noise_ratio * std::cos(alpha)); cp2.push_back(radius_noise_ratio * std::sin(alpha)); generate_uniform_points_on_torus_d( @@ -234,7 +235,7 @@ std::vector generate_points_on_torus_d(std::size_t num std::vector pt; pt.reserve(dim * 2); for (int curdim = 0; curdim < dim; ++curdim) { - FT alpha = rng.get_double(0, 6.2832); + FT alpha = rng.get_double(0, 2 * M_PI); pt.push_back(radius_noise_ratio * std::cos(alpha)); pt.push_back(radius_noise_ratio * std::sin(alpha)); } @@ -370,7 +371,7 @@ std::vector generate_points_on_3sphere_and_circle(std: for (std::size_t i = 0; i < num_points;) { Point p_sphere = *generator++; // First 3 coords - FT alpha = rng.get_double(0, 6.2832); + FT alpha = rng.get_double(0, 2 * M_PI); std::vector pt(5); pt[0] = k_coord(p_sphere, 0); pt[1] = k_coord(p_sphere, 1); @@ -403,11 +404,11 @@ std::vector generate_points_on_klein_bottle_3D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 6.2832 * k1 / num_lines; - v = 6.2832 * k2 / num_lines; + u = 2 * M_PI * k1 / num_lines; + v = 2 * M_PI * k2 / num_lines; } else { - u = rng.get_double(0, 6.2832); - v = rng.get_double(0, 6.2832); + u = rng.get_double(0, 2 * M_PI); + v = rng.get_double(0, 2 * M_PI); } double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v); Point p = construct_point(k, @@ -439,11 +440,11 @@ std::vector generate_points_on_klein_bottle_4D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 6.2832 * k1 / num_lines; - v = 6.2832 * k2 / num_lines; + u = 2 * M_PI * k1 / num_lines; + v = 2 * M_PI * k2 / num_lines; } else { - u = rng.get_double(0, 6.2832); - v = rng.get_double(0, 6.2832); + u = rng.get_double(0, 2 * M_PI); + v = rng.get_double(0, 2 * M_PI); } Point p = construct_point(k, (a + b * cos(v)) * cos(u) + (noise == 0. ? 0. 
: rng.get_double(0, noise)), @@ -478,11 +479,11 @@ generate_points_on_klein_bottle_variant_5D( if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 6.2832 * k1 / num_lines; - v = 6.2832 * k2 / num_lines; + u = 2 * M_PI * k1 / num_lines; + v = 2 * M_PI * k2 / num_lines; } else { - u = rng.get_double(0, 6.2832); - v = rng.get_double(0, 6.2832); + u = rng.get_double(0, 2 * M_PI); + v = rng.get_double(0, 2 * M_PI); } FT x1 = (a + b * cos(v)) * cos(u); FT x2 = (a + b * cos(v)) * sin(u); -- cgit v1.2.3 From 45917ecf17acacfede909994d7b3a78fc18355da Mon Sep 17 00:00:00 2001 From: Hind Date: Thu, 22 Apr 2021 17:08:17 +0200 Subject: Add random points generator on sphere in python, with an example --- src/python/CMakeLists.txt | 6 ++ .../alpha_complex_from_generated_points_example.py | 52 +++++++++++++++++ src/python/gudhi/random_point_generators.cc | 68 ++++++++++++++++++++++ 3 files changed, 126 insertions(+) create mode 100644 src/python/example/alpha_complex_from_generated_points_example.py create mode 100644 src/python/gudhi/random_point_generators.cc (limited to 'src') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 73303a24..8baf0f02 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -43,6 +43,7 @@ endfunction( add_gudhi_debug_info ) if(PYTHONINTERP_FOUND) if(PYBIND11_FOUND) add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}") + set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'random_point_generators', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ") @@ -151,6 +152,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'random_point_generators', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () @@ -425,6 +427,10 @@ if(PYTHONINTERP_FOUND) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_points_example.py") + add_test(NAME alpha_complex_from_generated_points_example_py_test + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" + ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_generated_points_example.py") add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" diff --git a/src/python/example/alpha_complex_from_generated_points_example.py b/src/python/example/alpha_complex_from_generated_points_example.py new file mode 100644 index 00000000..7a07ed42 --- /dev/null +++ b/src/python/example/alpha_complex_from_generated_points_example.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python + +from gudhi import random_point_generators +from gudhi import AlphaComplex, SimplexTree +from gudhi import plot_persistence_barcode, plot_persistence_diagram + +import matplotlib.pyplot as plt + + +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. 
+ See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + Author(s): Hind Montassif + + Copyright (C) 2021 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +__author__ = "Hind Montassif" +__copyright__ = "Copyright (C) 2021 Inria" +__license__ = "MIT" + +print("#####################################################################") +print("AlphaComplex creation from generated points") + + +# Generate a circle: 50 points; dim 2; radius 1 +points = random_point_generators.generate_points_on_sphere_d(50, 2, 1) + +# Plot the generated points (uncomment if desired) +#plt.scatter(points[:,0], points[:,1]) +#plt.show() + +# Create an alpha complex +alpha_complex = AlphaComplex(points=points) +simplex_tree = alpha_complex.create_simplex_tree() + +result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \ + repr(simplex_tree.num_simplices()) + ' simplices - ' + \ + repr(simplex_tree.num_vertices()) + ' vertices.' +print(result_str) + + +# Compute the persistence +diag = simplex_tree.persistence() + +# Plot the barcode and diagram (uncomment if desired) +#plot_persistence_barcode(diag) +#plt.show() +#plot_persistence_diagram(diag) +#plt.show() diff --git a/src/python/gudhi/random_point_generators.cc b/src/python/gudhi/random_point_generators.cc new file mode 100644 index 00000000..39b09a6d --- /dev/null +++ b/src/python/gudhi/random_point_generators.cc @@ -0,0 +1,68 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Hind Montassif + * + * Copyright (C) 2021 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include +#include + +#include + +#include + +namespace py = pybind11; + + +typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; + +template +py::array_t generate_points_on_sphere(py::object num_points, py::object dim, py::object radius) { + int npoints = num_points.cast(); + int d = dim.cast(); + double rad = radius.cast(); + + py::gil_scoped_release release; + + auto points_generated = Gudhi::generate_points_on_sphere_d(npoints, d, rad); + + py::gil_scoped_acquire acquire; + + py::array_t points({npoints, d}); + + py::buffer_info buf = points.request(); + + double *ptr = static_cast(buf.ptr); + + assert(npoints == buf.shape[0]); + assert(d == buf.shape[1]); + + + for (size_t i = 0; i < (size_t)npoints; i++) + for (size_t j = 0; j < (size_t)d; j++) + ptr[i*d+j] = points_generated.at(i).at(j); + + return points; +} + +PYBIND11_MODULE(random_point_generators, m) { + m.attr("__license__") = "LGPL v3"; + m.def("generate_points_on_sphere_d", &generate_points_on_sphere, + py::arg("num_points"), py::arg("dim"), py::arg("radius"), + R"pbdoc( + Generate points on a sphere + + :param num_points: The number of points to be generated. + :type num_points: integer + :param dim: The sphere dimension. + :type dim: integer + :param radius: The sphere radius. + :type radius: float + :rtype: numpy array of points + :returns: the generated points on a sphere.
+ )pbdoc"); +} -- cgit v1.2.3 From 33cb5826e62abf8dd84d2adb59d99fc1f54a2aa1 Mon Sep 17 00:00:00 2001 From: Marc Glisse Date: Thu, 22 Apr 2021 22:16:33 +0200 Subject: Fix return type of source/target --- src/Rips_complex/include/gudhi/Sparse_rips_complex.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h index 8024f92d..7ae7b317 100644 --- a/src/Rips_complex/include/gudhi/Sparse_rips_complex.h +++ b/src/Rips_complex/include/gudhi/Sparse_rips_complex.h @@ -53,9 +53,9 @@ edges(Graph const&g) { return { I(0), I(g.elist.size()) }; } template -std::size_t source(std::size_t e, Graph const&g) { return std::get<0>(g.elist[e]); } +Vertex_handle source(std::size_t e, Graph const&g) { return std::get<0>(g.elist[e]); } template -std::size_t target(std::size_t e, Graph const&g) { return std::get<1>(g.elist[e]); } +Vertex_handle target(std::size_t e, Graph const&g) { return std::get<1>(g.elist[e]); } template Filtration_value get(vertex_filtration_t, Graph const&, Vertex_handle) { return 0; } template -- cgit v1.2.3 From 9df34f942df8417db11c324fb0c4e2c475a5211f Mon Sep 17 00:00:00 2001 From: Hind Date: Fri, 23 Apr 2021 09:41:29 +0200 Subject: Get pi constant from boost instead of cmath (to be replaced with the C++20 standard one eventually) --- src/common/include/gudhi/random_point_generators.h | 42 +++++++++++----------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'src') diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h index e02c1ed2..25a10232 100644 --- a/src/common/include/gudhi/random_point_generators.h +++ b/src/common/include/gudhi/random_point_generators.h @@ -18,7 +18,7 @@ #include // for CGAL_VERSION_NR #include // for vector<> -#include // for M_PI +#include // for pi constant // Make compilation fail - required for external projects - https://github.com/GUDHI/gudhi-devel/issues/10 #if CGAL_VERSION_NR < 1041101000 @@ -27,6 +27,8 @@ namespace Gudhi { +constexpr double pi = boost::math::constants::pi(); + /////////////////////////////////////////////////////////////////////////////// // Note: All these functions have been tested with the CGAL::Epick_d kernel /////////////////////////////////////////////////////////////////////////////// @@ -165,11 +167,11 @@ std::vector generate_points_on_torus_3D(std::size_t nu if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * M_PI * k1 / num_lines; - v = 2 * M_PI * k2 / num_lines; + u = 2 * pi * k1 / num_lines; + v = 2 * pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * M_PI); - v = rng.get_double(0, 2 * M_PI); + u = rng.get_double(0, 2 * pi); + v = rng.get_double(0, 2 * pi); } Point p = construct_point(k, (R + r * std::cos(u)) * std::cos(v), @@ -201,7 +203,7 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si (100. 
+ radius_noise_percentage) / 100.); } std::vector cp2 = current_point; - double alpha = 2 * M_PI * slice_idx / num_slices; + double alpha = 2 * pi * slice_idx / num_slices; cp2.push_back(radius_noise_ratio * std::cos(alpha)); cp2.push_back(radius_noise_ratio * std::sin(alpha)); generate_uniform_points_on_torus_d( @@ -235,7 +237,7 @@ std::vector generate_points_on_torus_d(std::size_t num std::vector pt; pt.reserve(dim * 2); for (int curdim = 0; curdim < dim; ++curdim) { - FT alpha = rng.get_double(0, 2 * M_PI); + FT alpha = rng.get_double(0, 2 * pi); pt.push_back(radius_noise_ratio * std::cos(alpha)); pt.push_back(radius_noise_ratio * std::sin(alpha)); } @@ -371,7 +373,7 @@ std::vector generate_points_on_3sphere_and_circle(std: for (std::size_t i = 0; i < num_points;) { Point p_sphere = *generator++; // First 3 coords - FT alpha = rng.get_double(0, 2 * M_PI); + FT alpha = rng.get_double(0, 2 * pi); std::vector pt(5); pt[0] = k_coord(p_sphere, 0); pt[1] = k_coord(p_sphere, 1); @@ -404,11 +406,11 @@ std::vector generate_points_on_klein_bottle_3D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * M_PI * k1 / num_lines; - v = 2 * M_PI * k2 / num_lines; + u = 2 * pi * k1 / num_lines; + v = 2 * pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * M_PI); - v = rng.get_double(0, 2 * M_PI); + u = rng.get_double(0, 2 * pi); + v = rng.get_double(0, 2 * pi); } double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v); Point p = construct_point(k, @@ -440,11 +442,11 @@ std::vector generate_points_on_klein_bottle_4D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * M_PI * k1 / num_lines; - v = 2 * M_PI * k2 / num_lines; + u = 2 * pi * k1 / num_lines; + v = 2 * pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * M_PI); - v = rng.get_double(0, 2 * M_PI); + u = rng.get_double(0, 2 * pi); + v = rng.get_double(0, 2 * pi); } Point p = construct_point(k, (a + b * cos(v)) * cos(u) + (noise == 0. ? 0. 
: rng.get_double(0, noise)), @@ -479,11 +481,11 @@ generate_points_on_klein_bottle_variant_5D( if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * M_PI * k1 / num_lines; - v = 2 * M_PI * k2 / num_lines; + u = 2 * pi * k1 / num_lines; + v = 2 * pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * M_PI); - v = rng.get_double(0, 2 * M_PI); + u = rng.get_double(0, 2 * pi); + v = rng.get_double(0, 2 * pi); } FT x1 = (a + b * cos(v)) * cos(u); FT x2 = (a + b * cos(v)) * sin(u); -- cgit v1.2.3 From db7ce3487e526741c0408b00c2cffda0048b0026 Mon Sep 17 00:00:00 2001 From: Hind Date: Fri, 23 Apr 2021 11:27:59 +0200 Subject: Make adjustments according to the received reviews --- src/python/CMakeLists.txt | 2 +- src/python/gudhi/random_point_generators.cc | 45 +++++++++++++---------------- 2 files changed, 21 insertions(+), 26 deletions(-) (limited to 'src') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 8baf0f02..87f10a1a 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -43,10 +43,10 @@ endfunction( add_gudhi_debug_info ) if(PYTHONINTERP_FOUND) if(PYBIND11_FOUND) add_gudhi_debug_info("Pybind11 version ${PYBIND11_VERSION}") - set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'random_point_generators', ") set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'random_point_generators', ") endif() if(CYTHON_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ") diff --git a/src/python/gudhi/random_point_generators.cc b/src/python/gudhi/random_point_generators.cc index 39b09a6d..4306ba98 100644 --- a/src/python/gudhi/random_point_generators.cc +++ b/src/python/gudhi/random_point_generators.cc @@ -21,30 +21,25 @@ namespace py = pybind11; typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; template -py::array_t generate_points_on_sphere(py::object num_points, py::object dim, py::object radius) { - int npoints = num_points.cast(); - int d = dim.cast(); - double rad = radius.cast(); - - py::gil_scoped_release release; - - auto points_generated = Gudhi::generate_points_on_sphere_d(npoints, d, rad); - - py::gil_scoped_acquire acquire; - - py::array_t points({npoints, d}); +py::array_t generate_points_on_sphere(size_t num_points, int dim, double radius) { + + py::array_t points({(int)num_points, dim}); py::buffer_info buf = points.request(); - double *ptr = static_cast(buf.ptr); - assert(npoints == buf.shape[0]); - assert(d == buf.shape[1]); + assert(num_points == buf.shape[0]); + assert(dim == buf.shape[1]); - - for (size_t i = 0; i < (size_t)npoints; i++) - for (size_t j = 0; j < (size_t)d; j++) - ptr[i*d+j] = points_generated.at(i).at(j); + std::vector points_generated; + { + py::gil_scoped_release release; + points_generated = Gudhi::generate_points_on_sphere_d(num_points, dim, radius); + + for (size_t i = 0; i < num_points; i++) + for (size_t j = 0; j < (size_t)dim; j++) + ptr[i*dim+j] = points_generated[i][j]; + } return points; } @@ -52,17 +47,17 @@ py::array_t generate_points_on_sphere(py::object num_points, py::object PYBIND11_MODULE(random_point_generators, m) { m.attr("__license__") = "LGPL v3"; m.def("generate_points_on_sphere_d", &generate_points_on_sphere, - py::arg("num_points"), py::arg("dim"), py::arg("radius"), + py::arg("num_points"), py::arg("dim"), 
py::arg("radius") = 1, R"pbdoc( - Generate points on a sphere + Generate random i.i.d. points uniformly on a (d-1)-sphere in Rd :param num_points: The number of points to be generated. - :type num_points: integer - :param dim: The sphere dimension. + :type num_points: unsigned integer + :param dim: The dimension. :type dim: integer - :param radius: The sphere radius. + :param radius: The radius. :type radius: float - :rtype: numpy array of points + :rtype: numpy array of float :returns: the generated points on a sphere. )pbdoc"); } -- cgit v1.2.3 From 245354222ed6090f9828dba24b3db9ad17f8dfbf Mon Sep 17 00:00:00 2001 From: Hind Date: Fri, 23 Apr 2021 15:17:28 +0200 Subject: Use double_constants instead of templated constants Use the boost double_constants namespace in each function that needs two_pi --- src/common/include/gudhi/random_point_generators.h | 54 +++++++++++++--------- 1 file changed, 33 insertions(+), 21 deletions(-) (limited to 'src') diff --git a/src/common/include/gudhi/random_point_generators.h b/src/common/include/gudhi/random_point_generators.h index 25a10232..33fb182d 100644 --- a/src/common/include/gudhi/random_point_generators.h +++ b/src/common/include/gudhi/random_point_generators.h @@ -27,8 +27,6 @@ namespace Gudhi { -constexpr double pi = boost::math::constants::pi(); - /////////////////////////////////////////////////////////////////////////////// // Note: All these functions have been tested with the CGAL::Epick_d kernel /////////////////////////////////////////////////////////////////////////////// @@ -152,6 +150,8 @@ std::vector generate_points_on_moment_curve(std::size_ template std::vector generate_points_on_torus_3D(std::size_t num_points, double R, double r, bool uniform = false) { + using namespace boost::math::double_constants; + typedef typename Kernel::Point_d Point; typedef typename Kernel::FT FT; Kernel k; @@ -167,11 +167,11 @@ std::vector generate_points_on_torus_3D(std::size_t nu if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * pi * k1 / num_lines; - v = 2 * pi * k2 / num_lines; + u = two_pi * k1 / num_lines; + v = two_pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * pi); - v = rng.get_double(0, 2 * pi); + u = rng.get_double(0, two_pi); + v = rng.get_double(0, two_pi); } Point p = construct_point(k, (R + r * std::cos(u)) * std::cos(v), @@ -190,6 +190,8 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si double radius_noise_percentage = 0., std::vector current_point = std::vector()) { + using namespace boost::math::double_constants; + CGAL::Random rng; int point_size = static_cast(current_point.size()); if (point_size == 2 * dim) { @@ -203,7 +205,7 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si (100. + radius_noise_percentage) / 100.); } std::vector cp2 = current_point; - double alpha = 2 * pi * slice_idx / num_slices; + double alpha = two_pi * slice_idx / num_slices; cp2.push_back(radius_noise_ratio * std::cos(alpha)); cp2.push_back(radius_noise_ratio * std::sin(alpha)); generate_uniform_points_on_torus_d( @@ -215,6 +217,8 @@ static void generate_uniform_points_on_torus_d(const Kernel &k, int dim, std::si template std::vector generate_points_on_torus_d(std::size_t num_points, int dim, bool uniform = false, double radius_noise_percentage = 0.) 
{ + using namespace boost::math::double_constants; + typedef typename Kernel::Point_d Point; typedef typename Kernel::FT FT; Kernel k; @@ -237,7 +241,7 @@ std::vector generate_points_on_torus_d(std::size_t num std::vector pt; pt.reserve(dim * 2); for (int curdim = 0; curdim < dim; ++curdim) { - FT alpha = rng.get_double(0, 2 * pi); + FT alpha = rng.get_double(0, two_pi); pt.push_back(radius_noise_ratio * std::cos(alpha)); pt.push_back(radius_noise_ratio * std::sin(alpha)); } @@ -360,6 +364,8 @@ std::vector generate_points_on_two_spheres_d(std::size template std::vector generate_points_on_3sphere_and_circle(std::size_t num_points, double sphere_radius) { + using namespace boost::math::double_constants; + typedef typename Kernel::FT FT; typedef typename Kernel::Point_d Point; Kernel k; @@ -373,7 +379,7 @@ std::vector generate_points_on_3sphere_and_circle(std: for (std::size_t i = 0; i < num_points;) { Point p_sphere = *generator++; // First 3 coords - FT alpha = rng.get_double(0, 2 * pi); + FT alpha = rng.get_double(0, two_pi); std::vector pt(5); pt[0] = k_coord(p_sphere, 0); pt[1] = k_coord(p_sphere, 1); @@ -391,6 +397,8 @@ std::vector generate_points_on_3sphere_and_circle(std: template std::vector generate_points_on_klein_bottle_3D(std::size_t num_points, double a, double b, bool uniform = false) { + using namespace boost::math::double_constants; + typedef typename Kernel::Point_d Point; typedef typename Kernel::FT FT; Kernel k; @@ -406,11 +414,11 @@ std::vector generate_points_on_klein_bottle_3D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * pi * k1 / num_lines; - v = 2 * pi * k2 / num_lines; + u = two_pi * k1 / num_lines; + v = two_pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * pi); - v = rng.get_double(0, 2 * pi); + u = rng.get_double(0, two_pi); + v = rng.get_double(0, two_pi); } double tmp = cos(u / 2) * sin(v) - sin(u / 2) * sin(2. * v); Point p = construct_point(k, @@ -427,6 +435,8 @@ std::vector generate_points_on_klein_bottle_3D(std::si template std::vector generate_points_on_klein_bottle_4D(std::size_t num_points, double a, double b, double noise = 0., bool uniform = false) { + using namespace boost::math::double_constants; + typedef typename Kernel::Point_d Point; typedef typename Kernel::FT FT; Kernel k; @@ -442,11 +452,11 @@ std::vector generate_points_on_klein_bottle_4D(std::si if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * pi * k1 / num_lines; - v = 2 * pi * k2 / num_lines; + u = two_pi * k1 / num_lines; + v = two_pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * pi); - v = rng.get_double(0, 2 * pi); + u = rng.get_double(0, two_pi); + v = rng.get_double(0, two_pi); } Point p = construct_point(k, (a + b * cos(v)) * cos(u) + (noise == 0. ? 0. 
: rng.get_double(0, noise)), @@ -466,6 +476,8 @@ template std::vector generate_points_on_klein_bottle_variant_5D( std::size_t num_points, double a, double b, bool uniform = false) { + using namespace boost::math::double_constants; + typedef typename Kernel::Point_d Point; typedef typename Kernel::FT FT; Kernel k; @@ -481,11 +493,11 @@ generate_points_on_klein_bottle_variant_5D( if (uniform) { std::size_t k1 = i / num_lines; std::size_t k2 = i % num_lines; - u = 2 * pi * k1 / num_lines; - v = 2 * pi * k2 / num_lines; + u = two_pi * k1 / num_lines; + v = two_pi * k2 / num_lines; } else { - u = rng.get_double(0, 2 * pi); - v = rng.get_double(0, 2 * pi); + u = rng.get_double(0, two_pi); + v = rng.get_double(0, two_pi); } FT x1 = (a + b * cos(v)) * cos(u); FT x2 = (a + b * cos(v)) * sin(u); -- cgit v1.2.3 From e59b1cfd338a80a769c0e2b6d677b9474b07beb3 Mon Sep 17 00:00:00 2001 From: Hind Date: Mon, 26 Apr 2021 11:47:36 +0200 Subject: Replace assert with GUDHI_CHECK Make the function non-template Change typing and casting --- src/python/gudhi/random_point_generators.cc | 34 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/random_point_generators.cc b/src/python/gudhi/random_point_generators.cc index 4306ba98..6eb40429 100644 --- a/src/python/gudhi/random_point_generators.cc +++ b/src/python/gudhi/random_point_generators.cc @@ -12,6 +12,7 @@ #include #include +#include #include @@ -20,36 +21,33 @@ namespace py = pybind11; typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; -template py::array_t generate_points_on_sphere(size_t num_points, int dim, double radius) { - - py::array_t points({(int)num_points, dim}); - + + py::array_t points({num_points, (size_t)dim}); + py::buffer_info buf = points.request(); double *ptr = static_cast(buf.ptr); - assert(num_points == buf.shape[0]); - assert(dim == buf.shape[1]); - - std::vector points_generated; - { - py::gil_scoped_release release; - points_generated = Gudhi::generate_points_on_sphere_d(num_points, dim, radius); - - for (size_t i = 0; i < num_points; i++) - for (size_t j = 0; j < (size_t)dim; j++) - ptr[i*dim+j] = points_generated[i][j]; - } + GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); + GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); + + + py::gil_scoped_release release; + auto points_generated = Gudhi::generate_points_on_sphere_d(num_points, dim, radius); + + for (size_t i = 0; i < num_points; i++) + for (int j = 0; j < dim; j++) + ptr[i*dim+j] = points_generated[i][j]; return points; } PYBIND11_MODULE(random_point_generators, m) { m.attr("__license__") = "LGPL v3"; - m.def("generate_points_on_sphere_d", &generate_points_on_sphere, + m.def("generate_points_on_sphere_d", &generate_points_on_sphere, py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, R"pbdoc( - Generate random i.i.d. points uniformly on a (d-1)-sphere in Rd + Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d :param num_points: The number of points to be generated. 
:type num_points: unsigned integer -- cgit v1.2.3 From 154596a39b2b26c90e46ec851b8f05ea08fa47d4 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 27 Apr 2021 09:17:53 +0200 Subject: Remove make install target from python and rewrite documentation accordingly --- src/common/doc/installation.h | 4 ++++ src/python/CMakeLists.txt | 2 -- src/python/doc/installation.rst | 14 ++++---------- 3 files changed, 8 insertions(+), 12 deletions(-) (limited to 'src') diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h index c2e63a24..ce393c38 100644 --- a/src/common/doc/installation.h +++ b/src/common/doc/installation.h @@ -30,6 +30,10 @@ make \endverbatim * This action may require you to be a sudoer or administrator of the machine, depending on the operating system and * on CMAKE_INSTALL_PREFIX. * + * \note The Python module will be compiled by the `make` command, but `make install` will not install it. Please refer to + * the Python + * module installation documentation. + * * \subsection testsuites Test suites * To test your build, run the following command in a terminal: * \verbatim make test \endverbatim diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 73303a24..a1440cbc 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -274,8 +274,6 @@ if(PYTHONINTERP_FOUND) add_custom_target(python ALL DEPENDS gudhi.so COMMENT "Do not forget to add ${CMAKE_CURRENT_BINARY_DIR}/ to your PYTHONPATH before using examples or tests") - install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/setup.py install)") - set(GUDHI_PYTHON_PATH_ENV "PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}:$ENV{PYTHONPATH}") # Documentation generation is available through sphinx - requires all modules # Make it first as sphinx test is by far the longest test which is nice when testing in parallel diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 66efe45a..2881055f 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -99,20 +99,14 @@ Or install it permanently in your Python packages folder: .. code-block:: bash cd /path-to-gudhi/build/python - # May require sudo or administrator privileges - make install + python setup.py install # add --user to the command if you do not have permission + # Or 'pip install .' .. note:: - :code:`make install` is only a - `CMake custom targets `_ - to shortcut :code:`python setup.py install` command. It does not take into account :code:`CMAKE_INSTALL_PREFIX`. - But one can use :code:`python setup.py install ...` specific options in the python directory: - -.. code-block:: bash - - python setup.py install --prefix /home/gudhi # Install in /home/gudhi directory + One can also use + `alternate location installation `_.
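After either installation command, a quick smoke test from a fresh shell (run outside the build tree, so that the installed copy is the one being imported) could be, for instance:

    import gudhi
    st = gudhi.SimplexTree()
    st.insert([0, 1, 2])
    print(st.dimension())  # expects 2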
Test suites =========== -- cgit v1.2.3 From 44085e15a0ce83d8139db7da276d656bd6381026 Mon Sep 17 00:00:00 2001 From: Théo Lacombe Date: Tue, 27 Apr 2021 09:55:13 +0200 Subject: Typo correction - Update src/python/gudhi/wasserstein/wasserstein.py Co-authored-by: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> --- src/python/gudhi/wasserstein/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 8ccbe12e..926dec33 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -131,7 +131,7 @@ def _cost_and_match_essential_parts(X, Y, idX, idY, order, axis): :param Y: (n x 2) numpy.array (dgm points) :param idX: indices to consider for this one dimensional OT problem (in X) :param idY: indices to consider for this one dimensional OT problem (in Y) - :param order: exponent for Wasserstein distanc ecomputation + :param order: exponent for Wasserstein distance computation :param axis: must be 0 or 1, correspond to the coordinate which is finite. :returns: cost (float) and match for points with *one* infinite coordinate. -- cgit v1.2.3 From 29ffea359c52e2813c8e6887bda51874c36a56a5 Mon Sep 17 00:00:00 2001 From: Théo Lacombe Date: Tue, 27 Apr 2021 09:56:53 +0200 Subject: Typo - Update src/python/gudhi/wasserstein/wasserstein.py Co-authored-by: Vincent Rouvreau <10407034+VincentRouvreau@users.noreply.github.com> --- src/python/gudhi/wasserstein/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 926dec33..90988512 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -267,7 +267,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab # Check essential part and enable autodiff together if enable_autodiff and keep_essential_parts: warnings.warn('''enable_autodiff=True and keep_essential_parts=True are incompatible together. - keep_essential_parts is set to False: only points with finite coordiantes are considered + keep_essential_parts is set to False: only points with finite coordinates are considered in the following. ''') keep_essential_parts = False -- cgit v1.2.3 From c1ab7c43d4797da93aa74ba823dd1a6b28fb2cfd Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 27 Apr 2021 12:16:22 +0200 Subject: now consider (inf,inf) as belonging to the diagonal ; more tests --- src/python/gudhi/wasserstein/wasserstein.py | 18 ++++++++++---- src/python/test/test_wasserstein_distance.py | 36 +++++++++++++++++++++++++--- 2 files changed, 46 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 3abecfe6..5095e672 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -106,6 +106,8 @@ def _get_essential_parts(a): .. note:: For instance, a[_get_essential_parts(a)[0]] returns the points in a of coordinates (-inf, x) for some finite x. Note also that points with (+inf, -inf) are not handled (points (x,y) in dgm satisfy by assumption (y >= x)). + + Finally, we consider that points with coordinates (-inf,-inf) and (+inf, +inf) belong to the diagonal. 
''' if len(a): first_coord_finite = np.isfinite(a[:,0]) @@ -118,6 +120,7 @@ def _get_essential_parts(a): ess_first_type = np.where(second_coord_finite & first_coord_infinite_negative)[0] # coord (-inf, x) ess_second_type = np.where(first_coord_finite & second_coord_infinite_positive)[0] # coord (x, +inf) ess_third_type = np.where(first_coord_infinite_negative & second_coord_infinite_positive)[0] # coord (-inf, +inf) + ess_fourth_type = np.where(first_coord_infinite_negative & second_coord_infinite_negative)[0] # coord (-inf, -inf) ess_fifth_type = np.where(first_coord_infinite_positive & second_coord_infinite_positive)[0] # coord (+inf, +inf) return ess_first_type, ess_second_type, ess_third_type, ess_fourth_type, ess_fifth_type @@ -162,7 +165,7 @@ def _handle_essential_parts(X, Y, order): ess_parts_Y = _get_essential_parts(Y) # Treats the case of infinite cost (cardinalities of essential parts differ). - for u, v in zip(ess_parts_X, ess_parts_Y): + for u, v in list(zip(ess_parts_X, ess_parts_Y))[:3]: # ignore types 4 and 5 as they belong to the diagonal if len(u) != len(v): return np.inf, None @@ -174,9 +177,14 @@ def _handle_essential_parts(X, Y, order): c = c1 + c2 m = m1 + m2 - # Handle type >= 2 (both coordinates are infinite, so we essentially just align points) - for u, v in zip(ess_parts_X[2:], ess_parts_Y[2:]): - m += list(zip(u, v)) # cost is 0 + # Handle type3 (coordinates (-inf,+inf), so we just align points) + m += list(zip(ess_parts_X[2], ess_parts_Y[2])) + + # Handle type 4 and 5, considered as belonging to the diagonal so matched to (-1) with cost 0. + for z in ess_parts_X[3:]: + m += [(u, -1) for u in z] # points in X are matched to -1 + for z in ess_parts_Y[3:]: + m += [(-1, v) for v in z] # -1 is match to points in Y return c, np.array(m) @@ -334,7 +342,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab return ep.concatenate(dists).norms.lp(order).raw # We can also concatenate the 3 vectors to compute just one norm. - # Comptuation of the otcost using the ot.emd2 library. + # Comptuation of the ot cost using the ot.emd2 library. # Note: it is the Wasserstein distance to the power q. # The default numItermax=100000 is not sufficient for some examples with 5000 points, what is a good value? ot_cost = ot.emd2(a, b, M, numItermax=2000000) diff --git a/src/python/test/test_wasserstein_distance.py b/src/python/test/test_wasserstein_distance.py index 121ba065..3a004d77 100755 --- a/src/python/test/test_wasserstein_distance.py +++ b/src/python/test/test_wasserstein_distance.py @@ -10,6 +10,7 @@ """ from gudhi.wasserstein.wasserstein import _proj_on_diag, _finite_part, _handle_essential_parts, _get_essential_parts +from gudhi.wasserstein.wasserstein import _warn_infty from gudhi.wasserstein import wasserstein_distance as pot from gudhi.hera import wasserstein_distance as hera import numpy as np @@ -50,16 +51,17 @@ def test_handle_essential_parts(): [-np.inf, np.inf], [-np.inf, np.inf]]) diag3 = np.array([[0, 2], [3, 5], - [2, np.inf], [4, np.inf], + [2, np.inf], [4, np.inf], [6, np.inf], [-np.inf, 8], [-np.inf, 11], - [-np.inf, -np.inf], [-np.inf, -np.inf], + [-np.inf, -np.inf], [np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]]) c, m = _handle_essential_parts(diag1, diag2, order=1) assert c == pytest.approx(2, 0.0001) # Note: here c is only the cost due to essential part (thus 2, not 3) # Similarly, the matching only corresponds to essential parts. 
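As an aside, the one-dimensional transport step used for type 0 and type 1 points reduces to sorting the finite coordinates on each side, since optimal transport on the real line matches sorted values. A standalone sketch with hypothetical coordinates, chosen so the cost comes out to 2 like the essential cost asserted above:

import numpy as np

u = np.array([8., 11.])  # finite coordinates of the (-inf, x) points of one diagram (hypothetical)
v = np.array([9., 10.])  # same for the other diagram (hypothetical)
order = 1.
cost = np.sum(np.abs(np.sort(u) - np.sort(v)) ** order)  # |8-9|**1 + |11-10|**1 = 2.0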
- assert np.array_equal(m, [[4, 4], [5, 5], [2, 2], [3, 3], [8, 8], [9, 9], [6, 6], [7, 7]]) + # Note that (-inf,-inf) and (+inf,+inf) coordinates are matched to the diagonal. + assert np.array_equal(m, [[4, 4], [5, 5], [2, 2], [3, 3], [8, 8], [9, 9], [6, -1], [7, -1], [-1, 6], [-1, 7]]) c, m = _handle_essential_parts(diag1, diag3, order=1) assert c == np.inf @@ -87,6 +89,13 @@ def test_get_essential_parts(): assert np.array_equal(res2[4], [] ) +def test_warn_infty(): + assert _warn_infty(matching=False)==np.inf + c, m = _warn_infty(matching=True) + assert (c == np.inf) + assert (m is None) + + def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_matching=True): diag1 = np.array([[2.7, 3.7], [9.6, 14.0], [34.2, 34.974]]) diag2 = np.array([[2.8, 4.45], [9.5, 14.1]]) @@ -143,11 +152,29 @@ def _basic_wasserstein(wasserstein_distance, delta, test_infinity=True, test_mat if test_matching and test_infinity: diag7 = np.array([[0, 3], [4, np.inf], [5, np.inf]]) + diag8 = np.array([[0,1], [0, np.inf], [-np.inf, -np.inf], [np.inf, np.inf]]) + diag9 = np.array([[-np.inf, -np.inf], [np.inf, np.inf]]) + diag10 = np.array([[0,1], [-np.inf, -np.inf], [np.inf, np.inf]]) match = wasserstein_distance(diag5, diag6, matching=True, internal_p=2., order=2.)[1] assert np.array_equal(match, [[0, -1], [-1,0], [-1, 1], [1, 2]]) match = wasserstein_distance(diag5, diag7, matching=True, internal_p=2., order=2.)[1] assert (match is None) + cost, match = wasserstein_distance(diag7, emptydiag, matching=True, internal_p=2., order=2.3) + assert (cost == np.inf) + assert (match is None) + cost, match = wasserstein_distance(emptydiag, diag7, matching=True, internal_p=2.42, order=2.) + assert (cost == np.inf) + assert (match is None) + cost, match = wasserstein_distance(diag8, diag9, matching=True, internal_p=2., order=2.) + assert (cost == np.inf) + assert (match is None) + cost, match = wasserstein_distance(diag9, diag10, matching=True, internal_p=1., order=1.) + assert (cost == 1) + assert (match == [[0, -1],[1, -1],[-1, 0], [-1, 1], [-1, 2]]) # type 4 and 5 are match to the diag anyway. + cost, match = wasserstein_distance(diag9, emptydiag, matching=True, internal_p=2., order=2.) + assert (cost == 0.) 
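The user-facing behaviour exercised by these tests can be reproduced directly with the public function; a hedged usage sketch, with illustrative diagrams and cost:

import numpy as np
from gudhi.wasserstein import wasserstein_distance

X = np.array([[0., 1.], [2., np.inf]])
Y = np.array([[0., 1.2], [3., np.inf]])
cost, match = wasserstein_distance(X, Y, matching=True, order=1., internal_p=np.inf,
                                   keep_essential_parts=True)
# The finite parts are matched by optimal transport (cost 0.2 here under the sup norm),
# the essential (x, +inf) points by sorting their finite coordinates (cost |2 - 3| = 1),
# so cost should be 1.2. Points matched to the diagonal appear as (i, -1) or (-1, j).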
+ assert (match == [[0, -1], [1, -1]]) def hera_wrap(**extra): @@ -155,15 +182,18 @@ def hera_wrap(**extra): return hera(*kargs,**kwargs,**extra) return fun + def pot_wrap(**extra): def fun(*kargs,**kwargs): return pot(*kargs,**kwargs,**extra) return fun + def test_wasserstein_distance_pot(): _basic_wasserstein(pot, 1e-15, test_infinity=False, test_matching=True) # pot with its standard args _basic_wasserstein(pot_wrap(enable_autodiff=True, keep_essential_parts=False), 1e-15, test_infinity=False, test_matching=False) + def test_wasserstein_distance_hera(): _basic_wasserstein(hera_wrap(delta=1e-12), 1e-12, test_matching=False) _basic_wasserstein(hera_wrap(delta=.1), .1, test_matching=False) -- cgit v1.2.3 From b5fc64b23f8c92377a86111f75178abcc171050d Mon Sep 17 00:00:00 2001 From: tlacombe Date: Tue, 27 Apr 2021 14:57:04 +0200 Subject: changed infty to inf in doc --- src/python/gudhi/wasserstein/wasserstein.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 5095e672..61505d03 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -83,7 +83,7 @@ def _perstot(X, order, internal_p, enable_autodiff): :returns: float, the total persistence of the diagram (that is, its distance to the empty diagram). .. note:: - Can be +infty if the diagram has an essential part (points with infinite coordinates). + Can be +inf if the diagram has an essential part (points with infinite coordinates). ''' if enable_autodiff: import eagerpy as ep @@ -203,10 +203,10 @@ def _warn_infty(matching): `matching=True`) about the returned matching being `None`. ''' if matching: - warnings.warn('Cardinality of essential parts differs. Distance (cost) is +infty, and the returned matching is None.') + warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf, and the returned matching is None.') return np.inf, None else: - warnings.warn('Cardinality of essential parts differs. Distance (cost) is +infty.') + warnings.warn('Cardinality of essential parts differs. Distance (cost) is +inf.') return np.inf -- cgit v1.2.3 From 7573e67c8c6c1bb3cd21fd8b9ffb8aa0168eb7f7 Mon Sep 17 00:00:00 2001 From: Hind Date: Tue, 27 Apr 2021 15:13:25 +0200 Subject: Remove the commented graphic part from the example (to be added to tutorial notebooks) --- .../alpha_complex_from_generated_points_example.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) (limited to 'src') diff --git a/src/python/example/alpha_complex_from_generated_points_example.py b/src/python/example/alpha_complex_from_generated_points_example.py index 7a07ed42..c2562d8a 100644 --- a/src/python/example/alpha_complex_from_generated_points_example.py +++ b/src/python/example/alpha_complex_from_generated_points_example.py @@ -1,10 +1,7 @@ #!/usr/bin/env python from gudhi import random_point_generators -from gudhi import AlphaComplex, SimplexTree -from gudhi import plot_persistence_barcode, plot_persistence_diagram - -import matplotlib.pyplot as plt +from gudhi import AlphaComplex """ This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. 
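For readers who still want the visual check that this commit removes from the example, an equivalent standalone sketch (it assumes matplotlib is available and uses the module name as it exists at this point of the history; a later commit in this series renames it to gudhi.datasets.generators):

import matplotlib.pyplot as plt
from gudhi import random_point_generators

# Generate a circle: 50 points; dim 2; radius 1
points = random_point_generators.generate_points_on_sphere_d(50, 2, 1)
plt.scatter(points[:, 0], points[:, 1])
plt.show()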
@@ -28,10 +25,6 @@ print("AlphaComplex creation from generated points") # Generate a circle: 50 points; dim 2; radius 1 points = random_point_generators.generate_points_on_sphere_d(50, 2, 1) -# Plot the generated points (to uncomment if wished) -#plt.scatter(points[:,0], points[:,1]) -#plt.show() - # Create an alpha complex alpha_complex = AlphaComplex(points=points) simplex_tree = alpha_complex.create_simplex_tree() @@ -41,12 +34,3 @@ result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + repr(simplex_tree.num_vertices()) + ' vertices.' print(result_str) - -# Compute the persistence -diag = simplex_tree.persistence() - -# Plot the barcode and diagram (to uncomment if wished) -#plot_persistence_barcode(diag) -#plt.show() -#plot_persistence_diagram(diag) -#plt.show() -- cgit v1.2.3 From ce73a29d4fee67b7d20c213df81edf57b0de8770 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Tue, 27 Apr 2021 17:41:15 +0200 Subject: Doxygen documentation improvement --- .circleci/config.yml | 9 +- .../for_maintainers/new_gudhi_version_creation.md | 9 +- src/Alpha_complex/doc/Intro_alpha_complex.h | 14 +- .../doc/Intro_bottleneck_distance.h | 2 +- src/Cech_complex/doc/Intro_cech_complex.h | 6 +- src/Collapse/doc/intro_edge_collapse.h | 4 +- src/Doxyfile.in | 44 ++--- src/Nerve_GIC/doc/Intro_graph_induced_complex.h | 10 +- .../doc/Intro_persistent_cohomology.h | 14 +- src/Rips_complex/doc/Intro_rips_complex.h | 24 +-- src/Simplex_tree/doc/Intro_simplex_tree.h | 8 +- .../doc/Intro_spatial_searching.h | 2 +- src/Subsampling/doc/Intro_subsampling.h | 6 +- .../doc/Intro_tangential_complex.h | 4 +- src/Witness_complex/doc/Witness_complex_doc.h | 4 +- src/cmake/modules/GUDHI_doxygen_target.cmake | 47 +++++- src/cmake/modules/GUDHI_user_version_target.cmake | 11 +- src/common/doc/examples.h | 184 +++++++++++---------- src/common/doc/installation.h | 157 +++++++++--------- src/common/include/gudhi/Points_3D_off_io.h | 4 +- src/common/include/gudhi/Points_off_io.h | 4 +- 21 files changed, 302 insertions(+), 265 deletions(-) (limited to 'src') diff --git a/.circleci/config.yml b/.circleci/config.yml index 7fa9ae05..f6a875dd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -90,10 +90,15 @@ jobs: mkdir build cd build cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF -DUSER_VERSION_DIR=version .. + make user_version + cd version + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release -DWITH_GUDHI_EXAMPLE=OFF -DWITH_GUDHI_TEST=OFF -DWITH_GUDHI_UTILITIES=OFF -DWITH_GUDHI_PYTHON=OFF .. 
make doxygen 2>&1 | tee dox.log grep warning dox.log - cp dox.log version/doc/html/ - cp -R version/doc/html /tmp/doxygen + cp dox.log html/ + cp -R html /tmp/doxygen - store_artifacts: path: /tmp/doxygen diff --git a/.github/for_maintainers/new_gudhi_version_creation.md b/.github/for_maintainers/new_gudhi_version_creation.md index aadfae7d..d6c4cdd3 100644 --- a/.github/for_maintainers/new_gudhi_version_creation.md +++ b/.github/for_maintainers/new_gudhi_version_creation.md @@ -34,16 +34,21 @@ make -j 4 all && ctest -j 4 --output-on-failure ## Create the documentation ```bash mkdir gudhi.doc.@GUDHI_VERSION@ -make doxygen 2>&1 | tee dox.log && grep warning dox.log ``` ***[Check there are no error and the warnings]*** ```bash -cp -R gudhi.@GUDHI_VERSION@/doc/html gudhi.doc.@GUDHI_VERSION@/cpp cd gudhi.@GUDHI_VERSION@ rm -rf build; mkdir build; cd build cmake -DCMAKE_BUILD_TYPE=Release -DCGAL_DIR=/your/path/to/CGAL -DWITH_GUDHI_EXAMPLE=ON -DPython_ADDITIONAL_VERSIONS=3 .. +make doxygen 2>&1 | tee dox.log && grep warning dox.log +``` + +***[Check there are no error and the warnings]*** + +```bash +cp -R html ../../gudhi.doc.@GUDHI_VERSION@/cpp export LC_ALL=en_US.UTF-8 # cf. bug https://github.com/GUDHI/gudhi-devel/issues/111 make sphinx ``` diff --git a/src/Alpha_complex/doc/Intro_alpha_complex.h b/src/Alpha_complex/doc/Intro_alpha_complex.h index c068b268..f417ebb2 100644 --- a/src/Alpha_complex/doc/Intro_alpha_complex.h +++ b/src/Alpha_complex/doc/Intro_alpha_complex.h @@ -83,7 +83,7 @@ Table of Contents * * Then, it is asked to display information about the simplicial complex. * - * \include Alpha_complex/Alpha_complex_from_points.cpp + * \include Alpha_complex_from_points.cpp * * When launching: * @@ -92,7 +92,7 @@ Table of Contents * * the program output is: * - * \include Alpha_complex/alphaoffreader_for_doc_60.txt + * \include alphaoffreader_for_doc_60.txt * * \section createcomplexalgorithm Create complex algorithm * @@ -171,7 +171,7 @@ Table of Contents * * Then, it is asked to display information about the alpha complex. * - * \include Alpha_complex/Weighted_alpha_complex_from_points.cpp + * \include Weighted_alpha_complex_from_points.cpp * * When launching: * @@ -180,7 +180,7 @@ Table of Contents * * the program output is: * - * \include Alpha_complex/weightedalpha3dfrompoints_for_doc.txt + * \include weightedalpha3dfrompoints_for_doc.txt * * * \section offexample Example from OFF file @@ -190,7 +190,7 @@ Table of Contents * * Then, it is asked to display information about the alpha complex. * - * \include Alpha_complex/Alpha_complex_from_off.cpp + * \include Alpha_complex_from_off.cpp * * When launching: * @@ -199,7 +199,7 @@ Table of Contents * * the program output is: * - * \include Alpha_complex/alphaoffreader_for_doc_32.txt + * \include alphaoffreader_for_doc_32.txt * * * \section weighted3dexample 3d specific version @@ -215,7 +215,7 @@ Table of Contents * * Then, it is asked to display information about the alpha complex. * - * \include Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp + * \include Weighted_alpha_complex_3d_from_points.cpp * * The results will be the same as in \ref weightedversion . 
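The Alpha complex examples whose include paths are adjusted above also have a close Python counterpart; a minimal sketch with arbitrary input points:

from gudhi import AlphaComplex

alpha_complex = AlphaComplex(points=[[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
simplex_tree = alpha_complex.create_simplex_tree()
print('Alpha complex is of dimension', simplex_tree.dimension(), '-',
      simplex_tree.num_simplices(), 'simplices -', simplex_tree.num_vertices(), 'vertices.')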
* diff --git a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h index 2a988b4b..4f5a956c 100644 --- a/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h +++ b/src/Bottleneck_distance/doc/Intro_bottleneck_distance.h @@ -64,7 +64,7 @@ int main() { * \section bottleneckbasicexample Basic example * * This other example computes the bottleneck distance from 2 persistence diagrams: - * \include Bottleneck_distance/bottleneck_basic_example.cpp + * \include bottleneck_basic_example.cpp * * \code Bottleneck distance = 0.75 diff --git a/src/Cech_complex/doc/Intro_cech_complex.h b/src/Cech_complex/doc/Intro_cech_complex.h index 80c88dc6..698f9749 100644 --- a/src/Cech_complex/doc/Intro_cech_complex.h +++ b/src/Cech_complex/doc/Intro_cech_complex.h @@ -71,7 +71,7 @@ namespace cech_complex { * \ref rips_complex but it offers more topological guarantees. * * If the Cech_complex interfaces are not detailed enough for your need, please refer to - * + * * cech_complex_step_by_step.cpp example, where the graph construction over the Simplex_tree is more detailed. * * \subsection cechpointscloudexample Example from a point cloud @@ -81,7 +81,7 @@ namespace cech_complex { * * Then, it is asked to display information about the simplicial complex. * - * \include Cech_complex/cech_complex_example_from_points.cpp + * \include cech_complex_example_from_points.cpp * * When launching (maximal enclosing ball radius is 1., is expanded until dimension 2): * @@ -90,7 +90,7 @@ namespace cech_complex { * * the program output is: * - * \include Cech_complex/cech_complex_example_from_points_for_doc.txt + * \include cech_complex_example_from_points_for_doc.txt * */ /** @} */ // end defgroup cech_complex diff --git a/src/Collapse/doc/intro_edge_collapse.h b/src/Collapse/doc/intro_edge_collapse.h index 81edd79f..fde39707 100644 --- a/src/Collapse/doc/intro_edge_collapse.h +++ b/src/Collapse/doc/intro_edge_collapse.h @@ -81,7 +81,7 @@ namespace collapse { * Then it collapses edges and displays a new list of `Filtered_edge` (with less edges) * that will preserve the persistence homology computation. * - * \include Collapse/edge_collapse_basic_example.cpp + * \include edge_collapse_basic_example.cpp * * When launching the example: * @@ -90,7 +90,7 @@ namespace collapse { * * the program output is: * - * \include Collapse/edge_collapse_example_basic.txt + * \include edge_collapse_example_basic.txt */ /** @} */ // end defgroup strong_collapse diff --git a/src/Doxyfile.in b/src/Doxyfile.in index 49e781bd..4784b915 100644 --- a/src/Doxyfile.in +++ b/src/Doxyfile.in @@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8 # title of most generated pages and in a few other places. # The default value is: My Project. -PROJECT_NAME = "GUDHI" +PROJECT_NAME = "@CMAKE_PROJECT_NAME@" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version @@ -58,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = "doc/" +OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -672,9 +672,9 @@ LAYOUT_FILE = # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. 
-CITE_BIB_FILES = biblio/bibliography.bib \ - biblio/how_to_cite_cgal.bib \ - biblio/how_to_cite_gudhi.bib +CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \ + @CMAKE_SOURCE_DIR@/biblio/how_to_cite_cgal.bib \ + @CMAKE_SOURCE_DIR@/biblio/how_to_cite_gudhi.bib #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages @@ -745,7 +745,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = +INPUT = @CMAKE_SOURCE_DIR@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -780,13 +780,14 @@ RECURSIVE = YES # Note that relative paths are relative to the directory from which doxygen is # run. -EXCLUDE = data/ \ - example/ \ - GudhUI/ \ - cmake/ \ - python/ \ - ext/ \ - README.md +EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \ + @CMAKE_SOURCE_DIR@/ext/ \ + @CMAKE_SOURCE_DIR@/README.md \ + @CMAKE_SOURCE_DIR@/.github \ + @CMAKE_CURRENT_BINARY_DIR@/new_gudhi_version_creation.md \ + @GUDHI_DOXYGEN_SOURCE_PREFIX@/GudhUI/ \ + @GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \ + @GUDHI_DOXYGEN_SOURCE_PREFIX@/python/ \ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded @@ -802,7 +803,7 @@ EXCLUDE_SYMLINKS = NO # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* -EXCLUDE_PATTERNS = */utilities/*/*.md +EXCLUDE_PATTERNS = @GUDHI_DOXYGEN_SOURCE_PREFIX@/@GUDHI_DOXYGEN_UTILS_PATH@/*.md # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the @@ -819,10 +820,9 @@ EXCLUDE_SYMBOLS = # that contain example code fragments that are included (see the \include # command). -EXAMPLE_PATH = biblio/ \ - example/ \ - utilities/ \ - data/ +EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \ + @CMAKE_SOURCE_DIR@/data/ \ + @GUDHI_DOXYGEN_EXAMPLE_PATH@ # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and @@ -890,7 +890,7 @@ FILTER_SOURCE_PATTERNS = # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. -USE_MDFILE_AS_MAINPAGE = doc/common/main_page.md +USE_MDFILE_AS_MAINPAGE = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/main_page.md #--------------------------------------------------------------------------- # Configuration options related to source browsing @@ -1046,7 +1046,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = doc/common/header.html +HTML_HEADER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1056,7 +1056,7 @@ HTML_HEADER = doc/common/header.html # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_FOOTER = doc/common/footer.html +HTML_FOOTER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/footer.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. 
It can be used to fine-tune the look of @@ -1068,7 +1068,7 @@ HTML_FOOTER = doc/common/footer.html # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_STYLESHEET = doc/common/stylesheet.css +HTML_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- # defined cascading style sheet that is included after the standard style sheets diff --git a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h index f9441b24..a6098860 100644 --- a/src/Nerve_GIC/doc/Intro_graph_induced_complex.h +++ b/src/Nerve_GIC/doc/Intro_graph_induced_complex.h @@ -53,7 +53,7 @@ namespace cover_complex { * covering the height function (coordinate 2), * which are then refined into their connected components using the triangulation of the .OFF file. * - * \include Nerve_GIC/Nerve.cpp + * \include Nerve.cpp * * When launching: * @@ -62,7 +62,7 @@ namespace cover_complex { * * the program output is: * - * \include Nerve_GIC/Nerve.txt + * \include Nerve.txt * * The program also writes a file ../../data/points/human_sc.txt. The first three lines in this file are the location * of the input point cloud and the function used to compute the cover. @@ -96,7 +96,7 @@ namespace cover_complex { * comes from the triangulation of the human shape. Note that the resulting simplicial complex is in dimension 3 * in this example. * - * \include Nerve_GIC/VoronoiGIC.cpp + * \include VoronoiGIC.cpp * * When launching: * @@ -129,7 +129,7 @@ namespace cover_complex { * with automatic resolution and gain. Note that automatic threshold, resolution and gain * can be computed as well for the Nerve. * - * \include Nerve_GIC/CoordGIC.cpp + * \include CoordGIC.cpp * * When launching: * @@ -152,7 +152,7 @@ namespace cover_complex { * The function is now the first eigenfunction given by PCA, whose values * are written in a file (lucky_cat_PCA1). Threshold, resolution and gain are automatically selected as before. * - * \include Nerve_GIC/FuncGIC.cpp + * \include FuncGIC.cpp * * When launching: * diff --git a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h index b4f9fd2c..a3613d0d 100644 --- a/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h +++ b/src/Persistent_cohomology/doc/Intro_persistent_cohomology.h @@ -131,7 +131,7 @@ namespace persistent_cohomology { We provide several example files: run these examples with -h for details on their use, and read the README file. -\li +\li Rips_complex/rips_persistence.cpp computes the Rips complex of a point cloud and outputs its persistence diagram. \code $> ./rips_persistence ../../data/points/tore3D_1307.off -r 0.25 -m 0.5 -d 3 -p 3 \endcode @@ -144,11 +144,11 @@ diagram. More details on the Rips complex utilities dedicated page. -\li +\li Persistent_cohomology/rips_multifield_persistence.cpp computes the Rips complex of a point cloud and outputs its persistence diagram with a family of field coefficients. -\li +\li Rips_complex/rips_distance_matrix_persistence.cpp computes the Rips complex of a distance matrix and outputs its persistence diagram. @@ -158,7 +158,7 @@ Please refer to data/distance_matrix/lower_triangular_distance_matrix.csv for an More details on the Rips complex utilities dedicated page. -\li +\li Rips_complex/rips_correlation_matrix_persistence.cpp computes the Rips complex of a correlation matrix and outputs its persistence diagram. 
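A rough Python analogue of the correlation-matrix utility mentioned above; note that turning correlations into distances via 1 - correlation is an assumption made here for illustration, as the C++ utility documents its own convention:

from gudhi import RipsComplex

# lower-triangular distance matrix, e.g. obtained as 1 - correlation (hypothetical values)
distance_matrix = [[], [0.1], [0.8, 0.5]]
rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=1.)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)
print(simplex_tree.persistence())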
@@ -169,7 +169,7 @@ Please refer to data/correlation_matrix/lower_triangular_correlation_matrix.csv More details on the Rips complex utilities dedicated page. -\li +\li Alpha_complex/alpha_complex_3d_persistence.cpp computes the persistent homology with \f$\mathbb{Z}/2\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file. \code $> ./alpha_complex_3d_persistence ../../data/points/tore3D_300.off -p 2 -m 0.45 \endcode @@ -235,7 +235,7 @@ Note that the lengths of the sides of the periodic cuboid have to be the same. +\li Alpha_complex/alpha_complex_persistence.cpp computes the persistent homology with \f$\mathbb{Z}/p\mathbb{Z}\f$ coefficients of the alpha complex on points sampling from an OFF file. \code $> ./alpha_complex_persistence -r 32 -p 2 -m 0.45 ../../data/points/tore3D_300.off \endcode @@ -248,7 +248,7 @@ Simplex_tree dim: 3 More details on the Alpha complex utilities dedicated page. -\li +\li Persistent_cohomology/plain_homology.cpp computes the plain homology of a simple simplicial complex without filtration values. diff --git a/src/Rips_complex/doc/Intro_rips_complex.h b/src/Rips_complex/doc/Intro_rips_complex.h index b2840686..3888ec8f 100644 --- a/src/Rips_complex/doc/Intro_rips_complex.h +++ b/src/Rips_complex/doc/Intro_rips_complex.h @@ -64,7 +64,7 @@ namespace rips_complex { * And so on for simplex (0,1,2,3). * * If the Rips_complex interfaces are not detailed enough for your need, please refer to - * + * * rips_persistence_step_by_step.cpp example, where the constructions of the graph and * the Simplex_tree are more detailed. * @@ -111,7 +111,7 @@ namespace rips_complex { * * Then, it is asked to display information about the simplicial complex. * - * \include Rips_complex/example_one_skeleton_rips_from_points.cpp + * \include example_one_skeleton_rips_from_points.cpp * * When launching (Rips maximal distance between 2 points is 12.0, is expanded * until dimension 1 - one skeleton graph in other words): @@ -121,7 +121,7 @@ namespace rips_complex { * * the program output is: * - * \include Rips_complex/one_skeleton_rips_for_doc.txt + * \include one_skeleton_rips_for_doc.txt * * \subsection ripsoffexample Example from OFF file * @@ -132,7 +132,7 @@ namespace rips_complex { * * Then, it is asked to display information about the Rips complex. * - * \include Rips_complex/example_rips_complex_from_off_file.cpp + * \include example_rips_complex_from_off_file.cpp * * When launching: * @@ -141,7 +141,7 @@ namespace rips_complex { * * the program output is: * - * \include Rips_complex/full_skeleton_rips_for_doc.txt + * \include full_skeleton_rips_for_doc.txt * * * \subsection sparseripspointscloudexample Example of a sparse Rips from a point cloud @@ -149,7 +149,7 @@ namespace rips_complex { * This example builds the full sparse Rips of a set of 2D Euclidean points, then prints some minimal * information about the complex. * - * \include Rips_complex/example_sparse_rips.cpp + * \include example_sparse_rips.cpp * * When launching: * @@ -172,7 +172,7 @@ namespace rips_complex { * * Then, it is asked to display information about the simplicial complex. 
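The point-cloud example discussed earlier in this header (maximal edge length 12.0, expansion stopped at dimension 1) can be mimicked from Python as well; a sketch with arbitrary 2D points:

from gudhi import RipsComplex

points = [[1., 1.], [7., 0.], [4., 6.], [9., 6.], [0., 14.], [2., 19.], [9., 17.]]
rips_complex = RipsComplex(points=points, max_edge_length=12.)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=1)  # one-skeleton only
print(simplex_tree.num_vertices(), 'vertices,', simplex_tree.num_simplices(), 'simplices')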
* - * \include Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp + * \include example_one_skeleton_rips_from_distance_matrix.cpp * * When launching (Rips maximal distance between 2 points is 1.0, is expanded until dimension 1 - one skeleton graph * with other words): @@ -182,7 +182,7 @@ namespace rips_complex { * * the program output is: * - * \include Rips_complex/one_skeleton_rips_for_doc.txt + * \include one_skeleton_rips_for_doc.txt * * \subsection ripscsvdistanceexample Example from a distance matrix read in a csv file * @@ -192,7 +192,7 @@ namespace rips_complex { * * Then, it is asked to display information about the Rips complex. * - * \include Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp + * \include example_rips_complex_from_csv_distance_matrix_file.cpp * * When launching: * @@ -201,7 +201,7 @@ namespace rips_complex { * * the program output is: * - * \include Rips_complex/full_skeleton_rips_for_doc.txt + * \include full_skeleton_rips_for_doc.txt * * * \section ripscorrelationematrix Correlation matrix @@ -213,7 +213,7 @@ namespace rips_complex { * * Then, it is asked to display information about the simplicial complex. * - * \include Rips_complex/example_one_skeleton_rips_from_correlation_matrix.cpp + * \include example_one_skeleton_rips_from_correlation_matrix.cpp * * When launching: * @@ -222,7 +222,7 @@ namespace rips_complex { * * the program output is: * - * \include Rips_complex/one_skeleton_rips_from_correlation_matrix_for_doc.txt + * \include one_skeleton_rips_from_correlation_matrix_for_doc.txt * * All the other constructions discussed for Rips complex for distance matrix can be also performed for Rips complexes * construction from correlation matrices. diff --git a/src/Simplex_tree/doc/Intro_simplex_tree.h b/src/Simplex_tree/doc/Intro_simplex_tree.h index 800879fe..ef8dec91 100644 --- a/src/Simplex_tree/doc/Intro_simplex_tree.h +++ b/src/Simplex_tree/doc/Intro_simplex_tree.h @@ -39,10 +39,10 @@ namespace Gudhi { * \subsubsection filteredcomplexessimplextreeexamples Examples * * Here is a list of simplex tree examples : - * \li + * \li * Simplex_tree/simple_simplex_tree.cpp - Simple simplex tree construction and basic function use. * - * \li + * \li * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp - Simplex tree construction from cliques of graph read in * a file. * @@ -54,11 +54,11 @@ Expand the simplex tree in 3.8e-05 s. Information of the Simplex Tree: Number of vertices = 10 Number of simplices = 98 \endcode * - * \li + * \li * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp - Simplex tree is computed and displayed * from a 3D alpha complex (Requires CGAL, GMP and GMPXX to be installed). * - * \li + * \li * Simplex_tree/graph_expansion_with_blocker.cpp - Simple simplex tree construction from a one-skeleton graph with * a simple blocker expansion method. * diff --git a/src/Spatial_searching/doc/Intro_spatial_searching.h b/src/Spatial_searching/doc/Intro_spatial_searching.h index 30805570..81c5a3aa 100644 --- a/src/Spatial_searching/doc/Intro_spatial_searching.h +++ b/src/Spatial_searching/doc/Intro_spatial_searching.h @@ -36,7 +36,7 @@ namespace spatial_searching { * * This example generates 500 random points, then performs all-near-neighbors searches, and queries for nearest and furthest neighbors using different methods. 
* - * \include Spatial_searching/example_spatial_searching.cpp + * \include example_spatial_searching.cpp * */ /** @} */ // end defgroup spatial_searching diff --git a/src/Subsampling/doc/Intro_subsampling.h b/src/Subsampling/doc/Intro_subsampling.h index 1c84fb2e..1c366fe6 100644 --- a/src/Subsampling/doc/Intro_subsampling.h +++ b/src/Subsampling/doc/Intro_subsampling.h @@ -32,20 +32,20 @@ namespace subsampling { * squared distance between any two points * is greater than or equal to 0.4. * - * \include Subsampling/example_sparsify_point_set.cpp + * \include example_sparsify_point_set.cpp * * \section farthestpointexamples Example: choose_n_farthest_points * * This example outputs a subset of 100 points obtained by González algorithm, * starting with a random point. * - * \include Subsampling/example_choose_n_farthest_points.cpp + * \include example_choose_n_farthest_points.cpp * * \section randompointexamples Example: pick_n_random_points * * This example outputs a subset of 100 points picked randomly. * - * \include Subsampling/example_pick_n_random_points.cpp + * \include example_pick_n_random_points.cpp */ /** @} */ // end defgroup subsampling diff --git a/src/Tangential_complex/doc/Intro_tangential_complex.h b/src/Tangential_complex/doc/Intro_tangential_complex.h index ce277185..cb8c6122 100644 --- a/src/Tangential_complex/doc/Intro_tangential_complex.h +++ b/src/Tangential_complex/doc/Intro_tangential_complex.h @@ -88,7 +88,7 @@ This example builds the Tangential complex of point set. Note that the dimension of the kernel here is dynamic, which is slower, but more flexible: the intrinsic and ambient dimensions does not have to be known at compile-time. -\include Tangential_complex/example_basic.cpp +\include example_basic.cpp \section example_with_perturb Example with perturbation @@ -97,7 +97,7 @@ by perturbing the positions of points involved in inconsistent simplices. Note that the dimension of the kernel here is static, which is the best choice when the dimensions are known at compile-time. -\include Tangential_complex/example_with_perturb.cpp +\include example_with_perturb.cpp */ /** @} */ // end defgroup tangential_complex diff --git a/src/Witness_complex/doc/Witness_complex_doc.h b/src/Witness_complex/doc/Witness_complex_doc.h index 202f4539..c66b106e 100644 --- a/src/Witness_complex/doc/Witness_complex_doc.h +++ b/src/Witness_complex/doc/Witness_complex_doc.h @@ -108,14 +108,14 @@ int main(int argc, char * const argv[]) { Here is an example of constructing a strong witness complex filtration and computing persistence on it: - \include Witness_complex/strong_witness_persistence.cpp + \include strong_witness_persistence.cpp \section witnessexample3 Example3: Computing relaxed witness complex persistence from a distance matrix In this example we compute the relaxed witness complex persistence from a given matrix of closest landmarks to each witness. Each landmark is given as the couple (index, distance). 
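A rough Python counterpart to the nearest-landmark-table example referenced below; the table values are hypothetical, each row listing (landmark_id, distance) pairs for one witness, closest landmark first:

from gudhi import WitnessComplex

nearest_landmark_table = [[(0, 0.0), (1, 0.1)],
                          [(1, 0.0), (0, 0.2)],
                          [(0, 0.05), (1, 0.05)]]
witness_complex = WitnessComplex(nearest_landmark_table=nearest_landmark_table)
simplex_tree = witness_complex.create_simplex_tree(max_alpha_square=0.25)
print(simplex_tree.num_simplices())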
- \include Witness_complex/example_nearest_landmark_table.cpp + \include example_nearest_landmark_table.cpp */ diff --git a/src/cmake/modules/GUDHI_doxygen_target.cmake b/src/cmake/modules/GUDHI_doxygen_target.cmake index 7a84c4e0..0f80b187 100644 --- a/src/cmake/modules/GUDHI_doxygen_target.cmake +++ b/src/cmake/modules/GUDHI_doxygen_target.cmake @@ -8,14 +8,47 @@ if(DOXYGEN_FOUND) get_property(DOXYGEN_EXECUTABLE TARGET Doxygen::doxygen PROPERTY IMPORTED_LOCATION) endif() - add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${GUDHI_USER_VERSION_DIR}/Doxyfile - WORKING_DIRECTORY ${GUDHI_USER_VERSION_DIR} - COMMENT "Generating API documentation with Doxygen in ${GUDHI_USER_VERSION_DIR}/doc/html/" VERBATIM) - - if(TARGET user_version) - # In dev version, doxygen target depends on user_version target. Not existing in user version - add_dependencies(doxygen user_version) + message("++ Project = ${CMAKE_PROJECT_NAME}") + if (CMAKE_PROJECT_NAME STREQUAL "GUDHIdev") + # Set Doxyfile.in variables for the developer version + set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}/src") + foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/") + set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/doc/ \\ \n") + endif() + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/") + set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/example/ \\ \n") + endif() + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/") + set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/${GUDHI_MODULE}/utilities/ \\ \n") + endif() + endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) + set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/common/doc") + set(GUDHI_DOXYGEN_UTILS_PATH "*/utilities") + endif() + if (CMAKE_PROJECT_NAME STREQUAL "GUDHI") + # Set Doxyfile.in variables for the user version + set(GUDHI_DOXYGEN_SOURCE_PREFIX "${CMAKE_SOURCE_DIR}") + foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}") + set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/${GUDHI_MODULE}/ \\ \n") + endif() + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}") + set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/example/${GUDHI_MODULE}/ \\ \n") + endif() + if(EXISTS "${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}") + set(GUDHI_DOXYGEN_EXAMPLE_PATH "${GUDHI_DOXYGEN_EXAMPLE_PATH} ${GUDHI_DOXYGEN_SOURCE_PREFIX}/utilities/${GUDHI_MODULE}/ \\ \n") + endif() + endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) + set(GUDHI_DOXYGEN_COMMON_DOC_PATH "${GUDHI_DOXYGEN_SOURCE_PREFIX}/doc/common") + set(GUDHI_DOXYGEN_UTILS_PATH "utilities/*") endif() + + configure_file(${GUDHI_DOXYGEN_SOURCE_PREFIX}/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/Doxyfile" @ONLY) + + add_custom_target(doxygen ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating API documentation with Doxygen in 'html' directory" VERBATIM) else() set(GUDHI_MISSING_MODULES ${GUDHI_MISSING_MODULES} "cpp-documentation" CACHE INTERNAL "GUDHI_MISSING_MODULES") endif() diff --git a/src/cmake/modules/GUDHI_user_version_target.cmake b/src/cmake/modules/GUDHI_user_version_target.cmake index e4f39aae..9e76c3d9 
100644 --- a/src/cmake/modules/GUDHI_user_version_target.cmake +++ b/src/cmake/modules/GUDHI_user_version_target.cmake @@ -14,14 +14,7 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E make_directory ${GUDHI_USER_VERSION_DIR} COMMENT "user_version creation in ${GUDHI_USER_VERSION_DIR}") -foreach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) - set(GUDHI_DOXYGEN_IMAGE_PATH "${GUDHI_DOXYGEN_IMAGE_PATH} doc/${GUDHI_MODULE}/ \\ \n") -endforeach(GUDHI_MODULE ${GUDHI_MODULES_FULL_LIST}) - -# Generate Doxyfile for Doxygen - cf. root CMakeLists.txt for explanation -configure_file(${CMAKE_SOURCE_DIR}/src/Doxyfile.in "${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile" @ONLY) -add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E - copy ${CMAKE_CURRENT_BINARY_DIR}/src/Doxyfile ${GUDHI_USER_VERSION_DIR}/Doxyfile) +file(COPY "${CMAKE_SOURCE_DIR}/src/Doxyfile.in" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/") # Generate bib files for Doxygen - cf. root CMakeLists.txt for explanation string(TIMESTAMP GUDHI_VERSION_YEAR "%Y") @@ -48,6 +41,8 @@ add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/src/GUDHIConfig.cmake.in ${GUDHI_USER_VERSION_DIR}/GUDHIConfig.cmake.in) add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/CMakeGUDHIVersion.txt ${GUDHI_USER_VERSION_DIR}/CMakeGUDHIVersion.txt) +add_custom_command(TARGET user_version PRE_BUILD COMMAND ${CMAKE_COMMAND} -E + copy ${CMAKE_SOURCE_DIR}/src/Doxyfile.in ${GUDHI_USER_VERSION_DIR}/Doxyfile.in) # As cython generates .cpp files in source, we have to copy all except cpp files from python directory file(GLOB_RECURSE PYTHON_FILES ${CMAKE_SOURCE_DIR}/${GUDHI_PYTHON_PATH}/*) diff --git a/src/common/doc/examples.h b/src/common/doc/examples.h index 474f8699..b557727b 100644 --- a/src/common/doc/examples.h +++ b/src/common/doc/examples.h @@ -1,96 +1,98 @@ // List of GUDHI examples - Doxygen needs at least a file tag to analyse comments // In user_version, `find . -name "*.cpp"` in example and utilities folders /*! 
@file Examples - * @example Alpha_complex/Alpha_complex_from_off.cpp - * @example Alpha_complex/Alpha_complex_from_points.cpp - * @example Bottleneck_distance/bottleneck_basic_example.cpp - * @example Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp - * @example Witness_complex/example_nearest_landmark_table.cpp - * @example Witness_complex/example_witness_complex_off.cpp - * @example Witness_complex/example_witness_complex_sphere.cpp - * @example Witness_complex/example_strong_witness_complex_off.cpp - * @example Simplex_tree/mini_simplex_tree.cpp - * @example Simplex_tree/graph_expansion_with_blocker.cpp - * @example Simplex_tree/simple_simplex_tree.cpp - * @example Simplex_tree/simplex_tree_from_cliques_of_graph.cpp - * @example Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp - * @example Simplex_tree/cech_complex_cgal_mini_sphere_3d.cpp - * @example Persistent_cohomology/plain_homology.cpp - * @example Persistent_cohomology/persistence_from_file.cpp - * @example Persistent_cohomology/rips_persistence_step_by_step.cpp - * @example Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp - * @example Persistent_cohomology/custom_persistence_sort.cpp - * @example Persistent_cohomology/persistence_from_simple_simplex_tree.cpp - * @example Persistent_cohomology/rips_multifield_persistence.cpp - * @example Skeleton_blocker/Skeleton_blocker_from_simplices.cpp - * @example Skeleton_blocker/Skeleton_blocker_iteration.cpp - * @example Skeleton_blocker/Skeleton_blocker_link.cpp - * @example Contraction/Garland_heckbert.cpp - * @example Contraction/Rips_contraction.cpp - * @example Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp - * @example common/example_CGAL_3D_points_off_reader.cpp - * @example common/example_vector_double_points_off_reader.cpp - * @example common/example_CGAL_points_off_reader.cpp - * @example Rips_complex/example_one_skeleton_rips_from_distance_matrix.cpp - * @example Rips_complex/example_one_skeleton_rips_from_points.cpp - * @example Rips_complex/example_rips_complex_from_csv_distance_matrix_file.cpp - * @example Rips_complex/example_rips_complex_from_off_file.cpp - * @example Persistence_representations/persistence_intervals.cpp - * @example Persistence_representations/persistence_vectors.cpp - * @example Persistence_representations/persistence_heat_maps.cpp - * @example Persistence_representations/persistence_landscape_on_grid.cpp - * @example Persistence_representations/persistence_landscape.cpp - * @example Tangential_complex/example_basic.cpp - * @example Tangential_complex/example_with_perturb.cpp - * @example Subsampling/example_custom_distance.cpp - * @example Subsampling/example_choose_n_farthest_points.cpp - * @example Subsampling/example_sparsify_point_set.cpp - * @example Subsampling/example_pick_n_random_points.cpp - * @example Nerve_GIC/CoordGIC.cpp - * @example Nerve_GIC/Nerve.cpp - * @example Nerve_GIC/FuncGIC.cpp - * @example Nerve_GIC/VoronoiGIC.cpp - * @example Spatial_searching/example_spatial_searching.cpp - * @example Alpha_complex/alpha_complex_3d_persistence.cpp - * @example Alpha_complex/alpha_complex_persistence.cpp - * @example Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp - * @example Bottleneck_distance/bottleneck_distance.cpp - * @example Witness_complex/weak_witness_persistence.cpp - * @example Witness_complex/strong_witness_persistence.cpp - * @example Bitmap_cubical_complex/cubical_complex_persistence.cpp - * @example Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp 
- * @example common/off_file_from_shape_generator.cpp - * @example Rips_complex/rips_distance_matrix_persistence.cpp - * @example Rips_complex/rips_persistence.cpp - * @example Persistence_representations/persistence_landscapes_on_grid/create_landscapes_on_grid.cpp - * @example Persistence_representations/persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp - * @example Persistence_representations/persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp - * @example Persistence_representations/persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp - * @example Persistence_representations/persistence_landscapes_on_grid/average_landscapes_on_grid.cpp - * @example Persistence_representations/persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp - * @example Persistence_representations/persistence_intervals/compute_number_of_dominant_intervals.cpp - * @example Persistence_representations/persistence_intervals/plot_persistence_Betti_numbers.cpp - * @example Persistence_representations/persistence_intervals/plot_persistence_intervals.cpp - * @example Persistence_representations/persistence_intervals/plot_histogram_of_intervals_lengths.cpp - * @example Persistence_representations/persistence_intervals/compute_bottleneck_distance.cpp - * @example Persistence_representations/persistence_heat_maps/create_pssk.cpp - * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp - * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp - * @example Persistence_representations/persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp - * @example Persistence_representations/persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp - * @example Persistence_representations/persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp - * @example Persistence_representations/persistence_heat_maps/average_persistence_heat_maps.cpp - * @example Persistence_representations/persistence_heat_maps/plot_persistence_heat_map.cpp - * @example Persistence_representations/persistence_heat_maps/create_persistence_heat_maps.cpp - * @example Persistence_representations/persistence_vectors/plot_persistence_vectors.cpp - * @example Persistence_representations/persistence_vectors/compute_distance_of_persistence_vectors.cpp - * @example Persistence_representations/persistence_vectors/average_persistence_vectors.cpp - * @example Persistence_representations/persistence_vectors/create_persistence_vectors.cpp - * @example Persistence_representations/persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp - * @example Persistence_representations/persistence_landscapes/average_landscapes.cpp - * @example Persistence_representations/persistence_landscapes/compute_scalar_product_of_landscapes.cpp - * @example Persistence_representations/persistence_landscapes/create_landscapes.cpp - * @example Persistence_representations/persistence_landscapes/compute_distance_of_landscapes.cpp - * @example Persistence_representations/persistence_landscapes/plot_landscapes.cpp + * \section Alpha_complex_examples Alpha complex + * @example Alpha_complex_from_off.cpp + * @example Alpha_complex_from_points.cpp + * \section bottleneck_examples bottleneck + * @example bottleneck_basic_example.cpp + * @example alpha_rips_persistence_bottleneck_distance.cpp + * @example example_nearest_landmark_table.cpp + * @example 
example_witness_complex_off.cpp + * @example example_witness_complex_sphere.cpp + * @example example_strong_witness_complex_off.cpp + * @example mini_simplex_tree.cpp + * @example graph_expansion_with_blocker.cpp + * @example simple_simplex_tree.cpp + * @example simplex_tree_from_cliques_of_graph.cpp + * @example example_alpha_shapes_3_simplex_tree_from_off_file.cpp + * @example cech_complex_cgal_mini_sphere_3d.cpp + * @example plain_homology.cpp + * @example persistence_from_file.cpp + * @example rips_persistence_step_by_step.cpp + * @example rips_persistence_via_boundary_matrix.cpp + * @example custom_persistence_sort.cpp + * @example persistence_from_simple_simplex_tree.cpp + * @example rips_multifield_persistence.cpp + * @example Skeleton_blocker_from_simplices.cpp + * @example Skeleton_blocker_iteration.cpp + * @example Skeleton_blocker_link.cpp + * @example Garland_heckbert.cpp + * @example Rips_contraction.cpp + * @example Random_bitmap_cubical_complex.cpp + * @example example_CGAL_3D_points_off_reader.cpp + * @example example_vector_double_points_off_reader.cpp + * @example example_CGAL_points_off_reader.cpp + * @example example_one_skeleton_rips_from_distance_matrix.cpp + * @example example_one_skeleton_rips_from_points.cpp + * @example example_rips_complex_from_csv_distance_matrix_file.cpp + * @example example_rips_complex_from_off_file.cpp + * @example persistence_intervals.cpp + * @example persistence_vectors.cpp + * @example persistence_heat_maps.cpp + * @example persistence_landscape_on_grid.cpp + * @example persistence_landscape.cpp + * @example example_basic.cpp + * @example example_with_perturb.cpp + * @example example_custom_distance.cpp + * @example example_choose_n_farthest_points.cpp + * @example example_sparsify_point_set.cpp + * @example example_pick_n_random_points.cpp + * @example CoordGIC.cpp + * @example Nerve.cpp + * @example FuncGIC.cpp + * @example VoronoiGIC.cpp + * @example example_spatial_searching.cpp + * @example alpha_complex_3d_persistence.cpp + * @example alpha_complex_persistence.cpp + * @example Weighted_alpha_complex_3d_from_points.cpp + * @example bottleneck_distance.cpp + * @example weak_witness_persistence.cpp + * @example strong_witness_persistence.cpp + * @example cubical_complex_persistence.cpp + * @example periodic_cubical_complex_persistence.cpp + * @example off_file_from_shape_generator.cpp + * @example rips_distance_matrix_persistence.cpp + * @example rips_persistence.cpp + * @example persistence_landscapes_on_grid/create_landscapes_on_grid.cpp + * @example persistence_landscapes_on_grid/plot_landscapes_on_grid.cpp + * @example persistence_landscapes_on_grid/compute_scalar_product_of_landscapes_on_grid.cpp + * @example persistence_landscapes_on_grid/compute_distance_of_landscapes_on_grid.cpp + * @example persistence_landscapes_on_grid/average_landscapes_on_grid.cpp + * @example persistence_intervals/compute_birth_death_range_in_persistence_diagram.cpp + * @example persistence_intervals/compute_number_of_dominant_intervals.cpp + * @example persistence_intervals/plot_persistence_Betti_numbers.cpp + * @example persistence_intervals/plot_persistence_intervals.cpp + * @example persistence_intervals/plot_histogram_of_intervals_lengths.cpp + * @example persistence_intervals/compute_bottleneck_distance.cpp + * @example persistence_heat_maps/create_pssk.cpp + * @example persistence_heat_maps/create_p_h_m_weighted_by_arctan_of_their_persistence.cpp + * @example persistence_heat_maps/create_p_h_m_weighted_by_squared_diag_distance.cpp + * @example 
persistence_heat_maps/compute_distance_of_persistence_heat_maps.cpp + * @example persistence_heat_maps/compute_scalar_product_of_persistence_heat_maps.cpp + * @example persistence_heat_maps/create_p_h_m_weighted_by_distance_from_diagonal.cpp + * @example persistence_heat_maps/average_persistence_heat_maps.cpp + * @example persistence_heat_maps/plot_persistence_heat_map.cpp + * @example persistence_heat_maps/create_persistence_heat_maps.cpp + * @example persistence_vectors/plot_persistence_vectors.cpp + * @example persistence_vectors/compute_distance_of_persistence_vectors.cpp + * @example persistence_vectors/average_persistence_vectors.cpp + * @example persistence_vectors/create_persistence_vectors.cpp + * @example persistence_vectors/compute_scalar_product_of_persistence_vectors.cpp + * @example persistence_landscapes/average_landscapes.cpp + * @example persistence_landscapes/compute_scalar_product_of_landscapes.cpp + * @example persistence_landscapes/create_landscapes.cpp + * @example persistence_landscapes/compute_distance_of_landscapes.cpp + * @example persistence_landscapes/plot_landscapes.cpp */ diff --git a/src/common/doc/installation.h b/src/common/doc/installation.h index c2e63a24..313184b6 100644 --- a/src/common/doc/installation.h +++ b/src/common/doc/installation.h @@ -40,11 +40,8 @@ make \endverbatim * \subsection documentationgeneration Documentation * To generate the documentation, Doxygen is required. * Run the following command in a terminal: -\verbatim -make doxygen -# Documentation will be generated in the folder YYYY-MM-DD-hh-mm-ss_GUDHI_X.Y.Z/doc/html/ -# You can customize the directory name by calling `cmake -DUSER_VERSION_DIR=/my/custom/folder` -\endverbatim + * \verbatim make doxygen \endverbatim + * Documentation will be generated in a folder named html. * * \subsection helloworld Hello world ! * The Hello world for GUDHI @@ -57,7 +54,7 @@ make doxygen * * The following example requires the GNU Multiple Precision Arithmetic * Library (GMP) and will not be built if GMP is not installed: - * \li + * \li * Persistent_cohomology/rips_multifield_persistence.cpp * * Having GMP version 4.2 or higher installed is recommended. 
@@ -75,55 +72,55 @@ make doxygen * * The following examples/utilities require the Computational Geometry Algorithms * Library (CGAL \cite cgal:eb-19b) and will not be built if CGAL version 4.11.0 or higher is not installed: - * \li + * \li * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp - * \li + * \li * Witness_complex/strong_witness_persistence.cpp - * \li + * \li * Witness_complex/weak_witness_persistence.cpp - * \li + * \li * Witness_complex/example_strong_witness_complex_off.cpp - * \li + * \li * Witness_complex/example_witness_complex_off.cpp - * \li + * \li * Witness_complex/example_witness_complex_sphere.cpp - * \li + * \li * Alpha_complex/Alpha_complex_from_off.cpp - * \li + * \li * Alpha_complex/Alpha_complex_from_points.cpp - * \li + * \li * Alpha_complex/alpha_complex_persistence.cpp - * \li + * \li * Persistent_cohomology/custom_persistence_sort.cpp - * \li + * \li * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp - * \li + * \li * Bottleneck_distance/bottleneck_basic_example.cpp - * \li + * \li * Bottleneck_distance/bottleneck_distance.cpp - * \li + * \li * Nerve_GIC/CoordGIC.cpp - * \li + * \li * Nerve_GIC/FuncGIC.cpp - * \li + * \li * Nerve_GIC/Nerve.cpp - * \li + * \li * Nerve_GIC/VoronoiGIC.cpp - * \li + * \li * Spatial_searching/example_spatial_searching.cpp - * \li + * \li * Subsampling/example_choose_n_farthest_points.cpp - * \li + * \li * Subsampling/example_pick_n_random_points.cpp - * \li + * \li * Subsampling/example_sparsify_point_set.cpp - * \li + * \li * Tangential_complex/example_basic.cpp - * \li + * \li * Tangential_complex/example_with_perturb.cpp - * \li + * \li * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp - * \li + * \li * Alpha_complex/alpha_complex_3d_persistence.cpp * * \subsection eigen Eigen @@ -133,41 +130,41 @@ make doxygen * * The following examples/utilities require the Eigen and will not be * built if Eigen is not installed: - * \li + * \li * Alpha_complex/Alpha_complex_from_off.cpp - * \li + * \li * Alpha_complex/Alpha_complex_from_points.cpp - * \li + * \li * Alpha_complex/alpha_complex_persistence.cpp - * \li + * \li * Alpha_complex/alpha_complex_3d_persistence.cpp - * \li + * \li * Alpha_complex/Weighted_alpha_complex_3d_from_points.cpp - * \li + * \li * Bottleneck_distance/alpha_rips_persistence_bottleneck_distance.cpp.cpp - * \li + * \li * Persistent_cohomology/custom_persistence_sort.cpp - * \li + * \li * Spatial_searching/example_spatial_searching.cpp - * \li + * \li * Subsampling/example_choose_n_farthest_points.cpp - * \li + * \li * Subsampling/example_pick_n_random_points.cpp - * \li + * \li * Subsampling/example_sparsify_point_set.cpp - * \li + * \li * Tangential_complex/example_basic.cpp - * \li + * \li * Tangential_complex/example_with_perturb.cpp - * \li + * \li * Witness_complex/strong_witness_persistence.cpp - * \li + * \li * Witness_complex/weak_witness_persistence.cpp - * \li + * \li * Witness_complex/example_strong_witness_complex_off.cpp - * \li + * \li * Witness_complex/example_witness_complex_off.cpp - * \li + * \li * Witness_complex/example_witness_complex_sphere.cpp * * \subsection tbb Threading Building Blocks @@ -178,67 +175,67 @@ make doxygen * Having Intel® TBB installed is recommended to parallelize and accelerate some GUDHI computations. 
* * The following examples/utilities are using Intel® TBB if installed: - * \li + * \li * Alpha_complex/Alpha_complex_from_off.cpp - * \li + * \li * Alpha_complex/Alpha_complex_from_points.cpp - * \li + * \li * Alpha_complex/alpha_complex_3d_persistence.cpp - * \li + * \li * Alpha_complex/alpha_complex_persistence.cpp - * \li + * \li * Bitmap_cubical_complex/cubical_complex_persistence.cpp - * \li + * \li * Bitmap_cubical_complex/periodic_cubical_complex_persistence.cpp - * \li + * \li * Bitmap_cubical_complex/Random_bitmap_cubical_complex.cpp - * \li + * \li * Nerve_GIC/CoordGIC.cpp - * \li + * \li * Nerve_GIC/FuncGIC.cpp - * \li + * \li * Nerve_GIC/Nerve.cpp - * \li + * \li * Nerve_GIC/VoronoiGIC.cpp - * \li + * \li * Simplex_tree/simple_simplex_tree.cpp - * \li + * \li * Simplex_tree/example_alpha_shapes_3_simplex_tree_from_off_file.cpp - * \li + * \li * Simplex_tree/simplex_tree_from_cliques_of_graph.cpp - * \li + * \li * Simplex_tree/graph_expansion_with_blocker.cpp - * \li + * \li * Persistent_cohomology/alpha_complex_3d_persistence.cpp - * \li + * \li * Persistent_cohomology/alpha_complex_persistence.cpp - * \li + * \li * Persistent_cohomology/rips_persistence_via_boundary_matrix.cpp - * \li + * \li * Persistent_cohomology/persistence_from_file.cpp - * \li + * \li * Persistent_cohomology/persistence_from_simple_simplex_tree.cpp - * \li + * \li * Persistent_cohomology/plain_homology.cpp - * \li + * \li * Persistent_cohomology/rips_multifield_persistence.cpp - * \li + * \li * Persistent_cohomology/rips_persistence_step_by_step.cpp - * \li + * \li * Persistent_cohomology/custom_persistence_sort.cpp - * \li + * \li * Rips_complex/example_one_skeleton_rips_from_points.cpp - * \li + * \li * Rips_complex/example_rips_complex_from_off_file.cpp - * \li + * \li * Rips_complex/rips_distance_matrix_persistence.cpp - * \li + * \li * Rips_complex/rips_persistence.cpp - * \li + * \li * Witness_complex/strong_witness_persistence.cpp - * \li + * \li * Witness_complex/weak_witness_persistence.cpp - * \li + * \li * Witness_complex/example_nearest_landmark_table.cpp * * \section Contributions Bug reports and contributions diff --git a/src/common/include/gudhi/Points_3D_off_io.h b/src/common/include/gudhi/Points_3D_off_io.h index 39b79c96..4f74fd4b 100644 --- a/src/common/include/gudhi/Points_3D_off_io.h +++ b/src/common/include/gudhi/Points_3D_off_io.h @@ -125,7 +125,7 @@ class Points_3D_off_visitor_reader { * This example loads points from an OFF file and builds a vector of CGAL points in dimension 3. * Then, it is asked to display the points. * - * @include common/example_CGAL_3D_points_off_reader.cpp + * @include example_CGAL_3D_points_off_reader.cpp * * When launching: * @@ -134,7 +134,7 @@ class Points_3D_off_visitor_reader { * * the program output is: * - * @include common/cgal3Doffreader_result.txt + * @include cgal3Doffreader_result.txt */ template class Points_3D_off_reader { diff --git a/src/common/include/gudhi/Points_off_io.h b/src/common/include/gudhi/Points_off_io.h index 9dc40568..3aa8afd8 100644 --- a/src/common/include/gudhi/Points_off_io.h +++ b/src/common/include/gudhi/Points_off_io.h @@ -107,7 +107,7 @@ class Points_off_visitor_reader { * This example loads points from an OFF file and builds a vector of points (vector of double). * Then, it is asked to display the points. 
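On the Python side, the rough counterpart of this OFF reader is the read_points_from_off_file helper (assuming that helper name, which current Python releases provide):

from gudhi import read_points_from_off_file

points = read_points_from_off_file(off_file='../../data/points/alphacomplexdoc.off')
for point in points[:3]:
    print(point)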
* - * \include common/example_vector_double_points_off_reader.cpp + * \include example_vector_double_points_off_reader.cpp * * When launching: * @@ -116,7 +116,7 @@ class Points_off_visitor_reader { * * the program outputs a file ../../data/points/alphacomplexdoc.off.txt: * - * \include common/vectordoubleoffreader_result.txt + * \include vectordoubleoffreader_result.txt */ template class Points_off_reader { -- cgit v1.2.3 From df9daf64aa7623ac188a5842a90162d65a54b07e Mon Sep 17 00:00:00 2001 From: Hind Date: Tue, 27 Apr 2021 18:01:00 +0200 Subject: Rename and reorganize point generators module --- src/python/CMakeLists.txt | 10 ++-- .../alpha_complex_from_generated_points_example.py | 36 ------------- ...plex_from_generated_points_on_sphere_example.py | 36 +++++++++++++ src/python/gudhi/datasets/generators/__init__.py | 0 src/python/gudhi/datasets/generators/sphere.cc | 61 ++++++++++++++++++++++ src/python/gudhi/random_point_generators.cc | 61 ---------------------- 6 files changed, 103 insertions(+), 101 deletions(-) delete mode 100644 src/python/example/alpha_complex_from_generated_points_example.py create mode 100644 src/python/example/alpha_complex_from_generated_points_on_sphere_example.py create mode 100644 src/python/gudhi/datasets/generators/__init__.py create mode 100644 src/python/gudhi/datasets/generators/sphere.cc delete mode 100644 src/python/gudhi/random_point_generators.cc (limited to 'src') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index 87f10a1a..bcdd0741 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -46,7 +46,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ") - set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'random_point_generators', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets/generators', ") endif() if(CYTHON_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ") @@ -152,7 +152,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'random_point_generators', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/sphere', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () @@ -264,6 +264,8 @@ if(PYTHONINTERP_FOUND) file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera") + file(COPY "gudhi/datasets/generators/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/datasets/generators") + # Some files for pip package file(COPY "introduction.rst" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/") @@ -427,10 +429,10 @@ if(PYTHONINTERP_FOUND) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_points_example.py") - add_test(NAME alpha_complex_from_generated_points_example_py_test + add_test(NAME 
alpha_complex_from_generated_points_on_sphere_example_py_test WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" - ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_generated_points_example.py") + ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/alpha_complex_from_generated_points_on_sphere_example.py") add_test(NAME alpha_complex_diagram_persistence_from_off_file_example_py_test WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" diff --git a/src/python/example/alpha_complex_from_generated_points_example.py b/src/python/example/alpha_complex_from_generated_points_example.py deleted file mode 100644 index c2562d8a..00000000 --- a/src/python/example/alpha_complex_from_generated_points_example.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python - -from gudhi import random_point_generators -from gudhi import AlphaComplex - - -""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. - Author(s): Hind Montassif - - Copyright (C) 2021 Inria - - Modification(s): - - YYYY/MM Author: Description of the modification -""" - -__author__ = "Hind Montassif" -__copyright__ = "Copyright (C) 2021 Inria" -__license__ = "MIT" - -print("#####################################################################") -print("AlphaComplex creation from generated points") - - -# Generate a circle: 50 points; dim 2; radius 1 -points = random_point_generators.generate_points_on_sphere_d(50, 2, 1) - -# Create an alpha complex -alpha_complex = AlphaComplex(points=points) -simplex_tree = alpha_complex.create_simplex_tree() - -result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \ - repr(simplex_tree.num_simplices()) + ' simplices - ' + \ - repr(simplex_tree.num_vertices()) + ' vertices.' -print(result_str) - diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py new file mode 100644 index 00000000..2de9ec08 --- /dev/null +++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +from gudhi.datasets.generators import sphere +from gudhi import AlphaComplex + + +""" This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + Author(s): Hind Montassif + + Copyright (C) 2021 Inria + + Modification(s): + - YYYY/MM Author: Description of the modification +""" + +__author__ = "Hind Montassif" +__copyright__ = "Copyright (C) 2021 Inria" +__license__ = "MIT" + +print("#####################################################################") +print("AlphaComplex creation from generated points on sphere") + + +# Generate a circle: 50 points; dim 2; radius 1 +points = sphere.generate_random_points(50, 2, 1) + +# Create an alpha complex +alpha_complex = AlphaComplex(points=points) +simplex_tree = alpha_complex.create_simplex_tree() + +result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \ + repr(simplex_tree.num_simplices()) + ' simplices - ' + \ + repr(simplex_tree.num_vertices()) + ' vertices.' 
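# Sketch of the expected behaviour (editorial comment, not part of the
# committed file): with 50 random points on a circle in R^2, the alpha
# complex is the 2-dimensional Delaunay complex, so the summary printed
# below reports dimension 2 and 50 vertices; the number of simplices
# varies with the random sample.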
+print(result_str) + diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/python/gudhi/datasets/generators/sphere.cc b/src/python/gudhi/datasets/generators/sphere.cc new file mode 100644 index 00000000..79392ef0 --- /dev/null +++ b/src/python/gudhi/datasets/generators/sphere.cc @@ -0,0 +1,61 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Hind Montassif + * + * Copyright (C) 2021 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include +#include + +#include +#include + +#include + +namespace py = pybind11; + + +typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; + +py::array_t generate_points_on_sphere(size_t num_points, int dim, double radius) { + + py::array_t points({num_points, (size_t)dim}); + + py::buffer_info buf = points.request(); + double *ptr = static_cast(buf.ptr); + + GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); + GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); + + + py::gil_scoped_release release; + auto points_generated = Gudhi::generate_points_on_sphere_d(num_points, dim, radius); + + for (size_t i = 0; i < num_points; i++) + for (int j = 0; j < dim; j++) + ptr[i*dim+j] = points_generated[i][j]; + + return points; +} + +PYBIND11_MODULE(sphere, m) { + m.attr("__license__") = "LGPL v3"; + m.def("generate_random_points", &generate_points_on_sphere, + py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, + R"pbdoc( + Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d + + :param num_points: The number of points to be generated. + :type num_points: unsigned integer + :param dim: The dimension. + :type dim: integer + :param radius: The radius. + :type radius: float + :rtype: numpy array of float + :returns: the generated points on a sphere. + )pbdoc"); +} diff --git a/src/python/gudhi/random_point_generators.cc b/src/python/gudhi/random_point_generators.cc deleted file mode 100644 index 6eb40429..00000000 --- a/src/python/gudhi/random_point_generators.cc +++ /dev/null @@ -1,61 +0,0 @@ -/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
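(Aside, not part of the patch: the renamed module added above is used as follows — a minimal sketch, assuming the package was built with CGAL >= 4.11.0 so that datasets/generators/sphere is compiled:)

from gudhi.datasets.generators import sphere

# Draw 50 i.i.d. points uniformly on the circle of radius 1 in R^2
# (the (d-1)-sphere with d = 2); the result is an (n x d) numpy array.
points = sphere.generate_random_points(num_points=50, dim=2, radius=1.)
print(points.shape)  # (50, 2)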
- * Author(s): Hind Montassif - * - * Copyright (C) 2021 Inria - * - * Modification(s): - * - YYYY/MM Author: Description of the modification - */ - -#include -#include - -#include -#include - -#include - -namespace py = pybind11; - - -typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; - -py::array_t generate_points_on_sphere(size_t num_points, int dim, double radius) { - - py::array_t points({num_points, (size_t)dim}); - - py::buffer_info buf = points.request(); - double *ptr = static_cast(buf.ptr); - - GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); - GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); - - - py::gil_scoped_release release; - auto points_generated = Gudhi::generate_points_on_sphere_d(num_points, dim, radius); - - for (size_t i = 0; i < num_points; i++) - for (int j = 0; j < dim; j++) - ptr[i*dim+j] = points_generated[i][j]; - - return points; -} - -PYBIND11_MODULE(random_point_generators, m) { - m.attr("__license__") = "LGPL v3"; - m.def("generate_points_on_sphere_d", &generate_points_on_sphere, - py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, - R"pbdoc( - Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d - - :param num_points: The number of points to be generated. - :type num_points: unsigned integer - :param dim: The dimension. - :type dim: integer - :param radius: The radius. - :type radius: float - :rtype: numpy array of float - :returns: the generated points on a sphere. - )pbdoc"); -} -- cgit v1.2.3 From e90121bf0c62de00e0d10548f833632f3dfdf799 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Wed, 28 Apr 2021 08:09:21 +0200 Subject: update Doxyfile --- src/Doxyfile.in | 457 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 311 insertions(+), 146 deletions(-) (limited to 'src') diff --git a/src/Doxyfile.in b/src/Doxyfile.in index 4784b915..ae8db1a3 100644 --- a/src/Doxyfile.in +++ b/src/Doxyfile.in @@ -1,4 +1,4 @@ -# Doxyfile 1.8.6 +# Doxyfile 1.8.13 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -46,10 +46,10 @@ PROJECT_NUMBER = "@GUDHI_VERSION@" PROJECT_BRIEF = "C++ library for Topological Data Analysis (TDA) and Higher Dimensional Geometry Understanding." -# With the PROJECT_LOGO tag one can specify an logo or icon that is included in -# the documentation. The maximum height of the logo should not exceed 55 pixels -# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo -# to the output directory. +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. PROJECT_LOGO = @@ -60,7 +60,7 @@ PROJECT_LOGO = OUTPUT_DIRECTORY = -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. 
Enabling this # option can be useful when feeding doxygen a huge amount of source files, where @@ -70,6 +70,14 @@ OUTPUT_DIRECTORY = CREATE_SUBDIRS = NO +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. @@ -85,14 +93,14 @@ CREATE_SUBDIRS = NO OUTPUT_LANGUAGE = English -# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES -# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the @@ -127,7 +135,7 @@ ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO -# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. @@ -153,7 +161,8 @@ STRIP_FROM_PATH = # specify the list of include paths that are normally passed to the compiler # using the -I flag. -STRIP_FROM_INC_PATH = include concept +STRIP_FROM_INC_PATH = include \ + concept # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't @@ -197,9 +206,9 @@ MULTILINE_CPP_IS_BRIEF = NO INHERIT_DOCS = YES -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a -# new page for each member. If set to NO, the documentation of a member will be -# part of the file/class/namespace that contains it. +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO @@ -261,11 +270,14 @@ OPTIMIZE_OUTPUT_VHDL = NO # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make -# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C -# (default is Fortran), use: inc=Fortran f=C. +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. 
For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. # -# Note For files without extension you can use no_extension as a placeholder. +# Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. @@ -282,10 +294,19 @@ EXTENSION_MAPPING = MARKDOWN_SUPPORT = YES +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES @@ -325,13 +346,20 @@ SIP_SUPPORT = NO IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first +# tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent @@ -390,7 +418,7 @@ LOOKUP_CACHE_SIZE = 0 # Build related configuration options #--------------------------------------------------------------------------- -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. @@ -400,35 +428,35 @@ LOOKUP_CACHE_SIZE = 0 EXTRACT_ALL = NO -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. 
EXTRACT_PACKAGE = NO -# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = NO -# This flag is only useful for Objective-C code. When set to YES local methods, +# This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO only methods in the interface are +# included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. @@ -453,21 +481,21 @@ HIDE_UNDOC_MEMBERS = YES # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set -# to NO these classes will be included in the various overviews. This option has -# no effect if EXTRACT_ALL is enabled. +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = YES # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO these declarations will be +# (class|struct|union) declarations. If set to NO, these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO these +# documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. @@ -481,7 +509,7 @@ HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES upper-case letters are also +# names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. @@ -490,12 +518,19 @@ INTERNAL_DOCS = NO CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES the +# their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. 
+ +HIDE_COMPOUND_REFERENCE= NO + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -523,14 +558,14 @@ INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. +# name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. Note that +# name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. @@ -575,27 +610,25 @@ SORT_BY_SCOPE_NAME = NO STRICT_PROTO_MATCHING = NO -# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the -# todo list. This list is created by putting \todo commands in the -# documentation. +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = NO -# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the -# test list. This list is created by putting \test commands in the -# documentation. +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = NO -# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = NO -# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. @@ -620,8 +653,8 @@ ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES the list -# will mention the files that were used to generate the documentation. +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES @@ -669,8 +702,7 @@ LAYOUT_FILE = # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. Do not use file names with spaces, bibtex cannot handle them. See -# also \cite for info how to create references. +# search path. See also \cite for info how to create references. 
CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \ @CMAKE_SOURCE_DIR@/biblio/how_to_cite_cgal.bib \ @@ -688,7 +720,7 @@ CITE_BIB_FILES = @CMAKE_SOURCE_DIR@/biblio/bibliography.bib \ QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. @@ -696,7 +728,7 @@ QUIET = NO WARNINGS = YES -# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. @@ -713,12 +745,18 @@ WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return -# value. If set to NO doxygen will only warn about wrong or incomplete parameter -# documentation, but not about the absence of documentation. +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated @@ -742,7 +780,7 @@ WARN_LOGFILE = # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with -# spaces. +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = @CMAKE_SOURCE_DIR@ @@ -758,14 +796,30 @@ INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. - -#FILE_PATTERNS = +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. 
+ +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.md \ + *.mm \ # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. @@ -780,14 +834,14 @@ RECURSIVE = YES # Note that relative paths are relative to the directory from which doxygen is # run. -EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \ - @CMAKE_SOURCE_DIR@/ext/ \ +EXCLUDE = @CMAKE_SOURCE_DIR@/data/ \ + @CMAKE_SOURCE_DIR@/ext/ \ @CMAKE_SOURCE_DIR@/README.md \ @CMAKE_SOURCE_DIR@/.github \ @CMAKE_CURRENT_BINARY_DIR@/new_gudhi_version_creation.md \ @GUDHI_DOXYGEN_SOURCE_PREFIX@/GudhUI/ \ - @GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \ - @GUDHI_DOXYGEN_SOURCE_PREFIX@/python/ \ + @GUDHI_DOXYGEN_SOURCE_PREFIX@/cmake/ \ + @GUDHI_DOXYGEN_SOURCE_PREFIX@/python/ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded @@ -821,7 +875,7 @@ EXCLUDE_SYMBOLS = # command). EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \ - @CMAKE_SOURCE_DIR@/data/ \ + @CMAKE_SOURCE_DIR@/data/ \ @GUDHI_DOXYGEN_EXAMPLE_PATH@ # If the value of the EXAMPLE_PATH tag contains directories, you can use the @@ -829,7 +883,7 @@ EXAMPLE_PATH = @CMAKE_SOURCE_DIR@/biblio/ \ # *.h) to filter out the source-files in the directories. If left blank all # files are included. -EXAMPLE_PATTERNS = +EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands @@ -858,6 +912,10 @@ IMAGE_PATH = @GUDHI_DOXYGEN_IMAGE_PATH@ # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. INPUT_FILTER = @@ -867,11 +925,15 @@ INPUT_FILTER = # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER ) will also be used to filter the input files that are used for +# INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. @@ -931,7 +993,7 @@ REFERENCED_BY_RELATION = NO REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. 
@@ -978,6 +1040,25 @@ USE_HTAGS = NO VERBATIM_HEADERS = YES +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the +# cost of reduced performance. This can be particularly helpful with template +# rich C++ code for which doxygen's built-in parser lacks the necessary type +# information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse-libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- @@ -1008,7 +1089,7 @@ IGNORE_PREFIX = # Configuration options related to the HTML output #--------------------------------------------------------------------------- -# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES @@ -1070,13 +1151,15 @@ HTML_FOOTER = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/footer.html HTML_STYLESHEET = @GUDHI_DOXYGEN_COMMON_DOC_PATH@/stylesheet.css -# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- -# defined cascading style sheet that is included after the standard style sheets +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. -# Doxygen will copy the style sheet file to the output directory. For an example -# see the documentation. +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = @@ -1092,7 +1175,7 @@ HTML_EXTRA_STYLESHEET = HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the stylesheet and background images according to +# will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 @@ -1123,8 +1206,9 @@ HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. 
Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = YES @@ -1220,28 +1304,29 @@ GENERATE_HTMLHELP = NO CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = -# The GENERATE_CHI flag controls if a separate .chi index file is generated ( -# YES) or that it should be included in the master .chm file ( NO). +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = -# The BINARY_TOC flag controls whether a binary table of contents is generated ( -# YES) or a normal table of contents ( NO) in the .chm file. +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1354,7 +1439,7 @@ DISABLE_INDEX = YES # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has @@ -1382,7 +1467,7 @@ ENUM_VALUES_PER_LINE = 4 TREEVIEW_WIDTH = 250 -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1411,7 +1496,7 @@ FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. 
@@ -1448,7 +1533,8 @@ MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2 # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. -MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +MATHJAX_EXTENSIONS = TeX/AMSmath \ + TeX/AMSsymbols # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site @@ -1481,11 +1567,11 @@ SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. There -# are two flavours of web server based searching depending on the -# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for -# searching and an index file used by the script. When EXTERNAL_SEARCH is -# enabled the indexing and searching needs to be provided by external tools. See -# the section "External Indexing and Searching" for details. +# are two flavors of web server based searching depending on the EXTERNAL_SEARCH +# setting. When disabled, doxygen will generate a PHP script for searching and +# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing +# and searching needs to be provided by external tools. See the section +# "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. @@ -1497,7 +1583,7 @@ SERVER_BASED_SEARCH = NO # external search engine pointed to by the SEARCHENGINE_URL option to obtain the # search results. # -# Doxygen ships with an example indexer ( doxyindexer) and search engine +# Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). # @@ -1510,7 +1596,7 @@ EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # -# Doxygen ships with an example indexer ( doxyindexer) and search engine +# Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). See the section "External Indexing and # Searching" for details. @@ -1548,7 +1634,7 @@ EXTRA_SEARCH_MAPPINGS = # Configuration options related to the LaTeX output #--------------------------------------------------------------------------- -# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output. +# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. # The default value is: YES. GENERATE_LATEX = NO @@ -1579,7 +1665,7 @@ LATEX_CMD_NAME = latex MAKEINDEX_CMD_NAME = makeindex -# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX +# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. @@ -1597,13 +1683,18 @@ COMPACT_LATEX = NO PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names -# that should be included in the LaTeX output. To get the times font for -# instance you can specify -# EXTRA_PACKAGES=times +# that should be included in the LaTeX output. 
The package can be specified just +# by its name or with the correct syntax as to be used with the LaTeX +# \usepackage command. To get the times font for instance you can specify : +# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times} +# To use the option intlimits with the amsmath package you can specify: +# EXTRA_PACKAGES=[intlimits]{amsmath} # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. -EXTRA_PACKAGES = amsfonts amsmath amssymb +EXTRA_PACKAGES = amsfonts \ + amsmath \ + amssymb # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first @@ -1613,23 +1704,36 @@ EXTRA_PACKAGES = amsfonts amsmath amssymb # # Note: Only use a user-defined header if you know what you are doing! The # following commands have a special meaning inside the header: $title, -# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will -# replace them by respectively the title of the page, the current date and time, -# only the current date, the version number of doxygen, the project name (see -# PROJECT_NAME), or the project number (see PROJECT_NUMBER). +# $datetime, $date, $doxygenversion, $projectname, $projectnumber, +# $projectbrief, $projectlogo. Doxygen will replace $title with the empty +# string, for the replacement values of the other commands the user is referred +# to HTML_HEADER. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the # generated LaTeX document. The footer should contain everything after the last -# chapter. If it is left blank doxygen will generate a standard footer. +# chapter. If it is left blank doxygen will generate a standard footer. See +# LATEX_HEADER for more information on how to generate a default footer and what +# special commands can be used inside the footer. # # Note: Only use a user-defined footer if you know what you are doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = +# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# LaTeX style sheets that are included after the standard style sheets created +# by doxygen. Using this option one can overrule certain style aspects. Doxygen +# will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EXTRA_STYLESHEET = + # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the LATEX_OUTPUT output # directory. Note that the files will be copied as-is; there are no commands or @@ -1647,8 +1751,8 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES -# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES to get a +# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate +# the PDF file directly from the LaTeX files. Set this option to YES, to get a # higher quality PDF documentation. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. 
@@ -1689,11 +1793,19 @@ LATEX_SOURCE_CODE = NO LATEX_BIB_STYLE = plain +# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: NO. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_TIMESTAMP = NO + #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- -# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The +# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The # RTF output is optimized for Word 97 and may not look too pretty with other RTF # readers/editors. # The default value is: NO. @@ -1708,7 +1820,7 @@ GENERATE_RTF = NO RTF_OUTPUT = rtf -# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF +# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. @@ -1745,11 +1857,21 @@ RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = +# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code +# with syntax highlighting in the RTF output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_SOURCE_CODE = NO + #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- -# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for +# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for # classes and files. # The default value is: NO. @@ -1773,6 +1895,13 @@ MAN_OUTPUT = man MAN_EXTENSION = .3 +# The MAN_SUBDIR tag determines the name of the directory created within +# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by +# MAN_EXTENSION with the initial . removed. +# This tag requires that the tag GENERATE_MAN is set to YES. + +MAN_SUBDIR = + # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it # will generate one additional man file for each entity documented in the real # man page(s). These additional files only source the real man page, but without @@ -1786,7 +1915,7 @@ MAN_LINKS = NO # Configuration options related to the XML output #--------------------------------------------------------------------------- -# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that +# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that # captures the structure of the code including all documentation. # The default value is: NO. @@ -1800,7 +1929,7 @@ GENERATE_XML = NO XML_OUTPUT = xml -# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program +# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program # listings (including syntax highlighting and cross-referencing information) to # the XML output. Note that enabling this will significantly increase the size # of the XML output. 
@@ -1813,7 +1942,7 @@ XML_PROGRAMLISTING = YES # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- -# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files +# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files # that can be used to generate PDF. # The default value is: NO. @@ -1827,14 +1956,23 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook +# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the +# program listings (including syntax highlighting and cross-referencing +# information) to the DOCBOOK output. Note that enabling this will significantly +# increase the size of the DOCBOOK output. +# The default value is: NO. +# This tag requires that the tag GENERATE_DOCBOOK is set to YES. + +DOCBOOK_PROGRAMLISTING = NO + #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- -# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen -# Definitions (see http://autogen.sf.net) file that captures the structure of -# the code including all documentation. Note that this feature is still -# experimental and incomplete at the moment. +# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an +# AutoGen Definitions (see http://autogen.sf.net) file that captures the +# structure of the code including all documentation. Note that this feature is +# still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO @@ -1843,7 +1981,7 @@ GENERATE_AUTOGEN_DEF = NO # Configuration options related to the Perl module output #--------------------------------------------------------------------------- -# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module +# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module # file that captures the structure of the code including all documentation. # # Note that this feature is still experimental and incomplete at the moment. @@ -1851,7 +1989,7 @@ GENERATE_AUTOGEN_DEF = NO GENERATE_PERLMOD = NO -# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary +# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI # output from the Perl module output. # The default value is: NO. @@ -1859,9 +1997,9 @@ GENERATE_PERLMOD = NO PERLMOD_LATEX = NO -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely +# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely # formatted so it can be parsed by a human reader. This is useful if you want to -# understand what is going on. On the other hand, if this tag is set to NO the +# understand what is going on. On the other hand, if this tag is set to NO, the # size of the Perl module output will be much smaller and Perl will parse it # just the same. # The default value is: YES. 
@@ -1881,14 +2019,14 @@ PERLMOD_MAKEVAR_PREFIX = # Configuration options related to the preprocessor #--------------------------------------------------------------------------- -# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all +# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all # C-preprocessor directives found in the sources and include files. # The default value is: YES. ENABLE_PREPROCESSING = YES -# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names -# in the source code. If set to NO only conditional compilation will be +# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names +# in the source code. If set to NO, only conditional compilation will be # performed. Macro expansion can be done in a controlled way by setting # EXPAND_ONLY_PREDEF to YES. # The default value is: NO. @@ -1904,7 +2042,7 @@ MACRO_EXPANSION = YES EXPAND_ONLY_PREDEF = YES -# If the SEARCH_INCLUDES tag is set to YES the includes files in the +# If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. @@ -1946,9 +2084,9 @@ PREDEFINED = protected=private EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will -# remove all refrences to function-like macros that are alone on a line, have an -# all uppercase name, and do not end with a semicolon. Such function macros are -# typically used for boiler-plate code, and will confuse the parser if not +# remove all references to function-like macros that are alone on a line, have +# an all uppercase name, and do not end with a semicolon. Such function macros +# are typically used for boiler-plate code, and will confuse the parser if not # removed. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. @@ -1968,7 +2106,7 @@ SKIP_FUNCTION_MACROS = YES # where loc1 and loc2 can be relative or absolute paths or URLs. See the # section "Linking to external documentation" for more information about the use # of tag files. -# Note: Each tag file must have an unique name (where the name does NOT include +# Note: Each tag file must have a unique name (where the name does NOT include # the path). If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. @@ -1980,20 +2118,21 @@ TAGFILES = GENERATE_TAGFILE = -# If the ALLEXTERNALS tag is set to YES all external class will be listed in the -# class index. If set to NO only the inherited external classes will be listed. +# If the ALLEXTERNALS tag is set to YES, all external class will be listed in +# the class index. If set to NO, only the inherited external classes will be +# listed. # The default value is: NO. ALLEXTERNALS = NO -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in -# the modules index. If set to NO, only the current project's groups will be +# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will be # listed. # The default value is: YES. EXTERNAL_GROUPS = YES -# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in +# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in # the related pages index. 
If set to NO, only the current project's pages will # be listed. # The default value is: YES. @@ -2010,7 +2149,7 @@ PERL_PATH = /usr/bin/perl # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram +# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to # NO turns the diagrams off. Note that this option also works with HAVE_DOT # disabled, but it is recommended to install and use dot, since it yields more @@ -2035,7 +2174,7 @@ MSCGEN_PATH = DIA_PATH = -# If set to YES, the inheritance and collaboration graphs will hide inheritance +# If set to YES the inheritance and collaboration graphs will hide inheritance # and usage relations if the target is undocumented or is not a class. # The default value is: YES. @@ -2046,7 +2185,7 @@ HIDE_UNDOC_RELATIONS = YES # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent # Bell Labs. The other options in this section have no effect if this option is # set to NO -# The default value is: NO. +# The default value is: YES. HAVE_DOT = YES @@ -2060,7 +2199,7 @@ HAVE_DOT = YES DOT_NUM_THREADS = 0 -# When you want a differently looking font n the dot files that doxygen +# When you want a differently looking font in the dot files that doxygen # generates you can specify the font name using DOT_FONTNAME. You need to make # sure dot is able to find the font, which can be done by putting it in a # standard location or by setting the DOTFONTPATH environment variable or by @@ -2108,7 +2247,7 @@ COLLABORATION_GRAPH = NO GROUP_GRAPHS = YES -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. # The default value is: NO. @@ -2160,7 +2299,8 @@ INCLUDED_BY_GRAPH = NO # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected -# functions only using the \callgraph command. +# functions only using the \callgraph command. Disabling a call graph can be +# accomplished by means of the command \hidecallgraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2171,7 +2311,8 @@ CALL_GRAPH = NO # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable caller graphs for selected -# functions only using the \callergraph command. +# functions only using the \callergraph command. Disabling a caller graph can be +# accomplished by means of the command \hidecallergraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2194,11 +2335,17 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. +# generated by dot. For an explanation of the image formats see the section +# output formats in the documentation of the dot tool (Graphviz (see: +# http://www.graphviz.org/)). # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order # to make the SVG files visible in IE 9+ (other browsers do not have this # requirement). -# Possible values are: png, jpg, gif and svg. 
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd, +# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo, +# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo, +# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and +# png:gdiplus:gdiplus. # The default value is: png. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2241,6 +2388,24 @@ MSCFILE_DIRS = DIAFILE_DIRS = +# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the +# path where java can find the plantuml.jar file. If left blank, it is assumed +# PlantUML is not used or called during a preprocessing step. Doxygen will +# generate a warning when it encounters a \startuml command in this case and +# will not generate output for the diagram. + +PLANTUML_JAR_PATH = + +# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a +# configuration file for plantuml. + +PLANTUML_CFG_FILE = + +# When using plantuml, the specified paths are searched for files specified by +# the !include statement in a plantuml block. + +PLANTUML_INCLUDE_PATH = + # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized @@ -2277,7 +2442,7 @@ MAX_DOT_GRAPH_DEPTH = 0 DOT_TRANSPARENT = NO -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support # this, this feature is disabled by default. @@ -2294,7 +2459,7 @@ DOT_MULTI_TARGETS = YES GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot # files that are used to generate the various graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. -- cgit v1.2.3 From e498bc8f838ab0cc433f9f67206088064b52b6fa Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 28 Apr 2021 10:54:55 +0200 Subject: enhancing the doc --- src/python/gudhi/wasserstein/wasserstein.py | 37 ++++++++++++++++------------- 1 file changed, 21 insertions(+), 16 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 61505d03..5196b280 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -210,32 +210,37 @@ def _warn_infty(matching): return np.inf - def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enable_autodiff=False, keep_essential_parts=True): ''' - :param X: (n x 2) numpy.array encoding the first diagram. Can contain essential parts (points with infinite - coordinates). - :param Y: (m x 2) numpy.array encoding the second diagram. - :param matching: if True, computes and returns the optimal matching between X and Y, encoded as - a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to - the j-th point in Y, with the convention (-1) represents the diagonal. - Note that if the cost is +inf (essential parts have different number of points, - then the optimal matching will be set to `None`. - :param order: exponent for Wasserstein. Default value is 1. 
+ Compute the Wasserstein distance between persistence diagrams using the Python Optimal Transport backend. + Diagrams can contain points with infinite coordinates (essential parts). + Points with (-inf,-inf) and (+inf,+inf) coordinates are considered to belong to the diagonal. + If the distance between two diagrams is +inf (which happens if the cardinalities of essential + parts differ) and an optimal matching is required, it will be set to ``None``. + + :param X: The first diagram. + :type X: n x 2 numpy.array + :param Y: The second diagram. + :type Y: m x 2 numpy.array + :param matching: if ``True``, computes and returns the optimal matching between X and Y, encoded as + a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to + the j-th point in Y, with the convention that (-1) represents the diagonal. + :param order: Wasserstein exponent W_q + :type order: float :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2). - Default value is `np.inf`. - :param enable_autodiff: If X and Y are torch.tensor or tensorflow.Tensor, make the computation + :type internal_p: float + :param enable_autodiff: If X and Y are ``torch.tensor`` or ``tensorflow.Tensor``, make the computation transparent to automatic differentiation. This requires the package EagerPy and is currently incompatible - with `matching=True` and with `keep_essential_parts=True`. + with ``matching=True`` and with ``keep_essential_parts=True``. .. note:: This considers the function defined on the coordinates of the off-diagonal finite points of X and Y and lets the various frameworks compute its gradient. It never pulls new points from the diagonal. :type enable_autodiff: bool - :param keep_essential_parts: If False, only considers the finite points in the diagrams. - Otherwise, computes the distance between the essential parts separately. + :param keep_essential_parts: If ``False``, only considers the finite points in the diagrams. + Otherwise, include essential parts in cost and matching computation. :type keep_essential_parts: bool - :returns: the Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with + :returns: The Wasserstein distance of order q (1 <= q < infinity) between persistence diagrams with respect to the internal_p-norm as ground metric. If matching is set to True, also returns the optimal matching between X and Y. If cost is +inf, any matching is optimal and thus it returns `None` instead. -- cgit v1.2.3 From 9e59ca4f4497969ae6d159407e913c31dba7d6c5 Mon Sep 17 00:00:00 2001 From: tlacombe Date: Wed, 28 Apr 2021 10:56:45 +0200 Subject: enhancing the doc2 --- src/python/gudhi/wasserstein/wasserstein.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/wasserstein/wasserstein.py b/src/python/gudhi/wasserstein/wasserstein.py index 5196b280..dc18806e 100644 --- a/src/python/gudhi/wasserstein/wasserstein.py +++ b/src/python/gudhi/wasserstein/wasserstein.py @@ -226,7 +226,7 @@ def wasserstein_distance(X, Y, matching=False, order=1., internal_p=np.inf, enab :param matching: if ``True``, computes and returns the optimal matching between X and Y, encoded as a (n x 2) np.array [...[i,j]...], meaning the i-th point in X is matched to the j-th point in Y, with the convention that (-1) represents the diagonal. - :param order: Wasserstein exponent W_q + :param order: Wasserstein exponent q (1 <= q < infinity). :type order: float :param internal_p: Ground metric on the (upper-half) plane (i.e. norm L^p in R^2).
:type internal_p: float -- cgit v1.2.3 From d68ddc94bd82c48a4433ae0b3b1b3f10c167ed0b Mon Sep 17 00:00:00 2001 From: Hind Date: Wed, 28 Apr 2021 14:05:03 +0200 Subject: Add points (dataset type) before the underlying model (sphere) as a module --- src/python/CMakeLists.txt | 6 +-- ...plex_from_generated_points_on_sphere_example.py | 2 +- src/python/gudhi/datasets/generators/__init__.py | 0 .../gudhi/datasets/generators/points/__init__.py | 0 .../gudhi/datasets/generators/points/sphere.cc | 61 ++++++++++++++++++++++ src/python/gudhi/datasets/generators/sphere.cc | 61 ---------------------- 6 files changed, 65 insertions(+), 65 deletions(-) delete mode 100644 src/python/gudhi/datasets/generators/__init__.py create mode 100644 src/python/gudhi/datasets/generators/points/__init__.py create mode 100644 src/python/gudhi/datasets/generators/points/sphere.cc delete mode 100644 src/python/gudhi/datasets/generators/sphere.cc (limited to 'src') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index bcdd0741..ef9dc3ab 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -46,7 +46,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ") - set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets/generators', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets/generators/points', ") endif() if(CYTHON_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ") @@ -152,7 +152,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) - set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/sphere', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/points/sphere', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () @@ -264,7 +264,7 @@ if(PYTHONINTERP_FOUND) file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera") - file(COPY "gudhi/datasets/generators/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/datasets/generators") + file(COPY "gudhi/datasets/generators/points/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/datasets/generators/points") # Some files for pip package diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py index 2de9ec08..2b023bbe 100644 --- a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py +++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -from gudhi.datasets.generators import sphere +from gudhi.datasets.generators.points import sphere from gudhi import AlphaComplex diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git 
a/src/python/gudhi/datasets/generators/points/__init__.py b/src/python/gudhi/datasets/generators/points/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/python/gudhi/datasets/generators/points/sphere.cc b/src/python/gudhi/datasets/generators/points/sphere.cc new file mode 100644 index 00000000..79392ef0 --- /dev/null +++ b/src/python/gudhi/datasets/generators/points/sphere.cc @@ -0,0 +1,61 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. + * Author(s): Hind Montassif + * + * Copyright (C) 2021 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include <pybind11/pybind11.h> +#include <pybind11/numpy.h> + +#include <gudhi/random_point_generators.h> +#include <gudhi/Debug_utils.h> + +#include <CGAL/Epick_d.h> + +namespace py = pybind11; + + +typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; + +py::array_t<double> generate_points_on_sphere(size_t num_points, int dim, double radius) { + + py::array_t<double> points({num_points, (size_t)dim}); + + py::buffer_info buf = points.request(); + double *ptr = static_cast<double *>(buf.ptr); + + GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); + GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); + + + py::gil_scoped_release release; + auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(num_points, dim, radius); + + for (size_t i = 0; i < num_points; i++) + for (int j = 0; j < dim; j++) + ptr[i*dim+j] = points_generated[i][j]; + + return points; +} + +PYBIND11_MODULE(sphere, m) { + m.attr("__license__") = "LGPL v3"; + m.def("generate_random_points", &generate_points_on_sphere, + py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, + R"pbdoc( + Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d + + :param num_points: The number of points to be generated. + :type num_points: unsigned integer + :param dim: The dimension. + :type dim: integer + :param radius: The radius. + :type radius: float + :rtype: numpy array of float + :returns: the generated points on a sphere. + )pbdoc"); +} diff --git a/src/python/gudhi/datasets/generators/sphere.cc b/src/python/gudhi/datasets/generators/sphere.cc deleted file mode 100644 index 79392ef0..00000000 --- a/src/python/gudhi/datasets/generators/sphere.cc +++ /dev/null @@ -1,61 +0,0 @@ -/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Hind Montassif - * - * Copyright (C) 2021 Inria - * - * Modification(s): - * - YYYY/MM Author: Description of the modification - */ - -#include <pybind11/pybind11.h> -#include <pybind11/numpy.h> - -#include <gudhi/random_point_generators.h> -#include <gudhi/Debug_utils.h> - -#include <CGAL/Epick_d.h> - -namespace py = pybind11; - - -typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; - -py::array_t<double> generate_points_on_sphere(size_t num_points, int dim, double radius) { - - py::array_t<double> points({num_points, (size_t)dim}); - - py::buffer_info buf = points.request(); - double *ptr = static_cast<double *>(buf.ptr); - - GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); - GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); - - - py::gil_scoped_release release; - auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(num_points, dim, radius); - - for (size_t i = 0; i < num_points; i++) - for (int j = 0; j < dim; j++) - ptr[i*dim+j] = points_generated[i][j]; - - return points; -} - -PYBIND11_MODULE(sphere, m) { - m.attr("__license__") = "LGPL v3"; - m.def("generate_random_points", &generate_points_on_sphere, - py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, - R"pbdoc( - Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d - - :param num_points: The number of points to be generated. - :type num_points: unsigned integer - :param dim: The dimension. - :type dim: integer - :param radius: The radius. - :type radius: float - :rtype: numpy array of float - :returns: the generated points on a sphere. - )pbdoc"); -} -- cgit v1.2.3 From 8cf6bbe7e2bd7c71cb44872aba772a1c4caf06a9 Mon Sep 17 00:00:00 2001 From: ROUVREAU Vincent Date: Thu, 29 Apr 2021 11:23:43 +0200 Subject: numpy.take_along_axis used in knn requires numpy>=1.15.0 --- .github/build-requirements.txt | 5 ----- .github/test-requirements.txt | 15 --------------- src/python/doc/installation.rst | 2 +- src/python/setup.py.in | 4 ++-- 4 files changed, 3 insertions(+), 23 deletions(-) delete mode 100644 .github/build-requirements.txt delete mode 100644 .github/test-requirements.txt (limited to 'src') diff --git a/.github/build-requirements.txt b/.github/build-requirements.txt deleted file mode 100644 index 7de60d23..00000000 --- a/.github/build-requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -setuptools -wheel -numpy -Cython -pybind11 \ No newline at end of file diff --git a/.github/test-requirements.txt b/.github/test-requirements.txt deleted file mode 100644 index d0803574..00000000 --- a/.github/test-requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -pytest -pytest-cov -sphinx -sphinxcontrib-bibtex==1.0.0 -sphinx-paramlinks -matplotlib -scipy -scikit-learn -POT -tensorflow -tensorflow-addons -torch<1.5 -pykeops -hnswlib -eagerpy diff --git a/src/python/doc/installation.rst b/src/python/doc/installation.rst index 2881055f..9c16b04e 100644 --- a/src/python/doc/installation.rst +++ b/src/python/doc/installation.rst @@ -41,7 +41,7 @@ there. The library uses c++14 and requires `Boost `_ :math:`\geq` 1.56.0, `CMake `_ :math:`\geq` 3.5 to generate makefiles, -`NumPy `_, `Cython `_ and +`NumPy `_ :math:`\geq` 1.15.0, `Cython `_ and `pybind11 `_ to compile the GUDHI Python module.
It is a multi-platform library and compiles on Linux, Mac OSX and Visual diff --git a/src/python/setup.py.in b/src/python/setup.py.in index 65f5446e..759ec8d8 100644 --- a/src/python/setup.py.in +++ b/src/python/setup.py.in @@ -85,7 +85,7 @@ setup( long_description_content_type='text/x-rst', long_description=long_description, ext_modules = ext_modules, - install_requires = ['numpy >= 1.9',], - setup_requires = ['cython','numpy >= 1.9','pybind11',], + install_requires = ['numpy >= 1.15.0',], + setup_requires = ['cython','numpy >= 1.15.0','pybind11',], package_data={"": ["*.dll"], }, ) -- cgit v1.2.3 From aeff21a712e488a2948d7c12f67f2f11b047ada8 Mon Sep 17 00:00:00 2001 From: Hind Montassif Date: Tue, 4 May 2021 10:40:03 +0200 Subject: Set Simplex_tree_interface_full_featured constructor name (Simplex_tree) to the class name --- src/python/gudhi/simplex_tree.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/gudhi/simplex_tree.pxd b/src/python/gudhi/simplex_tree.pxd index 000323af..3b8ea4f9 100644 --- a/src/python/gudhi/simplex_tree.pxd +++ b/src/python/gudhi/simplex_tree.pxd @@ -44,7 +44,7 @@ cdef extern from "Simplex_tree_interface.h" namespace "Gudhi": cdef cppclass Simplex_tree_interface_full_featured "Gudhi::Simplex_tree_interface<Gudhi::Simplex_tree_options_full_featured>": - Simplex_tree() nogil + Simplex_tree_interface_full_featured() nogil double simplex_filtration(vector[int] simplex) nogil void assign_simplex_filtration(vector[int] simplex, double filtration) nogil void initialize_filtration() nogil -- cgit v1.2.3 From 2b694f9beae0e5fa78ae5b8923e7f2905c58777f Mon Sep 17 00:00:00 2001 From: Hind Montassif Date: Wed, 5 May 2021 13:58:30 +0200 Subject: Add __init__ files at every level in modules Remove last modules level and add sample type argument Rename num_points to n_samples --- src/python/CMakeLists.txt | 7 +-- ...plex_from_generated_points_on_sphere_example.py | 7 +-- src/python/gudhi/datasets/__init__.py | 0 src/python/gudhi/datasets/generators/__init__.py | 0 src/python/gudhi/datasets/generators/points.cc | 68 ++++++++++++++++++++++ .../gudhi/datasets/generators/points/__init__.py | 0 .../gudhi/datasets/generators/points/sphere.cc | 61 ------------------ 7 files changed, 74 insertions(+), 69 deletions(-) create mode 100644 src/python/gudhi/datasets/__init__.py create mode 100644 src/python/gudhi/datasets/generators/__init__.py create mode 100644 src/python/gudhi/datasets/generators/points.cc delete mode 100644 src/python/gudhi/datasets/generators/points/__init__.py delete mode 100644 src/python/gudhi/datasets/generators/points/sphere.cc (limited to 'src') diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index ef9dc3ab..8dd4ea5d 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -46,7 +46,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'bottleneck', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'hera', ") set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'clustering', ") - set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets/generators/points', ") + set(GUDHI_PYTHON_MODULES_EXTRA "${GUDHI_PYTHON_MODULES_EXTRA}'datasets', ") endif() if(CYTHON_FOUND) set(GUDHI_PYTHON_MODULES "${GUDHI_PYTHON_MODULES}'off_reader', ") @@ -152,7 +152,7 @@ if(PYTHONINTERP_FOUND) set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/wasserstein', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'hera/bottleneck', ") if (NOT CGAL_VERSION VERSION_LESS 4.11.0) -
set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/points/sphere', ") + set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'datasets/generators/points', ") set(GUDHI_PYBIND11_MODULES "${GUDHI_PYBIND11_MODULES}'bottleneck', ") set(GUDHI_CYTHON_MODULES "${GUDHI_CYTHON_MODULES}'nerve_gic', ") endif () @@ -264,8 +264,7 @@ if(PYTHONINTERP_FOUND) file(COPY "gudhi/weighted_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/dtm_rips_complex.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi") file(COPY "gudhi/hera/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/hera") - file(COPY "gudhi/datasets/generators/points/__init__.py" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi/datasets/generators/points") - + file(COPY "gudhi/datasets" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gudhi" FILES_MATCHING PATTERN "*.py") # Some files for pip package file(COPY "introduction.rst" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/") diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py index 2b023bbe..e73584d3 100644 --- a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py +++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -from gudhi.datasets.generators.points import sphere +from gudhi.datasets.generators import points from gudhi import AlphaComplex @@ -22,11 +22,10 @@ print("#####################################################################") print("AlphaComplex creation from generated points on sphere") -# Generate a circle: 50 points; dim 2; radius 1 -points = sphere.generate_random_points(50, 2, 1) +gen_points = points.sphere(n_samples = 50, dim = 2, radius = 1, sample = "random") # Create an alpha complex -alpha_complex = AlphaComplex(points=points) +alpha_complex = AlphaComplex(points = gen_points) simplex_tree = alpha_complex.create_simplex_tree() result_str = 'Alpha complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \ diff --git a/src/python/gudhi/datasets/__init__.py b/src/python/gudhi/datasets/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/python/gudhi/datasets/generators/__init__.py b/src/python/gudhi/datasets/generators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/python/gudhi/datasets/generators/points.cc b/src/python/gudhi/datasets/generators/points.cc new file mode 100644 index 00000000..f02c7d73 --- /dev/null +++ b/src/python/gudhi/datasets/generators/points.cc @@ -0,0 +1,68 @@ +/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. + * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details. 
+ * Author(s): Hind Montassif + * + * Copyright (C) 2021 Inria + * + * Modification(s): + * - YYYY/MM Author: Description of the modification + */ + +#include <pybind11/pybind11.h> +#include <pybind11/numpy.h> + +#include <gudhi/random_point_generators.h> +#include <gudhi/Debug_utils.h> + +#include <CGAL/Epick_d.h> + +namespace py = pybind11; + + +typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; + +py::array_t<double> generate_points_on_sphere(size_t n_samples, int dim, double radius, std::string sample) { + + if (sample != "random") { + throw pybind11::value_error("sample type is not supported"); + } + + py::array_t<double> points({n_samples, (size_t)dim}); + + py::buffer_info buf = points.request(); + double *ptr = static_cast<double *>(buf.ptr); + + GUDHI_CHECK(n_samples == buf.shape[0], "Py array first dimension not matching n_samples on sphere"); + GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); + + + py::gil_scoped_release release; + auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, dim, radius); + + for (size_t i = 0; i < n_samples; i++) + for (int j = 0; j < dim; j++) + ptr[i*dim+j] = points_generated[i][j]; + + return points; +} + +PYBIND11_MODULE(points, m) { + m.attr("__license__") = "LGPL v3"; + m.def("sphere", &generate_points_on_sphere, + py::arg("n_samples"), py::arg("dim"), + py::arg("radius") = 1, py::arg("sample") = "random", + R"pbdoc( + Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d + + :param n_samples: The number of points to be generated. + :type n_samples: integer + :param dim: The ambient dimension d. + :type dim: integer + :param radius: The radius. + :type radius: float + :param sample: The sample type. + :type sample: string + :rtype: numpy array of float + :returns: the generated points on a sphere. + )pbdoc"); +} diff --git a/src/python/gudhi/datasets/generators/points/__init__.py b/src/python/gudhi/datasets/generators/points/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/python/gudhi/datasets/generators/points/sphere.cc b/src/python/gudhi/datasets/generators/points/sphere.cc deleted file mode 100644 index 79392ef0..00000000 --- a/src/python/gudhi/datasets/generators/points/sphere.cc +++ /dev/null @@ -1,61 +0,0 @@ -/* This file is part of the Gudhi Library - https://gudhi.inria.fr/ - which is released under MIT. - * See file LICENSE or go to https://gudhi.inria.fr/licensing/ for full license details.
- * Author(s): Hind Montassif - * - * Copyright (C) 2021 Inria - * - * Modification(s): - * - YYYY/MM Author: Description of the modification - */ - -#include <pybind11/pybind11.h> -#include <pybind11/numpy.h> - -#include <gudhi/random_point_generators.h> -#include <gudhi/Debug_utils.h> - -#include <CGAL/Epick_d.h> - -namespace py = pybind11; - - -typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; - -py::array_t<double> generate_points_on_sphere(size_t num_points, int dim, double radius) { - - py::array_t<double> points({num_points, (size_t)dim}); - - py::buffer_info buf = points.request(); - double *ptr = static_cast<double *>(buf.ptr); - - GUDHI_CHECK(num_points == buf.shape[0], "Py array first dimension not matching num_points on sphere"); - GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); - - - py::gil_scoped_release release; - auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(num_points, dim, radius); - - for (size_t i = 0; i < num_points; i++) - for (int j = 0; j < dim; j++) - ptr[i*dim+j] = points_generated[i][j]; - - return points; -} - -PYBIND11_MODULE(sphere, m) { - m.attr("__license__") = "LGPL v3"; - m.def("generate_random_points", &generate_points_on_sphere, - py::arg("num_points"), py::arg("dim"), py::arg("radius") = 1, - R"pbdoc( - Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d - - :param num_points: The number of points to be generated. - :type num_points: unsigned integer - :param dim: The dimension. - :type dim: integer - :param radius: The radius. - :type radius: float - :rtype: numpy array of float - :returns: the generated points on a sphere. - )pbdoc"); -} -- cgit v1.2.3 From 62510e70009ff2fc65028b88b56886fb53743e51 Mon Sep 17 00:00:00 2001 From: Hind Montassif Date: Mon, 10 May 2021 10:58:36 +0200 Subject: Rename dim to ambient_dim for sphere (to be consistent with dim in torus) --- ..._complex_from_generated_points_on_sphere_example.py | 2 +- src/python/gudhi/datasets/generators/points.cc | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py index e73584d3..267e6436 100644 --- a/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py +++ b/src/python/example/alpha_complex_from_generated_points_on_sphere_example.py @@ -22,7 +22,7 @@ print("#####################################################################") print("AlphaComplex creation from generated points on sphere") -gen_points = points.sphere(n_samples = 50, dim = 2, radius = 1, sample = "random") +gen_points = points.sphere(n_samples = 50, ambient_dim = 2, radius = 1, sample = "random") # Create an alpha complex alpha_complex = AlphaComplex(points = gen_points) diff --git a/src/python/gudhi/datasets/generators/points.cc b/src/python/gudhi/datasets/generators/points.cc index f02c7d73..e2626b09 100644 --- a/src/python/gudhi/datasets/generators/points.cc +++ b/src/python/gudhi/datasets/generators/points.cc @@ -21,27 +21,27 @@ namespace py = pybind11; typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; -py::array_t<double> generate_points_on_sphere(size_t n_samples, int dim, double radius, std::string sample) { +py::array_t<double> generate_points_on_sphere(size_t n_samples, int ambient_dim, double radius, std::string sample) { if (sample != "random") { throw pybind11::value_error("sample type is not supported"); } - py::array_t<double> points({n_samples, (size_t)dim}); + py::array_t<double> points({n_samples, (size_t)ambient_dim}); py::buffer_info buf =
points.request(); double *ptr = static_cast<double *>(buf.ptr); GUDHI_CHECK(n_samples == buf.shape[0], "Py array first dimension not matching n_samples on sphere"); - GUDHI_CHECK(dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); + GUDHI_CHECK(ambient_dim == buf.shape[1], "Py array second dimension not matching the ambient space dimension"); py::gil_scoped_release release; - auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, dim, radius); + auto points_generated = Gudhi::generate_points_on_sphere_d<Kern>(n_samples, ambient_dim, radius); for (size_t i = 0; i < n_samples; i++) - for (int j = 0; j < dim; j++) - ptr[i*dim+j] = points_generated[i][j]; + for (int j = 0; j < ambient_dim; j++) + ptr[i*ambient_dim+j] = points_generated[i][j]; return points; } @@ -49,15 +49,15 @@ py::array_t<double> generate_points_on_sphere(size_t n_samples, int dim, double PYBIND11_MODULE(points, m) { m.attr("__license__") = "LGPL v3"; m.def("sphere", &generate_points_on_sphere, - py::arg("n_samples"), py::arg("dim"), + py::arg("n_samples"), py::arg("ambient_dim"), py::arg("radius") = 1, py::arg("sample") = "random", R"pbdoc( Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d :param n_samples: The number of points to be generated. :type n_samples: integer - :param dim: The ambient dimension d. - :type dim: integer + :param ambient_dim: The ambient dimension d. + :type ambient_dim: integer :param radius: The radius. :type radius: float :param sample: The sample type. -- cgit v1.2.3 From a571f198535b5ab5751eb55693371e9348aa1804 Mon Sep 17 00:00:00 2001 From: Hind-M Date: Fri, 21 May 2021 17:32:43 +0200 Subject: Fix issue #489 RipsComplex out of bounds access --- ...ips_complex_diagram_persistence_from_distance_matrix_file_example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py index 236d085d..9320d904 100755 --- a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py +++ b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py @@ -44,7 +44,7 @@ print("RipsComplex creation from distance matrix read in a csv file") message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length) print(message) -distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file) +distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file, separator=',') rips_complex = gudhi.RipsComplex( distance_matrix=distance_matrix, max_edge_length=args.max_edge_length ) -- cgit v1.2.3 From 8b3c55502718e4c184d828151ee6f75fd2cfc9eb Mon Sep 17 00:00:00 2001 From: Hind-M Date: Tue, 25 May 2021 18:01:54 +0200 Subject: Add a separator argument that goes with the rips_complex_diagram_persistence_from_distance_matrix_file_example input file Specify explicitly the separator when using a specific input file --- src/common/test/test_distance_matrix_reader.cpp | 2 +- src/python/CMakeLists.txt | 2 +- ..._complex_diagram_persistence_from_distance_matrix_file_example.py | 5 +++-- src/python/test/test_reader_utils.py | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/common/test/test_distance_matrix_reader.cpp b/src/common/test/test_distance_matrix_reader.cpp index 73be8104..92e899b8 100644 ---
a/src/common/test/test_distance_matrix_reader.cpp +++ b/src/common/test/test_distance_matrix_reader.cpp @@ -57,7 +57,7 @@ BOOST_AUTO_TEST_CASE( full_square_distance_matrix ) { Distance_matrix from_full_square; // Read full_square_distance_matrix.csv file where the separator is the default one ';' - from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv"); + from_full_square = Gudhi::read_lower_triangular_matrix_from_csv_file<double>("full_square_distance_matrix.csv", ';'); for (auto& i : from_full_square) { for (auto j : i) { std::clog << j << " "; diff --git a/src/python/CMakeLists.txt b/src/python/CMakeLists.txt index a1440cbc..bc9a3b7b 100644 --- a/src/python/CMakeLists.txt +++ b/src/python/CMakeLists.txt @@ -457,7 +457,7 @@ if(PYTHONINTERP_FOUND) WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${CMAKE_COMMAND} -E env "${GUDHI_PYTHON_PATH_ENV}" ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py" - --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3) + --no-diagram -f ${CMAKE_SOURCE_DIR}/data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3) add_test(NAME rips_complex_diagram_persistence_from_off_file_example_py_test WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} diff --git a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py index 9320d904..8a9cc857 100755 --- a/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py +++ b/src/python/example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py @@ -21,11 +21,12 @@ parser = argparse.ArgumentParser( description="RipsComplex creation from " "a distance matrix read in a csv file.", epilog="Example: " "example/rips_complex_diagram_persistence_from_distance_matrix_file_example.py " - "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -e 12.0 -d 3" + "-f ../data/distance_matrix/lower_triangular_distance_matrix.csv -s , -e 12.0 -d 3" "- Constructs a Rips complex with the " "distance matrix from the given csv file.", ) parser.add_argument("-f", "--file", type=str, required=True) +parser.add_argument("-s", "--separator", type=str, required=True) parser.add_argument("-e", "--max_edge_length", type=float, default=0.5) parser.add_argument("-d", "--max_dimension", type=int, default=1) parser.add_argument("-b", "--band", type=float, default=0.0) @@ -44,7 +45,7 @@ print("RipsComplex creation from distance matrix read in a csv file") message = "RipsComplex with max_edge_length=" + repr(args.max_edge_length) print(message) -distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file, separator=',') +distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(csv_file=args.file, separator=args.separator) rips_complex = gudhi.RipsComplex( distance_matrix=distance_matrix, max_edge_length=args.max_edge_length ) diff --git a/src/python/test/test_reader_utils.py b/src/python/test/test_reader_utils.py index 90da6651..e96e0569 100755 --- a/src/python/test/test_reader_utils.py +++ b/src/python/test/test_reader_utils.py @@ -30,7 +30,7 @@ def test_full_square_distance_matrix_csv_file(): test_file.write("0;1;2;3;\n1;0;4;5;\n2;4;0;6;\n3;5;6;0;") test_file.close() matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
csv_file="full_square_distance_matrix.csv", separator=";" ) assert matrix == [[], [1.0], [2.0, 4.0], [3.0, 5.0, 6.0]] -- cgit v1.2.3 From ad1145bc4ac224954055f9b9ad955c2a53ceb687 Mon Sep 17 00:00:00 2001 From: Hind-M Date: Wed, 26 May 2021 17:27:01 +0200 Subject: Change some output messages and documentation to be more explicit --- src/python/gudhi/datasets/generators/points.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/python/gudhi/datasets/generators/points.cc b/src/python/gudhi/datasets/generators/points.cc index e2626b09..d658946b 100644 --- a/src/python/gudhi/datasets/generators/points.cc +++ b/src/python/gudhi/datasets/generators/points.cc @@ -24,7 +24,7 @@ typedef CGAL::Epick_d< CGAL::Dynamic_dimension_tag > Kern; py::array_t<double> generate_points_on_sphere(size_t n_samples, int ambient_dim, double radius, std::string sample) { if (sample != "random") { - throw pybind11::value_error("sample type is not supported"); + throw pybind11::value_error("This sample type is not supported"); } py::array_t<double> points({n_samples, (size_t)ambient_dim}); @@ -50,7 +50,7 @@ PYBIND11_MODULE(points, m) { m.attr("__license__") = "LGPL v3"; m.def("sphere", &generate_points_on_sphere, py::arg("n_samples"), py::arg("ambient_dim"), - py::arg("radius") = 1, py::arg("sample") = "random", + py::arg("radius") = 1., py::arg("sample") = "random", R"pbdoc( Generate random i.i.d. points uniformly on a (d-1)-sphere in R^d :param n_samples: The number of points to be generated. :type n_samples: integer :param ambient_dim: The ambient dimension d. :type ambient_dim: integer - :param radius: The radius. + :param radius: The radius. Default value is `1.`. :type radius: float - :param sample: The sample type. + :param sample: The sample type. Default and only available value is `"random"`. :type sample: string :rtype: numpy array of float :returns: the generated points on a sphere. -- cgit v1.2.3
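Taken together, the generator commits above leave the API exposed as gudhi.datasets.generators.points.sphere. A minimal usage sketch, mirroring the patched alpha-complex example file and assuming a GUDHI build that includes these commits:

    from gudhi import AlphaComplex
    from gudhi.datasets.generators import points

    # Draw 50 i.i.d. points uniformly on a circle (1-sphere) embedded in R^2.
    gen_points = points.sphere(n_samples=50, ambient_dim=2, radius=1., sample="random")

    # Feed the generated cloud to an alpha complex, as in the patched example.
    alpha_complex = AlphaComplex(points=gen_points)
    simplex_tree = alpha_complex.create_simplex_tree()
    print("Alpha complex is of dimension", simplex_tree.dimension())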
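Similarly, the wasserstein.py commits near the top of the series document how essential parts enter the distance. A short sketch of that documented behaviour, with made-up diagram values and assuming a build that includes these patches (keep_essential_parts defaults to True):

    import numpy as np
    from gudhi.wasserstein import wasserstein_distance

    # Each diagram has one finite point and one essential point (finite birth, infinite death).
    X = np.array([[0., 1.], [2., np.inf]])
    Y = np.array([[0., 1.2], [1.5, np.inf]])

    # The essential parts have equal cardinalities, so the cost stays finite.
    cost, matching = wasserstein_distance(X, Y, matching=True, order=1., internal_p=np.inf)

    # An extra essential point in Z makes the cost +inf; per the docstring, the matching is then None.
    Z = np.array([[0., 1.2], [1.5, np.inf], [3., np.inf]])
    cost_inf, no_matching = wasserstein_distance(X, Z, matching=True)
    assert cost_inf == np.inf and no_matching is None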
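Finally, the issue #489 fix and its follow-up make the CSV separator explicit when reading a distance matrix. A sketch of the patched example's calling sequence, assuming the comma-separated sample file shipped in GUDHI's data/distance_matrix directory:

    import gudhi

    # The separator must now match the file; this sample file is comma-separated.
    distance_matrix = gudhi.read_lower_triangular_matrix_from_csv_file(
        csv_file="data/distance_matrix/lower_triangular_distance_matrix.csv", separator=",")
    rips_complex = gudhi.RipsComplex(distance_matrix=distance_matrix, max_edge_length=12.0)
    simplex_tree = rips_complex.create_simplex_tree(max_dimension=3)
    print("Rips complex is of dimension", simplex_tree.dimension())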